diff --git a/Cargo.lock b/Cargo.lock index 90426e699..e561cb3a6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -697,6 +697,7 @@ dependencies = [ "clap_complete", "clarinet-deployments", "clarinet-files", + "clarinet-format", "clarity-lsp", "clarity-repl", "crossbeam-channel", @@ -768,6 +769,14 @@ dependencies = [ "wasm-bindgen-futures", ] +[[package]] +name = "clarinet-format" +version = "0.1.0" +dependencies = [ + "clarity", + "pretty_assertions", +] + [[package]] name = "clarinet-sdk-wasm" version = "2.12.0" @@ -1421,6 +1430,12 @@ dependencies = [ "syn 2.0.82", ] +[[package]] +name = "diff" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" + [[package]] name = "digest" version = "0.8.1" @@ -3506,6 +3521,16 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "pretty_assertions" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" +dependencies = [ + "diff", + "yansi", +] + [[package]] name = "prettytable-rs" version = "0.10.0" @@ -6488,9 +6513,9 @@ dependencies = [ [[package]] name = "yansi" -version = "1.0.0-rc.1" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1367295b8f788d371ce2dbc842c7b709c73ee1364d30351dd300ec2203b12377" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" dependencies = [ "is-terminal", ] diff --git a/Cargo.toml b/Cargo.toml index 93a7cc217..4df0258c4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,7 @@ members = [ "components/clarinet-cli", "components/clarinet-deployments", "components/clarinet-files", + "components/clarinet-format", "components/clarinet-utils", "components/clarinet-sdk-wasm", "components/clarity-lsp", diff --git a/components/clarinet-cli/Cargo.toml b/components/clarinet-cli/Cargo.toml index d88174d09..d01311be6 100644 --- a/components/clarinet-cli/Cargo.toml +++ b/components/clarinet-cli/Cargo.toml @@ -40,6 +40,7 @@ clarity_repl = { package = "clarity-repl", path = "../clarity-repl", features = ] } clarinet-files = { path = "../clarinet-files", features = ["cli"] } clarity-lsp = { path = "../clarity-lsp", features = ["cli"] } +clarinet-format = { path = "../clarinet-format" } clarinet-deployments = { path = "../clarinet-deployments", features = ["cli"] } hiro-system-kit = { path = "../hiro-system-kit" } stacks-network = { path = "../stacks-network" } diff --git a/components/clarinet-cli/src/frontend/cli.rs b/components/clarinet-cli/src/frontend/cli.rs index 2f90d8f48..341f9def1 100644 --- a/components/clarinet-cli/src/frontend/cli.rs +++ b/components/clarinet-cli/src/frontend/cli.rs @@ -27,6 +27,7 @@ use clarinet_files::{ get_manifest_location, FileLocation, NetworkManifest, ProjectManifest, ProjectManifestFile, RequirementConfig, }; +use clarinet_format::formatter::{ClarityFormatter, Settings}; use clarity_repl::analysis::call_checker::ContractAnalysis; use clarity_repl::clarity::vm::analysis::AnalysisDatabase; use clarity_repl::clarity::vm::costs::LimitedCostTracker; @@ -39,7 +40,7 @@ use clarity_repl::{analysis, repl, Terminal}; use stacks_network::{self, DevnetOrchestrator}; use std::collections::HashMap; use std::fs::{self, File}; -use std::io::prelude::*; +use std::io::{self, prelude::*}; use std::{env, 
process}; use toml; @@ -94,11 +95,30 @@ enum Command { /// Get Clarity autocompletion and inline errors from your code editor (VSCode, vim, emacs, etc) #[clap(name = "lsp", bin_name = "lsp")] LSP, + /// Format Clarity code files + #[clap(name = "format", aliases = &["fmt"], bin_name = "format")] + Formatter(Formatter), /// Step by step debugging and breakpoints from your code editor (VSCode, vim, emacs, etc) #[clap(name = "dap", bin_name = "dap")] DAP, } +#[derive(Parser, PartialEq, Clone, Debug)] +struct Formatter { + #[clap(long = "manifest-path", short = 'm')] + pub manifest_path: Option<String>, + /// If specified, format only this file + #[clap(long = "file", short = 'f')] + pub file: Option<String>, + #[clap(long = "max-line-length", short = 'l')] + pub max_line_length: Option<usize>, + #[clap(long = "tabs", short = 't')] + /// indentation size, e.g. 2 + pub indentation: Option<usize>, + #[clap(long = "dry-run")] + pub dry_run: bool, +} + #[derive(Subcommand, PartialEq, Clone, Debug)] enum Devnet { /// Generate package of all required devnet artifacts @@ -1180,6 +1200,28 @@ pub fn main() { process::exit(1); } }, + Command::Formatter(cmd) => { + let sources = get_sources_to_format(cmd.manifest_path, cmd.file); + let mut settings = Settings::default(); + + if let Some(max_line_length) = cmd.max_line_length { + settings.max_line_length = max_line_length; + } + + if let Some(indentation) = cmd.indentation { + settings.indentation = clarinet_format::formatter::Indentation::Space(indentation); + } + let mut formatter = ClarityFormatter::new(settings); + + for (file_path, source) in &sources { + let output = formatter.format(source); + if !cmd.dry_run { + let _ = overwrite_formatted(file_path, output); + } else { + println!("{}", output); + } + } + } Command::Devnet(subcommand) => match subcommand { Devnet::Package(cmd) => { let manifest = load_manifest_or_exit(cmd.manifest_path); @@ -1193,6 +1235,57 @@ pub fn main() { }; } +fn overwrite_formatted(file_path: &String, output: String) -> io::Result<()> { + let mut file = fs::File::create(file_path)?; + + file.write_all(output.as_bytes())?; + Ok(()) +} + +fn from_code_source(src: ClarityCodeSource) -> String { + match src { + ClarityCodeSource::ContractOnDisk(path_buf) => { + path_buf.as_path().to_str().unwrap().to_owned() + } + _ => panic!("invalid code source"), // TODO + } +} +// look for files at the default code path (./contracts/) if +// cmd.manifest_path is not specified OR if cmd.file is not specified +fn get_sources_from_manifest(manifest_path: Option<String>) -> Vec<String> { + let manifest = load_manifest_or_warn(manifest_path); + match manifest { + Some(manifest_path) => { + let contracts = manifest_path.contracts.values().cloned(); + contracts.map(|c| from_code_source(c.code_source)).collect() + } + None => { + // TODO this should probably just panic or fail gracefully because + // if the manifest isn't specified or found at the default location + // we can't do much + vec![] + } + } +} +fn get_sources_to_format( + manifest_path: Option<String>, + file: Option<String>, +) -> Vec<(String, String)> { + let files: Vec<String> = match file { + Some(file_name) => vec![format!("{}", file_name)], + None => get_sources_from_manifest(manifest_path), + }; + // Map each file to its source code + files + .into_iter() + .map(|file_path| { + let source = fs::read_to_string(&file_path) + .unwrap_or_else(|_| "Failed to read file".to_string()); + (file_path, source) + }) + .collect() +} + fn get_manifest_location_or_exit(path: Option<String>) -> FileLocation { match get_manifest_location(path) { Some(manifest_location) =>
manifest_location, diff --git a/components/clarinet-format/Cargo.toml b/components/clarinet-format/Cargo.toml new file mode 100644 index 000000000..d8c6a1092 --- /dev/null +++ b/components/clarinet-format/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "clarinet-format" +version = "0.1.0" +edition = "2021" + +[dependencies] +# clarity-repl = { path = "../clarity-repl" } +clarity = { workspace = true} + +[dev-dependencies] +pretty_assertions = "1.3" + +[features] +default = ["cli"] +cli = [ + "clarity/canonical", + "clarity/developer-mode", + "clarity/devtools", + "clarity/log", +] +wasm = [ + "clarity/wasm", + "clarity/developer-mode", + "clarity/devtools", +] + + +[lib] +name = "clarinet_format" +path = "src/lib.rs" +crate-type = ["lib"] diff --git a/components/clarinet-format/src/formatter/mod.rs b/components/clarinet-format/src/formatter/mod.rs new file mode 100644 index 000000000..08a178028 --- /dev/null +++ b/components/clarinet-format/src/formatter/mod.rs @@ -0,0 +1,1166 @@ +use std::fmt::format; + +use clarity::vm::functions::{define::DefineFunctions, NativeFunctions}; +use clarity::vm::representations::{PreSymbolicExpression, PreSymbolicExpressionType}; +use clarity::vm::types::{TupleTypeSignature, TypeSignature}; +use clarity::vm::ClarityName; + +pub enum Indentation { + Space(usize), + Tab, +} + +impl ToString for Indentation { + fn to_string(&self) -> String { + match self { + Indentation::Space(count) => " ".repeat(*count), + Indentation::Tab => "\t".to_string(), + } + } +} + +// commented blocks with this string included will not be formatted +const FORMAT_IGNORE_SYNTAX: &str = "@format-ignore"; + +// or/and with > N comparisons will be split across multiple lines +// (or +// true +// (is-eq 1 1) +// false +// ) +const BOOLEAN_BREAK_LIMIT: usize = 2; + +pub struct Settings { + pub indentation: Indentation, + pub max_line_length: usize, +} + +impl Settings { + pub fn new(indentation: Indentation, max_line_length: usize) -> Self { + Settings { + indentation, + max_line_length, + } + } +} +impl Default for Settings { + fn default() -> Settings { + Settings { + indentation: Indentation::Space(2), + max_line_length: 80, + } + } +} +// +pub struct ClarityFormatter { + settings: Settings, +} +impl ClarityFormatter { + pub fn new(settings: Settings) -> Self { + Self { settings } + } + /// formatting for files to ensure a newline at the end + pub fn format_file(&mut self, source: &str) -> String { + let pse = clarity::vm::ast::parser::v2::parse(source).unwrap(); + let result = format_source_exprs(&self.settings, &pse, "", None, ""); + + // make sure the file ends with a newline + result.trim_end_matches('\n').to_string() + "\n" + } + /// Alias `format_file` to `format` + pub fn format(&mut self, source: &str) -> String { + self.format_file(source) + } + /// for range formatting within editors + pub fn format_section(&mut self, source: &str) -> String { + let pse = clarity::vm::ast::parser::v2::parse(source).unwrap(); + format_source_exprs(&self.settings, &pse, "", None, "") + } +} + +pub fn format_source_exprs( + settings: &Settings, + expressions: &[PreSymbolicExpression], + previous_indentation: &str, + previous_expr: Option<&PreSymbolicExpression>, + acc: &str, +) -> String { + // println!("exprs: {:?}", expressions); + // println!("previous: {:?}", previous_expr); + + // use peekable to handle trailing comments nicely + let mut iter = expressions.iter().peekable(); + let mut result = acc.to_owned(); // Accumulate results here + + while let Some(expr) = iter.next() { + let 
trailing_comment = match iter.peek().cloned() { + Some(next) => { + if is_comment(next) && is_same_line(expr, next) { + iter.next(); + Some(next) + } else { + None + } + } + _ => None, + }; + let cur = display_pse(&Settings::default(), expr, previous_indentation); + if cur.contains(FORMAT_IGNORE_SYNTAX) { + if let Some(next) = iter.peek() { + // iter.next(); + // we need PreSymbolicExpression back into orig Source + result.push_str(&format!("{:?}", next)); // TODO obviously wrong + }; + continue; + } + if let Some(list) = expr.match_list() { + if let Some(atom_name) = list.split_first().and_then(|(f, _)| f.match_atom()) { + let formatted = if let Some(native) = NativeFunctions::lookup_by_name(atom_name) { + match native { + NativeFunctions::Let => format_let(settings, list, previous_indentation), + NativeFunctions::Begin => { + format_begin(settings, list, previous_indentation) + } + NativeFunctions::Match => { + format_match(settings, list, previous_indentation) + } + NativeFunctions::IndexOf + | NativeFunctions::IndexOfAlias + | NativeFunctions::Asserts + | NativeFunctions::ContractCall => { + format_general(settings, list, previous_indentation) + } + NativeFunctions::TupleCons => { + // if the kv map is defined with (tuple (c 1)) then we strip the + // ClarityName("tuple") out first and convert it to key/value syntax + format_key_value(settings, &list[1..], previous_indentation) + } + NativeFunctions::If => format_if(settings, list, previous_indentation), + NativeFunctions::ListCons => { + format_list(settings, list, previous_indentation) + } + NativeFunctions::And | NativeFunctions::Or => { + format_booleans(settings, list, previous_indentation) + } + _ => { + format!( + "({}){}", + format_source_exprs( + settings, + list, + previous_indentation, + previous_expr, + acc + ), + if let Some(comment) = trailing_comment { + format!( + " {}", + &display_pse(settings, comment, previous_indentation) + ) + } else { + "".to_string() + } + ) + } + } + } else if let Some(define) = DefineFunctions::lookup_by_name(atom_name) { + match define { + DefineFunctions::PublicFunction + | DefineFunctions::ReadOnlyFunction + | DefineFunctions::PrivateFunction => format_function(settings, list), + DefineFunctions::Constant | DefineFunctions::PersistedVariable => { + format_constant(settings, list) + } + DefineFunctions::Map => format_map(settings, list, previous_indentation), + DefineFunctions::UseTrait | DefineFunctions::ImplTrait => { + // these are the same as the following but need a trailing newline + format!( + "({})\n", + format_source_exprs( + settings, + list, + previous_indentation, + previous_expr, + acc + ) + ) + } + // DefineFunctions::Trait => format_trait(settings, list), + // DefineFunctions::PersistedVariable + // DefineFunctions::FungibleToken + // DefineFunctions::NonFungibleToken + _ => { + format!( + "({})", + format_source_exprs( + settings, + list, + previous_indentation, + previous_expr, + acc + ) + ) + } + } + } else { + format!( + "({})", + format_source_exprs(settings, list, previous_indentation, Some(expr), acc) + ) + }; + result.push_str(t(&formatted)); + continue; + } + } + let current = display_pse(settings, expr, ""); + let mut between = " "; + if let Some(next) = iter.peek() { + if !is_same_line(expr, next) || is_comment(expr) { + between = "\n"; + } + } else { + // no next expression to space out + between = ""; + } + + result.push_str(&format!("{current}{between}")); + } + result +} + +// trim but leaves newlines preserved +fn t(input: &str) -> &str { + let start = input + 
.find(|c: char| !c.is_whitespace() || c == '\n') + .unwrap_or(0); + + let end = input + .rfind(|c: char| !c.is_whitespace() || c == '\n') + .map(|pos| pos + 1) + .unwrap_or(0); + + &input[start..end] +} + +fn name_and_args( + exprs: &[PreSymbolicExpression], +) -> Option<(&PreSymbolicExpression, &[PreSymbolicExpression])> { + if exprs.len() >= 2 { + Some((&exprs[1], &exprs[2..])) + } else { + None // Return None if there aren't enough items + } +} + +fn format_constant(settings: &Settings, exprs: &[PreSymbolicExpression]) -> String { + let func_type = display_pse(settings, exprs.first().unwrap(), ""); + let indentation = &settings.indentation.to_string(); + let mut acc = format!("({func_type} "); + + if let Some((name, args)) = name_and_args(exprs) { + acc.push_str(&display_pse(settings, name, "")); + + // Access the value from args + if let Some(value) = args.first() { + if let Some(list) = value.match_list() { + acc.push_str(&format!( + "\n{}({})", + indentation, + format_source_exprs(settings, list, "", None, "") + )); + acc.push_str("\n)"); + } else { + // Handle non-list values (e.g., literals or simple expressions) + acc.push(' '); + acc.push_str(&display_pse(settings, value, "")); + acc.push(')'); + } + } + + acc.push('\n'); + acc.to_owned() + } else { + panic!("Expected a valid constant definition with (name value)") + } +} +fn format_map( + settings: &Settings, + exprs: &[PreSymbolicExpression], + previous_indentation: &str, +) -> String { + let mut acc = "(define-map ".to_string(); + let indentation = &settings.indentation.to_string(); + let space = format!("{}{}", previous_indentation, indentation); + + if let Some((name, args)) = name_and_args(exprs) { + acc.push_str(&display_pse(settings, name, "")); + + for arg in args.iter() { + match &arg.pre_expr { + // this is hacked in to handle situations where the contents of + // map is a 'tuple' + PreSymbolicExpressionType::Tuple(list) => acc.push_str(&format!( + "\n{}{}", + space, + format_key_value_sugar(settings, &list.to_vec(), indentation) + )), + _ => acc.push_str(&format!( + "\n{}{}", + space, + format_source_exprs(settings, &[arg.clone()], indentation, None, "") + )), + } + } + + acc.push_str(&format!("\n{})\n", previous_indentation)); + acc.to_owned() + } else { + panic!("define-map without a name is invalid") + } +} + +fn is_same_line(expr1: &PreSymbolicExpression, expr2: &PreSymbolicExpression) -> bool { + expr1.span().start_line == expr2.span().start_line +} + +// this is probably un-needed but was getting some weird artifacts for code like +// (something (1 2 3) true) would be formatted as (something (1 2 3)true) +fn format_general( + settings: &Settings, + exprs: &[PreSymbolicExpression], + previous_indentation: &str, +) -> String { + let func_type = display_pse(settings, exprs.first().unwrap(), ""); + let mut acc = format!("({func_type}"); + acc.push(' '); + for (i, arg) in exprs[1..].iter().enumerate() { + acc.push_str(&format!( + "{}{}", + format_source_exprs(settings, &[arg.clone()], previous_indentation, None, ""), + if i < exprs.len() - 2 { " " } else { "" } + )) + } + acc.push(')'); + acc.to_owned() +} +// *begin* never on one line +fn format_begin( + settings: &Settings, + exprs: &[PreSymbolicExpression], + previous_indentation: &str, +) -> String { + let mut acc = "(begin".to_string(); + let indentation = &settings.indentation.to_string(); + let space = format!("{}{}", previous_indentation, indentation); + + let mut iter = exprs.get(1..).unwrap_or_default().iter().peekable(); + while let Some(expr) = 
iter.next() { + // cloned() here because of the second mutable borrow on iter.next() + let trailing = match iter.peek().cloned() { + Some(next) => { + if is_comment(next) && is_same_line(expr, next) { + iter.next(); + Some(next) + } else { + None + } + } + _ => None, + }; + if let Some(list) = expr.match_list() { + acc.push_str(&format!( + "\n{}({})", + space, + format_source_exprs(settings, list, previous_indentation, None, "") + )); + if let Some(comment) = trailing { + acc.push(' '); + acc.push_str(&display_pse(settings, comment, previous_indentation)); + } + } + } + acc.push_str(&format!("\n{})\n", previous_indentation)); + acc.to_owned() +} + +fn is_comment(pse: &PreSymbolicExpression) -> bool { + matches!(pse.pre_expr, PreSymbolicExpressionType::Comment(_)) +} +pub fn without_comments_len(exprs: &[PreSymbolicExpression]) -> usize { + exprs.iter().filter(|expr| !is_comment(expr)).count() +} + +// formats (and ..) and (or ...) +// if given more than BOOLEAN_BREAK_LIMIT expressions it will break it onto new lines +fn format_booleans( + settings: &Settings, + exprs: &[PreSymbolicExpression], + previous_indentation: &str, +) -> String { + let func_type = display_pse(settings, exprs.first().unwrap(), ""); + let mut acc = format!("({func_type}"); + let indentation = &settings.indentation.to_string(); + let space = format!("{}{}", previous_indentation, indentation); + let break_up = without_comments_len(&exprs[1..]) > BOOLEAN_BREAK_LIMIT; + if break_up { + let mut iter = exprs.get(1..).unwrap_or_default().iter().peekable(); + while let Some(expr) = iter.next() { + let trailing = match iter.peek().cloned() { + Some(next) => { + if is_comment(next) && is_same_line(expr, next) { + iter.next(); + Some(next) + } else { + None + } + } + _ => None, + }; + if let Some(list) = expr.match_list() { + acc.push_str(&format!( + "\n{}({})", + space, + format_source_exprs(settings, list, previous_indentation, None, "") + )); + if let Some(comment) = trailing { + acc.push(' '); + acc.push_str(&display_pse(settings, comment, previous_indentation)); + } + } else { + acc.push_str(&format!( + "\n{}{}", + space, + format_source_exprs(settings, &[expr.clone()], previous_indentation, None, "") + )); + } + } + } else { + acc.push(' '); + acc.push_str(&format_source_exprs( + settings, + &exprs[1..], + previous_indentation, + None, + "", + )) + } + if break_up { + acc.push_str(&format!("\n{}", previous_indentation)); + } + acc.push(')'); + acc.to_owned() +} + +fn format_if( + settings: &Settings, + exprs: &[PreSymbolicExpression], + previous_indentation: &str, +) -> String { + let func_type = display_pse(settings, exprs.first().unwrap(), ""); + let indentation = &settings.indentation.to_string(); + let space = format!("{}{}", indentation, previous_indentation); + + let mut acc = format!("({func_type} "); + let mut iter = exprs[1..].iter().peekable(); + let mut index = 0; + + while let Some(expr) = iter.next() { + let trailing = match iter.peek().cloned() { + Some(next) => { + if is_comment(next) && is_same_line(expr, next) { + iter.next(); + Some(next) + } else { + None + } + } + _ => None, + }; + if let Some(list) = expr.match_list() { + // expr args + acc.push_str(&format!( + "{}({})\n", + if index > 0 { + space.clone() + } else { + "".to_string() + }, + format_source_exprs(settings, list, &space, None, "") + )) + } else { + // atom args + acc.push_str(&format_source_exprs( + settings, + &[expr.clone()], + &space, + None, + "", + )) + } + if let Some(comment) = trailing { + acc.push(' '); + 
acc.push_str(&display_pse(settings, comment, "")); + } + index += 1; + } + acc.push_str(previous_indentation); + acc.push(')'); + acc +} + +// *let* never on one line +fn format_let( + settings: &Settings, + exprs: &[PreSymbolicExpression], + previous_indentation: &str, +) -> String { + let mut acc = "(let (".to_string(); + let indentation = &settings.indentation.to_string(); + let space = format!("{}{}", previous_indentation, indentation); + if let Some(args) = exprs[1].match_list() { + for arg in args.iter() { + acc.push_str(&format!( + "\n{}{}", + space, + format_source_exprs(settings, &[arg.clone()], previous_indentation, None, "") + )) + } + } + acc.push_str(&format!("\n{})", previous_indentation)); + for e in exprs.get(2..).unwrap_or_default() { + acc.push_str(&format!( + "\n{}{}", + space, + format_source_exprs(settings, &[e.clone()], previous_indentation, None, "") + )) + } + acc.push_str(&format!("\n{})", previous_indentation)); + acc.to_owned() +} + +// * match * +// always multiple lines +fn format_match( + settings: &Settings, + exprs: &[PreSymbolicExpression], + previous_indentation: &str, +) -> String { + let mut acc = "(match ".to_string(); + let indentation = &settings.indentation.to_string(); + let space = format!("{}{}", previous_indentation, indentation); + + // value to match on + acc.push_str(&format_source_exprs( + settings, + &[exprs[1].clone()], + previous_indentation, + None, + "", + )); + // branches evenly spaced + for branch in exprs[2..].iter() { + acc.push_str(&format!( + "\n{}{}", + space, + format_source_exprs(settings, &[branch.clone()], &space, None, "") + )); + } + acc.push_str(&format!("\n{})", previous_indentation)); + acc.to_owned() +} + +fn format_list( + settings: &Settings, + exprs: &[PreSymbolicExpression], + previous_indentation: &str, +) -> String { + let mut acc = "(".to_string(); + let breaks = line_length_over_max(settings, exprs); + for (i, expr) in exprs[0..].iter().enumerate() { + let value = format_source_exprs(settings, &[expr.clone()], "", None, ""); + let space = if breaks { '\n' } else { ' ' }; + if i < exprs.len() - 1 { + acc.push_str(&value.to_string()); + acc.push(space); + } else { + acc.push_str(&value.to_string()); + } + } + acc.push_str(&format!( + "{}{})", + previous_indentation, + if breaks { "\n" } else { "" }, + )); + t(&acc).to_string() +} + +fn line_length_over_max(settings: &Settings, exprs: &[PreSymbolicExpression]) -> bool { + if let Some(last_expr) = exprs.last() { + last_expr.span.end_column >= settings.max_line_length.try_into().unwrap() + } else { + false + } +} +// used for { n1: 1 } syntax +fn format_key_value_sugar( + settings: &Settings, + exprs: &[PreSymbolicExpression], + previous_indentation: &str, +) -> String { + let indentation = &settings.indentation.to_string(); + let space = format!("{}{}", previous_indentation, indentation); + let over_2_kvs = without_comments_len(exprs) > 2; + let mut acc = "{".to_string(); + + // TODO this code is horrible + // convert it to the peekable version like the rest + if over_2_kvs { + acc.push('\n'); + let mut counter = 1; + for (i, expr) in exprs.iter().enumerate() { + if is_comment(expr) { + acc.push_str(&format!( + "{}{}\n", + space, + format_source_exprs(settings, &[expr.clone()], previous_indentation, None, "") + )) + } else { + let last = i == exprs.len() - 1; + // if counter is even we're on the value + if counter % 2 == 0 { + acc.push_str(&format!( + ": {}{}\n", + format_source_exprs( + settings, + &[expr.clone()], + previous_indentation, + None, + "" + ), + if 
last { "" } else { "," } + )); + } else { + // if counter is odd we're on the key + acc.push_str(&format!( + "{}{}", + space, + format_source_exprs( + settings, + &[expr.clone()], + previous_indentation, + None, + "" + ) + )); + } + counter += 1 + } + } + } else { + // for cases where we keep it on the same line with 1 k/v pair + let fkey = display_pse(settings, &exprs[0], previous_indentation); + acc.push_str(&format!( + " {fkey}: {} ", + format_source_exprs( + settings, + &[exprs[1].clone()], + previous_indentation, + None, + "" + ) + )); + } + if exprs.len() > 2 { + acc.push_str(previous_indentation); + } + acc.push('}'); + acc.to_string() +} + +// used for (tuple (n1 1)) syntax +fn format_key_value( + settings: &Settings, + exprs: &[PreSymbolicExpression], + previous_indentation: &str, +) -> String { + let indentation = &settings.indentation.to_string(); + let space = format!("{}{}", previous_indentation, indentation); + + let mut acc = previous_indentation.to_string(); + acc.push('{'); + + // for cases where we keep it on the same line with 1 k/v pair + let multiline = exprs.len() > 1; + let pre = if multiline { + format!("\n{}", space) + } else { + " ".to_string() + }; + for (i, expr) in exprs.iter().enumerate() { + let (key, value) = expr + .match_list() + .and_then(|list| list.split_first()) + .unwrap(); + let fkey = display_pse(settings, key, previous_indentation); + let ending = if multiline { + if i < exprs.len() - 1 { + "," + } else { + "\n" + } + } else { + " " + }; + + acc.push_str(&format!( + "{pre}{fkey}: {}{ending}", + format_source_exprs(settings, value, previous_indentation, None, "") + )); + } + acc.push_str(previous_indentation); + acc.push('}'); + acc.to_string() +} + +// This should panic on most things besides atoms and values. 
Added this to help +// debugging in the meantime +fn display_pse( + settings: &Settings, + pse: &PreSymbolicExpression, + previous_indentation: &str, +) -> String { + match pse.pre_expr { + PreSymbolicExpressionType::Atom(ref value) => t(value.as_str()).to_string(), + PreSymbolicExpressionType::AtomValue(ref value) => value.to_string(), + PreSymbolicExpressionType::List(ref items) => { + format_list(settings, items, previous_indentation) + } + PreSymbolicExpressionType::Tuple(ref items) => { + format_key_value_sugar(settings, items, previous_indentation) + } + PreSymbolicExpressionType::SugaredContractIdentifier(ref name) => { + format!(".{}", name) + } + PreSymbolicExpressionType::SugaredFieldIdentifier(ref contract, ref field) => { + format!(".{}.{}", contract, field) + } + PreSymbolicExpressionType::FieldIdentifier(ref trait_id) => { + format!("'{}", trait_id) + } + PreSymbolicExpressionType::TraitReference(ref name) => { + println!("trait ref: {}", name); + name.to_string() + } + PreSymbolicExpressionType::Comment(ref text) => { + format!(";; {}", t(text)) + } + PreSymbolicExpressionType::Placeholder(ref placeholder) => { + placeholder.to_string() // Placeholder is for if parsing fails + } + } +} + +// * functions + +// Top level define- should have a line break above and after (except on first line) +// options always on new lines +// Functions Always on multiple lines, even if short +fn format_function(settings: &Settings, exprs: &[PreSymbolicExpression]) -> String { + let func_type = display_pse(settings, exprs.first().unwrap(), ""); + let indentation = &settings.indentation.to_string(); + let args_indent = format!("{}{}", indentation, indentation); + + let mut acc = format!("({func_type} ("); + + // function name and arguments + if let Some(def) = exprs.get(1).and_then(|f| f.match_list()) { + if let Some((name, args)) = def.split_first() { + acc.push_str(&display_pse(settings, name, "")); + + let mut iter = args.iter().peekable(); + while let Some(arg) = iter.next() { + // cloned() here because of the second mutable borrow on iter.next() + let trailing = match iter.peek().cloned() { + Some(next) => { + if is_comment(next) && is_same_line(arg, next) { + iter.next(); + Some(next) + } else { + None + } + } + _ => None, + }; + if let Some(list) = arg.match_list() { + // expr args + acc.push_str(&format!( + "\n{}({})", + args_indent, + format_source_exprs(settings, list, &args_indent, None, "") + )) + } else { + // atom args + acc.push_str(&format_source_exprs( + settings, + &[arg.clone()], + &args_indent, + None, + "", + )) + } + if let Some(comment) = trailing { + acc.push(' '); + acc.push_str(&display_pse(settings, comment, "")); + } + } + if args.is_empty() { + acc.push(')') + } else { + acc.push_str(&format!("\n{})", indentation)) + } + } else { + panic!("can't have a nameless function") + } + } + + // function body expressions + // TODO this should account for comments + for expr in exprs.get(2..).unwrap_or_default() { + acc.push_str(&format!( + "\n{}{}", + indentation, + format_source_exprs( + settings, + &[expr.clone()], + &settings.indentation.to_string(), + None, // TODO + "" + ) + )) + } + acc.push_str("\n)\n\n"); + acc.to_owned() +} + +#[cfg(test)] +mod tests_formatter { + use super::{ClarityFormatter, Settings}; + use crate::formatter::Indentation; + use std::collections::HashMap; + use std::fs; + use std::path::Path; + fn from_metadata(metadata: &str) -> Settings { + let mut max_line_length = 80; + let mut indent = Indentation::Space(2); + + let metadata_map: HashMap<&str, 
&str> = metadata + .split(',') + .map(|pair| pair.trim()) + .filter_map(|kv| kv.split_once(':')) + .map(|(k, v)| (k.trim(), v.trim())) + .collect(); + + if let Some(length) = metadata_map.get("max_line_length") { + max_line_length = length.parse().unwrap_or(max_line_length); + } + + if let Some(&indentation) = metadata_map.get("indentation") { + indent = match indentation { + "tab" => Indentation::Tab, + value => { + if let Ok(spaces) = value.parse::<usize>() { + Indentation::Space(spaces) + } else { + Indentation::Space(2) // Fallback to default + } + } + }; + } + + Settings { + max_line_length, + indentation: indent, + } + } + fn format_with_default(source: &str) -> String { + let mut formatter = ClarityFormatter::new(Settings::default()); + formatter.format_section(source) + } + fn format_file_with_metadata(source: &str) -> String { + let mut lines = source.lines(); + let metadata_line = lines.next().unwrap_or_default(); + let settings = from_metadata(metadata_line); + + let real_source = lines.collect::<Vec<&str>>().join("\n"); + let mut formatter = ClarityFormatter::new(settings); + formatter.format_file(&real_source) + } + fn format_with(source: &str, settings: Settings) -> String { + let mut formatter = ClarityFormatter::new(settings); + formatter.format_section(source) + } + #[test] + fn test_simplest_formatter() { + let result = format_with_default(&String::from("( ok true )")); + assert_eq!(result, "(ok true)"); + } + + #[test] + fn test_manual_tuple() { + let result = format_with_default(&String::from("(tuple (n1 1))")); + assert_eq!(result, "{ n1: 1 }"); + let result = format_with_default(&String::from("(tuple (n1 1) (n2 2))")); + assert_eq!(result, "{\n  n1: 1,\n  n2: 2\n}"); + } + + #[test] + fn test_function_formatter() { + let result = format_with_default(&String::from("(define-private (my-func) (ok true))")); + assert_eq!(result, "(define-private (my-func)\n  (ok true)\n)\n\n"); + } + + #[test] + fn test_multi_function() { + let src = "(define-public (my-func) (ok true))\n(define-public (my-func2) (ok true))"; + let result = format_with_default(&String::from(src)); + let expected = r#"(define-public (my-func) + (ok true) +) + +(define-public (my-func2) + (ok true) +) + +"#; + assert_eq!(expected, result); + } + #[test] + fn test_function_args_multiline() { + let src = "(define-public (my-func (amount uint) (sender principal)) (ok true))"; + let result = format_with_default(&String::from(src)); + assert_eq!( + result, + "(define-public (my-func\n    (amount uint)\n    (sender principal)\n  )\n  (ok true)\n)\n\n" + ); + } + #[test] + fn test_pre_comments_included() { + let src = ";; this is a pre comment\n;; multi\n(ok true)"; + let result = format_with_default(&String::from(src)); + assert_eq!(src, result); + } + + #[test] + fn test_inline_comments_included() { + let src = "(ok true) ;; this is an inline comment"; + let result = format_with_default(&String::from(src)); + assert_eq!(src, result); + } + #[test] + #[ignore] + fn test_postcomments_included() { + let src = "(ok true)\n;; this is a post comment"; + let result = format_with_default(&String::from(src)); + assert_eq!(src, result); + } + + #[test] + fn test_booleans() { + let src = "(or true false)"; + let result = format_with_default(&String::from(src)); + assert_eq!(src, result); + let src = "(or true (is-eq 1 2) (is-eq 1 1))"; + let result = format_with_default(&String::from(src)); + let expected = "(or\n  true\n  (is-eq 1 2)\n  (is-eq 1 1)\n)"; + assert_eq!(expected, result); + } + #[test] + fn test_booleans_with_comments() { + let src = 
r#"(or + true + ;; pre comment + (is-eq 1 2) ;; comment + (is-eq 1 1) ;; b +)"#; + let result = format_with_default(&String::from(src)); + assert_eq!(src, result); + } + + #[test] + #[ignore] + fn long_line_unwrapping() { + let src = "(try! (unwrap! (complete-deposit-wrapper (get txid deposit) (get vout-index deposit) (get amount deposit) (get recipient deposit) (get burn-hash deposit) (get burn-height deposit) (get sweep-txid deposit)) (err (+ ERR_DEPOSIT_INDEX_PREFIX (+ u10 index)))))"; + let result = format_with_default(&String::from(src)); + let expected = "(try! (unwrap! (complete-deposit-wrapper\n (get txid deposit)\n (get vout-index deposit)\n (get amount deposit)\n (get recipient deposit)\n (get burn-hash deposit)\n (get burn-height deposit)\n (get sweep-txid deposit)\n ) (err (+ ERR_DEPOSIT_INDEX_PREFIX (+ u10 index)))))"; + assert_eq!(expected, result); + } + + #[test] + fn test_map() { + let src = "(define-map a uint {n1: (buff 20)})"; + let result = format_with_default(&String::from(src)); + assert_eq!(result, "(define-map a\n uint\n { n1: (buff 20) }\n)\n"); + let src = "(define-map something { name: (buff 48), a: uint } uint)"; + let result = format_with_default(&String::from(src)); + assert_eq!( + result, + "(define-map something\n {\n name: (buff 48),\n a: uint\n }\n uint\n)\n" + ); + } + + #[test] + fn test_let() { + let src = "(let ((a 1) (b 2)) (+ a b))"; + let result = format_with_default(&String::from(src)); + let expected = "(let (\n (a 1)\n (b 2)\n)\n (+ a b)\n)"; + assert_eq!(expected, result); + } + + #[test] + fn test_option_match() { + let src = "(match opt value (ok (handle-new-value value)) (ok 1))"; + let result = format_with_default(&String::from(src)); + // "(match opt\n + let expected = r#"(match opt + value + (ok (handle-new-value value)) + (ok 1) +)"#; + assert_eq!(result, expected); + } + + #[test] + fn test_response_match() { + let src = "(match x value (ok (+ to-add value)) err-value (err err-value))"; + let result = format_with_default(&String::from(src)); + let expected = r#"(match x + value + (ok (+ to-add value)) + err-value + (err err-value) +)"#; + assert_eq!(result, expected); + } + #[test] + fn test_key_value_sugar() { + let src = "{name: (buff 48)}"; + let result = format_with_default(&String::from(src)); + assert_eq!(result, "{ name: (buff 48) }"); + let src = "{ name: (buff 48), a: uint }"; + let result = format_with_default(&String::from(src)); + assert_eq!(result, "{\n name: (buff 48),\n a: uint\n}"); + } + + #[test] + fn test_key_value_sugar_comment_midrecord() { + let src = r#"{ + name: (buff 48), + ;; comment + owner: send-to +}"#; + let result = format_with_default(&String::from(src)); + assert_eq!(src, result); + } + + #[test] + fn test_basic_slice() { + let src = "(slice? 
(1 2 3 4 5) u5 u9)"; + let result = format_with_default(&String::from(src)); + assert_eq!(src, result); + } + #[test] + fn test_constant() { + let src = "(define-constant something 1)\n"; + let result = format_with_default(&String::from(src)); + assert_eq!(result, "(define-constant something 1)\n"); + let src2 = "(define-constant something (1 2))\n"; + let result2 = format_with_default(&String::from(src2)); + assert_eq!(result2, "(define-constant something\n (1 2)\n)\n"); + } + + #[test] + fn test_begin_never_one_line() { + let src = "(begin (ok true))"; + let result = format_with_default(&String::from(src)); + assert_eq!(result, "(begin\n (ok true)\n)\n"); + } + + #[test] + fn test_begin() { + let src = "(begin (+ 1 1) ;; a\n (ok true))"; + let result = format_with_default(&String::from(src)); + assert_eq!(result, "(begin\n (+ 1 1) ;; a\n (ok true)\n)\n"); + } + + #[test] + fn test_custom_tab_setting() { + let src = "(begin (ok true))"; + let result = format_with(&String::from(src), Settings::new(Indentation::Space(4), 80)); + assert_eq!(result, "(begin\n (ok true)\n)\n"); + } + + #[test] + fn test_if() { + let src = " (if (<= amount max-supply) (list) (something amount))"; + let result = format_with_default(&String::from(src)); + let expected = "(if (<= amount max-supply)\n (list)\n (something amount)\n)"; + assert_eq!(result, expected); + } + #[test] + #[ignore] + fn test_ignore_formatting() { + let src = ";; @format-ignore\n( begin ( ok true ))"; + let result = format_with(&String::from(src), Settings::new(Indentation::Space(4), 80)); + assert_eq!(src, result); + } + + #[test] + fn test_index_of() { + let src = "(index-of? (contract-call? .pool borroweable) asset)"; + let result = format_with_default(&String::from(src)); + assert_eq!(src, result); + } + #[test] + fn test_traits() { + let src = "(use-trait token-a-trait 'SPAXYA5XS51713FDTQ8H94EJ4V579CXMTRNBZKSF.token-a.token-trait)\n"; + let result = format_with(&String::from(src), Settings::new(Indentation::Space(4), 80)); + assert_eq!(src, result); + let src = "(as-contract (contract-call? .tokens mint! 
u19))"; + let result = format_with(&String::from(src), Settings::new(Indentation::Space(4), 80)); + assert_eq!(src, result); + } + + #[test] + #[ignore] + fn test_irl_contracts() { + let golden_dir = "./tests/golden"; + let intended_dir = "./tests/golden-intended"; + + // Iterate over files in the golden directory + for entry in fs::read_dir(golden_dir).expect("Failed to read golden directory") { + let entry = entry.expect("Failed to read directory entry"); + let path = entry.path(); + + if path.is_file() { + let src = fs::read_to_string(&path).expect("Failed to read source file"); + + let file_name = path.file_name().expect("Failed to get file name"); + let intended_path = Path::new(intended_dir).join(file_name); + + let intended = + fs::read_to_string(&intended_path).expect("Failed to read intended file"); + + // Apply formatting and compare + let result = format_file_with_metadata(&src); + // println!("intended: {:?}", intended); + // println!("result: {:?}", result); + pretty_assertions::assert_eq!( + result, + intended, + "Mismatch in file: {:?}", + file_name + ); + } + } + } +} diff --git a/components/clarinet-format/src/lib.rs b/components/clarinet-format/src/lib.rs new file mode 100644 index 000000000..96dc2d95d --- /dev/null +++ b/components/clarinet-format/src/lib.rs @@ -0,0 +1 @@ +pub mod formatter; diff --git a/components/clarinet-format/tests/golden-intended/alex-transfer.clar b/components/clarinet-format/tests/golden-intended/alex-transfer.clar new file mode 100644 index 000000000..43bbcc5e9 --- /dev/null +++ b/components/clarinet-format/tests/golden-intended/alex-transfer.clar @@ -0,0 +1,66 @@ +;; https://github.com/alexgo-io/alex-v1/blob/dev/clarity/contracts/stx404-token/token-stx404.clar#L60-L94 +(define-public (transfer + (amount-or-id uint) + (sender principal) + (recipient principal) + ) + (begin + (asserts! (is-eq sender tx-sender) err-not-authorised) + (if (<= amount-or-id max-supply) ;; id transfer + (let ( + (check-id (asserts! (is-id-owned-by-or-default amount-or-id sender) err-invalid-id)) + (owned-by-sender (get-owned-or-default sender)) + (owned-by-recipient (get-owned-or-default recipient)) + (id-idx (unwrap-panic (index-of? owned-by-sender amount-or-id)))) + (map-set owned sender (pop owned-by-sender id-idx)) + (map-set owned recipient (unwrap-panic (as-max-len? (append owned-by-recipient amount-or-id) u10000))) + (try! (ft-transfer? stx404 one-8 sender recipient)) + (try! (nft-transfer? stx404nft amount-or-id sender recipient)) + (ok true) + ) + (let ( + (balance-sender (unwrap-panic (get-balance sender))) + (balance-recipient (unwrap-panic (get-balance recipient))) + (check-balance (try! (ft-transfer? stx404 amount-or-id sender recipient))) + (no-to-treasury (- (/ balance-sender one-8) (/ (- balance-sender amount-or-id) one-8))) + (no-to-recipient (- (/ (+ balance-recipient amount-or-id) one-8) (/ balance-recipient one-8))) + (owned-by-sender (get-owned-or-default sender)) + (owned-by-recipient (get-owned-or-default recipient)) + (ids-to-treasury (if (is-eq no-to-treasury u0) + (list) + (unwrap-panic (slice? owned-by-sender (- (len owned-by-sender) no-to-treasury) (len owned-by-sender))) + ) + ) + (new-available-ids (if (is-eq no-to-treasury u0) + (var-get available-ids) + (unwrap-panic (as-max-len? (concat (var-get available-ids) ids-to-treasury) u10000)) + ) + ) + (ids-to-recipient (if (is-eq no-to-recipient u0) + (list) + (unwrap-panic (slice? 
new-available-ids (- (len new-available-ids) no-to-recipient) (len new-available-ids))) + ) + ) + ) + (var-set sender-temp sender) + (var-set recipient-temp (as-contract tx-sender)) + (and (> no-to-treasury u0) (try! (fold check-err (map nft-transfer-iter ids-to-treasury) (ok true)))) + (var-set sender-temp (as-contract tx-sender)) + (var-set recipient-temp recipient) + (and (> no-to-recipient u0) (try! (fold check-err (map nft-transfer-iter ids-to-recipient) (ok true)))) + (map-set owned sender (if (is-eq no-to-treasury u0) + owned-by-sender + (unwrap-panic (slice? owned-by-sender u0 (- (len owned-by-sender) no-to-treasury))) + ) + ) + (map-set owned recipient (if (is-eq no-to-recipient u0) + owned-by-recipient + (unwrap-panic (as-max-len? (concat owned-by-recipient ids-to-recipient) u10000)) + ) + ) + (var-set available-ids (if (is-eq no-to-recipient u0) new-available-ids (unwrap-panic (slice? new-available-ids u0 (- (len new-available-ids) no-to-recipient))))) + (ok true) + ) + ) + ) +) diff --git a/components/clarinet-format/tests/golden-intended/clarity-bitcoin.clar b/components/clarinet-format/tests/golden-intended/clarity-bitcoin.clar new file mode 100644 index 000000000..95f859ab4 --- /dev/null +++ b/components/clarinet-format/tests/golden-intended/clarity-bitcoin.clar @@ -0,0 +1,560 @@ +;; source: https://github.com/hirosystems/clarity-examples/blob/main/examples/clarity-bitcoin/contracts/clarity-bitcoin.clar + +;; @contract stateless contract to verify bitcoin transaction +;; @version 5 + +;; version 5 adds support for txid generation and improves security + +;; Error codes +(define-constant ERR-OUT-OF-BOUNDS u1) +(define-constant ERR-TOO-MANY-TXINS u2) +(define-constant ERR-TOO-MANY-TXOUTS u3) +(define-constant ERR-VARSLICE-TOO-LONG u4) +(define-constant ERR-BAD-HEADER u5) +(define-constant ERR-HEADER-HEIGHT-MISMATCH u6) +(define-constant ERR-INVALID-MERKLE-PROOF u7) +(define-constant ERR-PROOF-TOO-SHORT u8) +(define-constant ERR-TOO-MANY-WITNESSES u9) +(define-constant ERR-INVALID-COMMITMENT u10) +(define-constant ERR-WITNESS-TX-NOT-IN-COMMITMENT u11) +(define-constant ERR-NOT-SEGWIT-TRANSACTION u12) +(define-constant ERR-LEFTOVER-DATA u13) + +;; +;; Helper functions to parse bitcoin transactions +;; + +;; Create a list with n elments `true`. n must be smaller than 9. +(define-private (bool-list-of-len (n uint)) + (unwrap-panic (slice? (list true true true true true true true true) u0 n))) + +;; Reads the next two bytes from txbuff as a little-endian 16-bit integer, and updates the index. +;; Returns (ok { uint16: uint, ctx: { txbuff: (buff 4096), index: uint } }) on success. +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff +(define-read-only (read-uint8 (ctx { txbuff: (buff 4096), index: uint})) + (let ((data (get txbuff ctx)) + (base (get index ctx))) + (ok {uint8: (buff-to-uint-le (unwrap-panic (as-max-len? (unwrap! (slice? data base (+ base u1)) (err ERR-OUT-OF-BOUNDS)) u1))), + ctx: { txbuff: data, index: (+ u1 base)}}))) + +;; Reads the next two bytes from txbuff as a little-endian 16-bit integer, and updates the index. +;; Returns (ok { uint16: uint, ctx: { txbuff: (buff 4096), index: uint } }) on success. +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff +(define-read-only (read-uint16 (ctx { txbuff: (buff 4096), index: uint})) + (let ((data (get txbuff ctx)) + (base (get index ctx))) + (ok {uint16: (buff-to-uint-le (unwrap-panic (as-max-len? (unwrap! (slice? 
data base (+ base u2)) (err ERR-OUT-OF-BOUNDS)) u2))), + ctx: { txbuff: data, index: (+ u2 base)}}))) + +;; Reads the next four bytes from txbuff as a little-endian 32-bit integer, and updates the index. +;; Returns (ok { uint32: uint, ctx: { txbuff: (buff 4096), index: uint } }) on success. +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff +(define-read-only (read-uint32 (ctx { txbuff: (buff 4096), index: uint})) + (let ((data (get txbuff ctx)) + (base (get index ctx))) + (ok {uint32: (buff-to-uint-le (unwrap-panic (as-max-len? (unwrap! (slice? data base (+ base u4)) (err ERR-OUT-OF-BOUNDS)) u4))), + ctx: { txbuff: data, index: (+ u4 base)}}))) + +;; Reads the next eight bytes from txbuff as a little-endian 64-bit integer, and updates the index. +;; Returns (ok { uint64: uint, ctx: { txbuff: (buff 4096), index: uint } }) on success. +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff +(define-read-only (read-uint64 (ctx { txbuff: (buff 4096), index: uint})) + (let ((data (get txbuff ctx)) + (base (get index ctx))) + (ok {uint64: (buff-to-uint-le (unwrap-panic (as-max-len? (unwrap! (slice? data base (+ base u8)) (err ERR-OUT-OF-BOUNDS)) u8))), + ctx: { txbuff: data, index: (+ u8 base)}}))) + +;; Reads the next varint from txbuff, and updates the index. +;; Returns (ok { varint: uint, ctx: { txbuff: (buff 4096), index: uint } }) on success +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff. +(define-read-only (read-varint (ctx { txbuff: (buff 4096), index: uint})) + (let ((ptr (get index ctx)) + (tx (get txbuff ctx)) + (byte (buff-to-uint-le (unwrap! (element-at tx ptr) + (err ERR-OUT-OF-BOUNDS))))) + (if (<= byte u252) + ;; given byte is the varint + (ok { varint: byte, ctx: { txbuff: tx, index: (+ u1 ptr)}}) + (if (is-eq byte u253) + (let ( + ;; next two bytes is the varint + (parsed-u16 (try! (read-uint16 { txbuff: tx, index: (+ u1 ptr)})))) + (ok { varint: (get uint16 parsed-u16), ctx: (get ctx parsed-u16)})) + (if (is-eq byte u254) + (let ( + ;; next four bytes is the varint + (parsed-u32 (try! (read-uint32 { txbuff: tx, index: (+ u1 ptr)})))) + (ok { varint: (get uint32 parsed-u32), ctx: (get ctx parsed-u32)})) + (let ( + ;; next eight bytes is the varint + (parsed-u64 (try! (read-uint64 { txbuff: tx, index: (+ u1 ptr)})))) + (ok { varint: (get uint64 parsed-u64), ctx: (get ctx parsed-u64)}))))))) + +;; Reads a varint-prefixed byte slice from txbuff, and updates the index to point to the byte after the varint and slice. +;; Returns (ok { varslice: (buff 4096), ctx: { txbuff: (buff 4096), index: uint } }) on success, where varslice has the length of the varint prefix. +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff. +(define-read-only (read-varslice (old-ctx { txbuff: (buff 4096), index: uint})) + (let ((parsed (try! (read-varint old-ctx))) + (ctx (get ctx parsed)) + (slice-start (get index ctx)) + (target-index (+ slice-start (get varint parsed))) + (txbuff (get txbuff ctx))) + (ok {varslice: (unwrap! (slice? txbuff slice-start target-index) (err ERR-OUT-OF-BOUNDS)), + ctx: { txbuff: txbuff, index: target-index}}))) + +(define-private (reverse-buff16 (input (buff 16))) + (unwrap-panic (slice? (unwrap-panic (to-consensus-buff? (buff-to-uint-le input))) u1 u17))) + +(define-read-only (reverse-buff32 (input (buff 32))) + (unwrap-panic (as-max-len? (concat + (reverse-buff16 (unwrap-panic (as-max-len? (unwrap-panic (slice? input u16 u32)) u16))) + (reverse-buff16 (unwrap-panic (as-max-len? (unwrap-panic (slice? 
input u0 u16)) u16)))) u32))) + +;; Reads a little-endian hash -- consume the next 32 bytes, and reverse them. +;; Returns (ok { hashslice: (buff 32), ctx: { txbuff: (buff 4096), index: uint } }) on success, and updates the index. +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff. +(define-read-only (read-hashslice (old-ctx { txbuff: (buff 4096), index: uint})) + (let ((slice-start (get index old-ctx)) + (target-index (+ u32 slice-start)) + (txbuff (get txbuff old-ctx)) + (hash-le (unwrap-panic + (as-max-len? (unwrap! + (slice? txbuff slice-start target-index) (err ERR-OUT-OF-BOUNDS)) u32)))) + (ok {hashslice: (reverse-buff32 hash-le), + ctx: { txbuff: txbuff, index: target-index}}))) + +;; Inner fold method to read the next tx input from txbuff. +;; The index in ctx will be updated to point to the next tx input if all goes well (or to the start of the outputs) +;; Returns (ok { ... }) on success. +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff. +;; Returns (err ERR-VARSLICE-TOO-LONG) if we find a scriptSig that's too long to parse. +;; Returns (err ERR-TOO-MANY-TXINS) if there are more than eight inputs to read. +(define-read-only (read-next-txin (ignored bool) + (result (response {ctx: { txbuff: (buff 4096), index: uint }, + remaining: uint, + txins: (list 8 {outpoint: { + hash: (buff 32), + index: uint}, + scriptSig: (buff 256), ;; just big enough to hold a 2-of-3 multisig script + sequence: uint})} + uint))) + (let ((state (unwrap! result result))) + (let ((remaining (get remaining state)) + (ctx (get ctx state)) + (parsed-hash (try! (read-hashslice ctx))) + (parsed-index (try! (read-uint32 (get ctx parsed-hash)))) + (parsed-scriptSig (try! (read-varslice (get ctx parsed-index)))) + (parsed-sequence (try! (read-uint32 (get ctx parsed-scriptSig)))) + (new-ctx (get ctx parsed-sequence))) + (ok {ctx: new-ctx, + remaining: (- remaining u1), + txins: (unwrap! + (as-max-len? + (append (get txins state) { outpoint: { + hash: (get hashslice parsed-hash), + index: (get uint32 parsed-index) }, + scriptSig: (unwrap! (as-max-len? (get varslice parsed-scriptSig) u256) (err ERR-VARSLICE-TOO-LONG)), + sequence: (get uint32 parsed-sequence)}) u8) + (err ERR-TOO-MANY-TXINS))})) + )) + +;; Read a transaction's inputs. +;; Returns (ok { txins: (list { ... }), remaining: uint, ctx: { txbuff: (buff 4096), index: uint } }) on success, and updates the index in ctx to point to the start of the tx outputs. +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff. +;; Returns (err ERR-VARSLICE-TOO-LONG) if we find a scriptSig that's too long to parse. +;; Returns (err ERR-TOO-MANY-TXINS) if there are more than eight inputs to read. +(define-read-only (read-txins (ctx { txbuff: (buff 4096), index: uint})) + (let ((parsed-num-txins (try! (read-varint ctx))) + (num-txins (get varint parsed-num-txins)) + (new-ctx (get ctx parsed-num-txins))) + (if (> num-txins u8) + (err ERR-TOO-MANY-TXINS) + (fold read-next-txin (bool-list-of-len num-txins) (ok { ctx: new-ctx, remaining: num-txins, txins: (list)}))))) + +;; Read the next transaction output, and update the index in ctx to point to the next output. +;; Returns (ok { ... }) on success +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff. +;; Returns (err ERR-VARSLICE-TOO-LONG) if we find a scriptPubKey that's too long to parse. +;; Returns (err ERR-TOO-MANY-TXOUTS) if there are more than eight outputs to read. 
+(define-read-only (read-next-txout (ignored bool) + (result (response {ctx: { txbuff: (buff 4096), index: uint }, + txouts: (list 8 {value: uint, + scriptPubKey: (buff 128)})} + uint))) + (let ((state (unwrap! result result)) + (parsed-value (try! (read-uint64 (get ctx state)))) + (parsed-script (try! (read-varslice (get ctx parsed-value)))) + (new-ctx (get ctx parsed-script))) + (ok {ctx: new-ctx, + txouts: (unwrap! + (as-max-len? + (append (get txouts state) + { value: (get uint64 parsed-value), + scriptPubKey: (unwrap! (as-max-len? (get varslice parsed-script) u128) (err ERR-VARSLICE-TOO-LONG))}) u8) + (err ERR-TOO-MANY-TXOUTS))}))) + +;; Read all transaction outputs in a transaction. Update the index to point to the first byte after the outputs, if all goes well. +;; Returns (ok { txouts: (list { ... }), remaining: uint, ctx: { txbuff: (buff 4096), index: uint } }) on success, and updates the index in ctx to point to the start of the tx outputs. +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff. +;; Returns (err ERR-VARSLICE-TOO-LONG) if we find a scriptPubKey that's too long to parse. +;; Returns (err ERR-TOO-MANY-TXOUTS) if there are more than eight outputs to read. +(define-read-only (read-txouts (ctx { txbuff: (buff 4096), index: uint})) + (let ((parsed-num-txouts (try! (read-varint ctx))) + (num-txouts (get varint parsed-num-txouts)) + (new-ctx (get ctx parsed-num-txouts))) + (if (> num-txouts u8) + (err ERR-TOO-MANY-TXOUTS) + (fold read-next-txout (bool-list-of-len num-txouts) (ok { ctx: new-ctx, txouts: (list)}))))) + +;; Read the stack item of the witness field, and update the index in ctx to point to the next item. +(define-read-only (read-next-item (ignored bool) + (result (response {ctx: { txbuff: (buff 4096), index: uint }, + items: (list 8 (buff 128))} + uint))) + (let ((state (unwrap! result result)) + (parsed-item (try! (read-varslice (get ctx state)))) + (new-ctx (get ctx parsed-item))) + (ok {ctx: new-ctx, + items: (unwrap! + (as-max-len? + (append (get items state) (unwrap! (as-max-len? (get varslice parsed-item) u128) (err ERR-VARSLICE-TOO-LONG))) u8) + (err ERR-TOO-MANY-WITNESSES))}))) + +;; Read the next witness data, and update the index in ctx to point to the next witness. +(define-read-only (read-next-witness (ignored bool) + (result (response + { ctx: {txbuff: (buff 4096), index: uint}, witnesses: (list 8 (list 8 (buff 128))) } uint))) + (let ((state (unwrap! result result)) + (parsed-num-items (try! (read-varint (get ctx state)))) + (ctx (get ctx parsed-num-items)) + (varint (get varint parsed-num-items))) + (if (> varint u0) + ;; read all stack items for current txin and add to witnesses. + (let ((parsed-items (try! (fold read-next-item (bool-list-of-len varint) (ok { ctx: ctx, items: (list)}))))) + (ok { + witnesses: (unwrap-panic (as-max-len? (append (get witnesses state) (get items parsed-items)) u8)), + ctx: (get ctx parsed-items) + })) + ;; txin has not witness data, add empty list to witnesses. + (ok { + witnesses: (unwrap-panic (as-max-len? (append (get witnesses state) (list)) u8)), + ctx: ctx + })))) + +;; Read all witness data in a transaction. Update the index to point to the end of the tx, if all goes well. +;; Returns (ok {witnesses: (list 8 (list 8 (buff 128))), ctx: { txbuff: (buff 4096), index: uint } }) on success, and updates the index in ctx to point after the end of the tx. +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff. 
+;; Returns (err ERR-VARSLICE-TOO-LONG) if we find a scriptPubKey that's too long to parse. +;; Returns (err ERR-TOO-MANY-WITNESSES) if there are more than eight witness data or stack items to read. +(define-read-only (read-witnesses (ctx { txbuff: (buff 4096), index: uint }) (num-txins uint)) + (fold read-next-witness (bool-list-of-len num-txins) (ok { ctx: ctx, witnesses: (list) }))) + +;; +;; Parses a Bitcoin transaction, with up to 8 inputs and 8 outputs, with scriptSigs of up to 256 bytes each, and with scriptPubKeys up to 128 bytes. +;; It will also calculate and return the TXID if calculate-txid is set to true. +;; Returns a tuple structured as follows on success: +;; (ok { +;; version: uint, ;; tx version +;; segwit-marker: uint, +;; segwit-version: uint, +;; txid: (optional (buff 32)) +;; ins: (list 8 +;; { +;; outpoint: { ;; pointer to the utxo this input consumes +;; hash: (buff 32), +;; index: uint +;; }, +;; scriptSig: (buff 256), ;; spending condition script +;; sequence: uint +;; }), +;; outs: (list 8 +;; { +;; value: uint, ;; satoshis sent +;; scriptPubKey: (buff 128) ;; parse this to get an address +;; }), +;; witnesses: (list 8 (list 8 (buff 128))), +;; locktime: uint +;; }) +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff. +;; Returns (err ERR-VARSLICE-TOO-LONG) if we find a scriptPubKey or scriptSig that's too long to parse. +;; Returns (err ERR-TOO-MANY-TXOUTS) if there are more than eight inputs to read. +;; Returns (err ERR-TOO-MANY-TXINS) if there are more than eight outputs to read. +;; Returns (err ERR-NOT-SEGWIT-TRANSACTION) if tx is not a segwit transaction. +;; Returns (err ERR-LEFTOVER-DATA) if the tx buffer contains leftover data at the end. +(define-read-only (parse-wtx (tx (buff 4096)) (calculate-txid bool)) + (let ((ctx { txbuff: tx, index: u0}) + (parsed-version (try! (read-uint32 ctx))) + (parsed-segwit-marker (try! (read-uint8 (get ctx parsed-version)))) + (parsed-segwit-version (try! (read-uint8 (get ctx parsed-segwit-marker)))) + (parsed-txins (try! (read-txins (get ctx parsed-segwit-version)))) + (parsed-txouts (try! (read-txouts (get ctx parsed-txins)))) + (parsed-witnesses (try! (read-witnesses (get ctx parsed-txouts) (len (get txins parsed-txins))))) + (parsed-locktime (try! (read-uint32 (get ctx parsed-witnesses)))) + ) + (asserts! (and (is-eq (get uint8 parsed-segwit-marker) u0) (is-eq (get uint8 parsed-segwit-version) u1)) (err ERR-NOT-SEGWIT-TRANSACTION)) + (asserts! (is-eq (len tx) (get index (get ctx parsed-locktime))) (err ERR-LEFTOVER-DATA)) + (ok {version: (get uint32 parsed-version), + segwit-marker: (get uint8 parsed-segwit-marker), + segwit-version: (get uint8 parsed-segwit-version), + ins: (get txins parsed-txins), + outs: (get txouts parsed-txouts), + txid: (if calculate-txid + (some (reverse-buff32 (sha256 (sha256 + (concat + (unwrap-panic (slice? tx u0 u4)) + (concat + (unwrap-panic (slice? tx (get index (get ctx parsed-segwit-version)) (get index (get ctx parsed-txouts)))) + (unwrap-panic (slice? tx (get index (get ctx parsed-witnesses)) (len tx))))))))) + none), + witnesses: (get witnesses parsed-witnesses), + locktime: (get uint32 parsed-locktime) + }))) + +;; +;; Parses a Bitcoin transaction, with up to 8 inputs and 8 outputs, with scriptSigs of up to 256 bytes each, and with scriptPubKeys up to 128 bytes. 
+;; Returns a tuple structured as follows on success: +;; (ok { +;; version: uint, ;; tx version +;; ins: (list 8 +;; { +;; outpoint: { ;; pointer to the utxo this input consumes +;; hash: (buff 32), +;; index: uint +;; }, +;; scriptSig: (buff 256), ;; spending condition script +;; sequence: uint +;; }), +;; outs: (list 8 +;; { +;; value: uint, ;; satoshis sent +;; scriptPubKey: (buff 128) ;; parse this to get an address +;; }), +;; locktime: uint +;; }) +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff. +;; Returns (err ERR-VARSLICE-TOO-LONG) if we find a scriptPubKey or scriptSig that's too long to parse. +;; Returns (err ERR-TOO-MANY-TXOUTS) if there are more than eight inputs to read. +;; Returns (err ERR-TOO-MANY-TXINS) if there are more than eight outputs to read. +;; Returns (err ERR-LEFTOVER-DATA) if the tx buffer contains leftover data at the end. +(define-read-only (parse-tx (tx (buff 4096))) + (let ((ctx { txbuff: tx, index: u0}) + (parsed-version (try! (read-uint32 ctx))) + (parsed-txins (try! (read-txins (get ctx parsed-version)))) + (parsed-txouts (try! (read-txouts (get ctx parsed-txins)))) + (parsed-locktime (try! (read-uint32 (get ctx parsed-txouts))))) + ;; check if it is a non-segwit transaction? + ;; at least check what happens + (asserts! (is-eq (len tx) (get index (get ctx parsed-locktime))) (err ERR-LEFTOVER-DATA)) + (ok {version: (get uint32 parsed-version), + ins: (get txins parsed-txins), + outs: (get txouts parsed-txouts), + locktime: (get uint32 parsed-locktime)}))) + +;; Parse a Bitcoin block header. +;; Returns a tuple structured as folowed on success: +;; (ok { +;; version: uint, ;; block version, +;; parent: (buff 32), ;; parent block hash, +;; merkle-root: (buff 32), ;; merkle root for all this block's transactions +;; timestamp: uint, ;; UNIX epoch timestamp of this block, in seconds +;; nbits: uint, ;; compact block difficulty representation +;; nonce: uint ;; PoW solution +;; }) +(define-read-only (parse-block-header (headerbuff (buff 80))) + (let ((ctx { txbuff: headerbuff, index: u0}) + (parsed-version (try! (read-uint32 ctx))) + (parsed-parent-hash (try! (read-hashslice (get ctx parsed-version)))) + (parsed-merkle-root (try! (read-hashslice (get ctx parsed-parent-hash)))) + (parsed-timestamp (try! (read-uint32 (get ctx parsed-merkle-root)))) + (parsed-nbits (try! (read-uint32 (get ctx parsed-timestamp)))) + (parsed-nonce (try! (read-uint32 (get ctx parsed-nbits))))) + (ok {version: (get uint32 parsed-version), + parent: (get hashslice parsed-parent-hash), + merkle-root: (get hashslice parsed-merkle-root), + timestamp: (get uint32 parsed-timestamp), + nbits: (get uint32 parsed-nbits), + nonce: (get uint32 parsed-nonce)}))) + +;; MOCK section +(define-constant DEBUG-MODE true) + +(define-map mock-burnchain-header-hashes uint (buff 32)) + +(define-public (mock-add-burnchain-block-header-hash (burn-height uint) (hash (buff 32))) + (ok (map-set mock-burnchain-header-hashes burn-height hash))) + +(define-read-only (get-bc-h-hash (bh uint)) + (if DEBUG-MODE (map-get? mock-burnchain-header-hashes bh) (get-burn-block-info? header-hash bh))) + +;; END MOCK section + +;; Verify that a block header hashes to a burnchain header hash at a given height. +;; Returns true if so; false if not. 
+(define-read-only (verify-block-header (headerbuff (buff 80)) (expected-block-height uint)) + (match (get-bc-h-hash expected-block-height) + bhh (is-eq bhh (reverse-buff32 (sha256 (sha256 headerbuff)))) + false)) + +;; Get the txid of a transaction, but little-endian. +;; This is the reverse of what you see on block explorers. +(define-read-only (get-reversed-txid (tx (buff 4096))) + (sha256 (sha256 tx))) + +;; Get the txid of a transaction. +;; This is what you see on block explorers. +(define-read-only (get-txid (tx (buff 4096))) + (reverse-buff32 (sha256 (sha256 tx)))) + +;; Determine if the ith bit in a uint is set to 1 +(define-read-only (is-bit-set (val uint) (bit uint)) + (> (bit-and val (bit-shift-left u1 bit)) u0)) + +;; Verify the next step of a Merkle proof. +;; This hashes cur-hash against the ctr-th hash in proof-hashes, and uses that as the next cur-hash. +;; The path is a bitfield describing the walk from the txid up to the merkle root: +;; * if the ith bit is 0, then cur-hash is hashed before the next proof-hash (cur-hash is "left"). +;; * if the ith bit is 1, then the next proof-hash is hashed before cur-hash (cur-hash is "right"). +;; The proof verifies if cur-hash is equal to root-hash, and we're out of proof-hashes to check. +;; Note, ctr is expected to be < (len proof-hashes), verified can be true only if ctr + 1 == (len proof-hashes). +(define-private (inner-merkle-proof-verify (ctr uint) (state { path: uint, root-hash: (buff 32), proof-hashes: (list 14 (buff 32)), tree-depth: uint, cur-hash: (buff 32), verified: bool})) + (let ((path (get path state)) + (is-left (is-bit-set path ctr)) + (proof-hashes (get proof-hashes state)) + (cur-hash (get cur-hash state)) + (root-hash (get root-hash state)) + + (h1 (if is-left (unwrap-panic (element-at proof-hashes ctr)) cur-hash)) + (h2 (if is-left cur-hash (unwrap-panic (element-at proof-hashes ctr)))) + (next-hash (sha256 (sha256 (concat h1 h2)))) + (is-verified (and (is-eq (+ u1 ctr) (len proof-hashes)) (is-eq next-hash root-hash)))) + (merge state { cur-hash: next-hash, verified: is-verified}))) + +;; Verify a Merkle proof, given the _reversed_ txid of a transaction, the merkle root of its block, and a proof consisting of: +;; * The index in the block where the transaction can be found (starting from 0), +;; * The list of hashes that link the txid to the merkle root, +;; * The depth of the block's merkle tree (required because Bitcoin does not identify merkle tree nodes as being leaves or intermediates). +;; The _reversed_ txid is required because that's the order (little-endian) processes them in. +;; The tx-index is required because it tells us the left/right traversals we'd make if we were walking down the tree from root to transaction, +;; and is thus used to deduce the order in which to hash the intermediate hashes with one another to link the txid to the merkle root. +;; Returns (ok true) if the proof is valid. +;; Returns (ok false) if the proof is invalid. +;; Returns (err ERR-PROOF-TOO-SHORT) if the proof's hashes aren't long enough to link the txid to the merkle root. +(define-read-only (verify-merkle-proof (reversed-txid (buff 32)) (merkle-root (buff 32)) (proof { tx-index: uint, hashes: (list 14 (buff 32)), tree-depth: uint})) + (if (> (get tree-depth proof) (len (get hashes proof))) + (err ERR-PROOF-TOO-SHORT) + (ok + (get verified + (fold inner-merkle-proof-verify + (unwrap-panic (slice? 
(list u0 u1 u2 u3 u4 u5 u6 u7 u8 u9 u10 u11 u12 u13) u0 (get tree-depth proof))) + { path: (+ (pow u2 (get tree-depth proof)) (get tx-index proof)), root-hash: merkle-root, proof-hashes: (get hashes proof), cur-hash: reversed-txid, tree-depth: (get tree-depth proof), verified: false}))))) + +;; Helper for wtxid commitments + +;; Gets the scriptPubKey in the last output that follows the 0x6a24aa21a9ed pattern regardless of its content +;; as per BIP-0141 (https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki#commitment-structure) +(define-read-only (get-commitment-scriptPubKey (outs (list 8 { value: uint, scriptPubKey: (buff 128) }))) + (fold inner-get-commitment-scriptPubKey outs 0x)) + +(define-read-only (inner-get-commitment-scriptPubKey (out { value: uint, scriptPubKey: (buff 128) }) (result (buff 128))) + (let ((commitment (get scriptPubKey out))) + (if (is-commitment-pattern commitment) commitment result))) + +;; Returns false, if scriptPubKey does not have the commitment prefix. +(define-read-only (is-commitment-pattern (scriptPubKey (buff 128))) + (asserts! (is-eq (unwrap! (slice? scriptPubKey u0 u6) false) 0x6a24aa21a9ed) false)) + +;; +;; Top-level verification functions +;; + +;; Determine whether or not a Bitcoin transaction without witnesses +;; was mined in a prior Bitcoin block. +;; It takes the block height, the transaction, the block header and a merkle proof, and determines that: +;; * the block header corresponds to the block that was mined at the given Bitcoin height +;; * the transaction's merkle proof links it to the block header's merkle root. + +;; To verify that the merkle root is part of the block header there are two options: +;; a) read the merkle root from the header buffer +;; b) build the header buffer from its parts including the merkle root +;; +;; The merkle proof is a list of sibling merkle tree nodes that allow us to calculate the parent node from two children nodes in each merkle tree level, +;; the depth of the block's merkle tree, and the index in the block in which the given transaction can be found (starting from 0). +;; The first element in hashes must be the given transaction's sibling transaction's ID. This and the given transaction's txid are hashed to +;; calculate the parent hash in the merkle tree, which is then hashed with the *next* hash in the proof, and so on and so forth, until the final +;; hash can be compared against the block header's merkle root field. The tx-index tells us in which order to hash each pair of siblings. +;; Note that the proof hashes -- including the sibling txid -- must be _little-endian_ hashes, because this is how Bitcoin generates them. +;; This is the reverse of what you'd see in a block explorer! +;; +;; Returns (ok true) if the proof checks out. +;; Returns (ok false) if not. +;; Returns (err ERR-PROOF-TOO-SHORT) if the proof doesn't contain enough intermediate hash nodes in the merkle tree. +(define-read-only (was-tx-mined-compact (height uint) (tx (buff 4096)) + (header (buff 80)) + (proof { tx-index: uint, hashes: (list 14 (buff 32)), tree-depth: uint})) + (let ((block (unwrap! (parse-block-header header) (err ERR-BAD-HEADER)))) + (was-tx-mined-internal height tx header (get merkle-root block) proof))) + +;; Private function to verify block header and merkle proof. +;; This function must only be called with the merkle root of the provided header. +;; Use was-tx-mined-compact with header as a buffer or +;; was-tx-mined with header as a tuple. 
+;; Returns txid if tx was mined else err u1 if the header is invalid or err u2 if the proof is invalid. +(define-private (was-tx-mined-internal (height uint) (tx (buff 4096)) (header (buff 80)) (merkle-root (buff 32)) (proof { tx-index: uint, hashes: (list 14 (buff 32)), tree-depth: uint})) + (if (verify-block-header header height) + (let ((reversed-txid (get-reversed-txid tx)) + (txid (reverse-buff32 reversed-txid))) + ;; verify merkle proof + (asserts! + (or + (is-eq merkle-root txid) ;; true, if the transaction is the only transaction + (try! (verify-merkle-proof reversed-txid (reverse-buff32 merkle-root) proof))) + (err ERR-INVALID-MERKLE-PROOF)) + (ok txid)) + (err ERR-HEADER-HEIGHT-MISMATCH))) + + +;; Determine whether or not a Bitcoin transaction +;; with witnesses was mined in a prior Bitcoin block. +;; It takes +;; a) the bitcoin block height, the transaction "tx" with witness data, +;; the bitcoin block header, the tx index in the block and +;; b) the depth of merkle proof of the block and +;; c) the merkle proof of the wtxid "wproof", its root "witness-merkle-proof", +;; the witness reserved value and +;; d) the coinbase transaction "ctx" without witnesses (non-segwit) and its merkle proof "cproof". +;; +;; It determines that: +;; * the block header corresponds to the block that was mined at the given Bitcoin height +;; * the coinbase tx was mined and it contains the commitment to the wtxids +;; * the wtxid of the tx is part of the commitment. +;; +;; The tree depth for wproof and cproof are the same. +;; The coinbase tx index is always 0. +;; +;; It returns (ok wtxid), if it was mined. +(define-read-only (was-segwit-tx-mined-compact + (height uint) + (wtx (buff 4096)) + (header (buff 80)) + (tx-index uint) + (tree-depth uint) + (wproof (list 14 (buff 32))) + (witness-merkle-root (buff 32)) + (witness-reserved-value (buff 32)) + (ctx (buff 1024)) + (cproof (list 14 (buff 32)))) + (begin + ;; verify that the coinbase tx is correct + (try! (was-tx-mined-compact height ctx header { tx-index: u0, hashes: cproof, tree-depth: tree-depth })) + (let ( + (witness-out (get-commitment-scriptPubKey (get outs (try! (parse-tx ctx))))) + (final-hash (sha256 (sha256 (concat witness-merkle-root witness-reserved-value)))) + (reversed-wtxid (get-reversed-txid wtx)) + (wtxid (reverse-buff32 reversed-wtxid)) + ) + ;; verify wtxid commitment + (asserts! (is-eq witness-out (concat 0x6a24aa21a9ed final-hash)) (err ERR-INVALID-COMMITMENT)) + ;; verify witness merkle tree + (asserts! (try! (verify-merkle-proof reversed-wtxid witness-merkle-root + { tx-index: tx-index, hashes: wproof, tree-depth: tree-depth })) (err ERR-WITNESS-TX-NOT-IN-COMMITMENT)) + (ok wtxid)))) diff --git a/components/clarinet-format/tests/golden-intended/comments.clar b/components/clarinet-format/tests/golden-intended/comments.clar new file mode 100644 index 000000000..5824e7904 --- /dev/null +++ b/components/clarinet-format/tests/golden-intended/comments.clar @@ -0,0 +1,33 @@ +;; comment +(define-read-only (get-offer + (id uint) + (w uint) + ) + (map-get? 
offers-map id) +) + +(define-read-only (get-offer) + (ok 1) +) + + ;; top comment + ;; @ignore-formatting +(define-constant something + (+ 1 1) +) ;; eol comment + +(define-read-only (something-else) + (begin + (+ 1 1) + (ok true) + ) +) + +(define-public (something-else + (a uint) + ) + (begin + (+ 1 1) + (ok true) + ) +) diff --git a/components/clarinet-format/tests/golden-intended/if.clar b/components/clarinet-format/tests/golden-intended/if.clar new file mode 100644 index 000000000..b16ecdabf --- /dev/null +++ b/components/clarinet-format/tests/golden-intended/if.clar @@ -0,0 +1,7 @@ +(let + (ids-to-recipient (if + (is-eq no-to-recipient u0) + (list ) + (unwrap-panic (slice? new-available-ids (- (len new-available-ids) no-to-recipient) (len new-available-ids)))) + ) +) diff --git a/components/clarinet-format/tests/golden-intended/match-or.clar b/components/clarinet-format/tests/golden-intended/match-or.clar new file mode 100644 index 000000000..5a8607592 --- /dev/null +++ b/components/clarinet-format/tests/golden-intended/match-or.clar @@ -0,0 +1,32 @@ +;; Determines if a character is a vowel (a, e, i, o, u, and y). +(define-private (is-vowel + (char (buff 1)) + ) + (or + (is-eq char 0x61) ;; a + (is-eq char 0x65) ;; e + (is-eq char 0x69) ;; i + (is-eq char 0x6f) ;; o + (is-eq char 0x75) ;; u + (is-eq char 0x79) ;; y + ) +) + +;; pre comment +(define-private (something) + (match opt + value + (ok (handle-new-value value)) + (ok 1) + ) +) + +(define-read-only (is-borroweable-isolated + (asset principal) + ) + (match (index-of? (contract-call? .pool-reserve-data get-borroweable-isolated-read) asset) + res + true + false + ) +) diff --git a/components/clarinet-format/tests/golden-intended/nested_map.clar b/components/clarinet-format/tests/golden-intended/nested_map.clar new file mode 100644 index 000000000..8529118bf --- /dev/null +++ b/components/clarinet-format/tests/golden-intended/nested_map.clar @@ -0,0 +1,26 @@ +(define-public (mng-name-register) + (map-set name-properties + { + name: name, namespace: namespace + } + { + registered-at: (some burn-block-height), + imported-at: none, + hashed-salted-fqn-preorder: (some hashed-salted-fqn), + preordered-by: (some send-to), + ;; Updated this to be u0, so that renewals are handled through the namespace manager + renewal-height: u0, + stx-burn: u0, + owner: send-to, + } + ) + (print + { + topic: "new-name", + owner: send-to, + name: {name: name, namespace: namespace}, + id: id-to-be-minted, + properties: (map-get? name-properties {name: name, namespace: namespace}) + } + ) +) diff --git a/components/clarinet-format/tests/golden-intended/test.clar b/components/clarinet-format/tests/golden-intended/test.clar new file mode 100644 index 000000000..f22d50209 --- /dev/null +++ b/components/clarinet-format/tests/golden-intended/test.clar @@ -0,0 +1,35 @@ +;; private functions +;; #[allow(unchecked_data)] +(define-private (complete-individual-deposits-helper + (deposit { + txid: (buff 32), + vout-index: uint, + amount: uint, + recipient: principal, + burn-hash: (buff 32), + burn-height: uint, + sweep-txid: (buff 32) + }) + (helper-response (response uint uint)) + ) + (match helper-response + index + (begin + (try! + (unwrap! 
(complete-deposit-wrapper + (get txid deposit) + (get vout-index deposit) + (get amount deposit) + (get recipient deposit) + (get burn-hash deposit) + (get burn-height deposit) + (get sweep-txid deposit) + ) + (err (+ ERR_DEPOSIT_INDEX_PREFIX (+ u10 index))) + ) + ) + (ok (+ index u1)) + ) + err-response (err err-response) + ) +) diff --git a/components/clarinet-format/tests/golden-intended/traits.clar b/components/clarinet-format/tests/golden-intended/traits.clar new file mode 100644 index 000000000..cace389b2 --- /dev/null +++ b/components/clarinet-format/tests/golden-intended/traits.clar @@ -0,0 +1,7 @@ +(use-trait token-a-trait 'SPAXYA5XS51713FDTQ8H94EJ4V579CXMTRNBZKSF.token-a.token-trait) + +(define-public (forward-get-balance (user principal) (contract )) + (begin + (ok (contract-of contract)) ;; returns the principal of the contract implementing + ) +) diff --git a/components/clarinet-format/tests/golden-intended/tuple.clar b/components/clarinet-format/tests/golden-intended/tuple.clar new file mode 100644 index 000000000..6f3d7135d --- /dev/null +++ b/components/clarinet-format/tests/golden-intended/tuple.clar @@ -0,0 +1,17 @@ +(define-public (set-user-reserve + (user principal) + (asset principal) + (state { + principal-borrow-balance: uint, + last-variable-borrow-cumulative-index: uint, + origination-fee: uint, + stable-borrow-rate: uint, + last-updated-block: uint, ;; comment + use-as-collateral: bool + }) + ) + (begin + (asserts! (is-lending-pool contract-caller) ERR_UNAUTHORIZED) + (contract-call? .pool-reserve-data set-user-reserve-data user asset state) + ) +) diff --git a/components/clarinet-format/tests/golden/alex-transfer.clar b/components/clarinet-format/tests/golden/alex-transfer.clar new file mode 100644 index 000000000..c698442df --- /dev/null +++ b/components/clarinet-format/tests/golden/alex-transfer.clar @@ -0,0 +1,37 @@ +;; max_line_length: 80, indentation: 4 +;; https://github.com/alexgo-io/alex-v1/blob/dev/clarity/contracts/stx404-token/token-stx404.clar#L60-L94 +(define-public (transfer (amount-or-id uint) (sender principal) (recipient principal)) + (begin + (asserts! (is-eq sender tx-sender) err-not-authorised) + (if (<= amount-or-id max-supply) ;; id transfer + (let ( + (check-id (asserts! (is-id-owned-by-or-default amount-or-id sender) err-invalid-id)) + (owned-by-sender (get-owned-or-default sender)) + (owned-by-recipient (get-owned-or-default recipient)) + (id-idx (unwrap-panic (index-of? owned-by-sender amount-or-id)))) + (map-set owned sender (pop owned-by-sender id-idx)) + (map-set owned recipient (unwrap-panic (as-max-len? (append owned-by-recipient amount-or-id) u10000))) + (try! (ft-transfer? stx404 one-8 sender recipient)) + (try! (nft-transfer? stx404nft amount-or-id sender recipient)) + (ok true)) + (let ( + (balance-sender (unwrap-panic (get-balance sender))) + (balance-recipient (unwrap-panic (get-balance recipient))) + (check-balance (try! (ft-transfer? stx404 amount-or-id sender recipient))) + (no-to-treasury (- (/ balance-sender one-8) (/ (- balance-sender amount-or-id) one-8))) + (no-to-recipient (- (/ (+ balance-recipient amount-or-id) one-8) (/ balance-recipient one-8))) + (owned-by-sender (get-owned-or-default sender)) + (owned-by-recipient (get-owned-or-default recipient)) + (ids-to-treasury (if (is-eq no-to-treasury u0) (list ) (unwrap-panic (slice? owned-by-sender (- (len owned-by-sender) no-to-treasury) (len owned-by-sender))))) + (new-available-ids (if (is-eq no-to-treasury u0) (var-get available-ids) (unwrap-panic (as-max-len? 
(concat (var-get available-ids) ids-to-treasury) u10000)))) + (ids-to-recipient (if (is-eq no-to-recipient u0) (list ) (unwrap-panic (slice? new-available-ids (- (len new-available-ids) no-to-recipient) (len new-available-ids)))))) + (var-set sender-temp sender) + (var-set recipient-temp (as-contract tx-sender)) + (and (> no-to-treasury u0) (try! (fold check-err (map nft-transfer-iter ids-to-treasury) (ok true)))) + (var-set sender-temp (as-contract tx-sender)) + (var-set recipient-temp recipient) + (and (> no-to-recipient u0) (try! (fold check-err (map nft-transfer-iter ids-to-recipient) (ok true)))) + (map-set owned sender (if (is-eq no-to-treasury u0) owned-by-sender (unwrap-panic (slice? owned-by-sender u0 (- (len owned-by-sender) no-to-treasury))))) + (map-set owned recipient (if (is-eq no-to-recipient u0) owned-by-recipient (unwrap-panic (as-max-len? (concat owned-by-recipient ids-to-recipient) u10000)))) + (var-set available-ids (if (is-eq no-to-recipient u0) new-available-ids (unwrap-panic (slice? new-available-ids u0 (- (len new-available-ids) no-to-recipient))))) + (ok true))))) diff --git a/components/clarinet-format/tests/golden/clarity-bitcoin.clar b/components/clarinet-format/tests/golden/clarity-bitcoin.clar new file mode 100644 index 000000000..ff97c13fd --- /dev/null +++ b/components/clarinet-format/tests/golden/clarity-bitcoin.clar @@ -0,0 +1,561 @@ +;; max_line_length: 80, indentation: tab +;; source: https://github.com/hirosystems/clarity-examples/blob/main/examples/clarity-bitcoin/contracts/clarity-bitcoin.clar + +;; @contract stateless contract to verify bitcoin transaction +;; @version 5 + +;; version 5 adds support for txid generation and improves security + +;; Error codes +(define-constant ERR-OUT-OF-BOUNDS u1) +(define-constant ERR-TOO-MANY-TXINS u2) +(define-constant ERR-TOO-MANY-TXOUTS u3) +(define-constant ERR-VARSLICE-TOO-LONG u4) +(define-constant ERR-BAD-HEADER u5) +(define-constant ERR-HEADER-HEIGHT-MISMATCH u6) +(define-constant ERR-INVALID-MERKLE-PROOF u7) +(define-constant ERR-PROOF-TOO-SHORT u8) +(define-constant ERR-TOO-MANY-WITNESSES u9) +(define-constant ERR-INVALID-COMMITMENT u10) +(define-constant ERR-WITNESS-TX-NOT-IN-COMMITMENT u11) +(define-constant ERR-NOT-SEGWIT-TRANSACTION u12) +(define-constant ERR-LEFTOVER-DATA u13) + +;; +;; Helper functions to parse bitcoin transactions +;; + +;; Create a list with n elments `true`. n must be smaller than 9. +(define-private (bool-list-of-len (n uint)) + (unwrap-panic (slice? (list true true true true true true true true) u0 n))) + +;; Reads the next two bytes from txbuff as a little-endian 16-bit integer, and updates the index. +;; Returns (ok { uint16: uint, ctx: { txbuff: (buff 4096), index: uint } }) on success. +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff +(define-read-only (read-uint8 (ctx { txbuff: (buff 4096), index: uint})) + (let ((data (get txbuff ctx)) + (base (get index ctx))) + (ok {uint8: (buff-to-uint-le (unwrap-panic (as-max-len? (unwrap! (slice? data base (+ base u1)) (err ERR-OUT-OF-BOUNDS)) u1))), + ctx: { txbuff: data, index: (+ u1 base)}}))) + +;; Reads the next two bytes from txbuff as a little-endian 16-bit integer, and updates the index. +;; Returns (ok { uint16: uint, ctx: { txbuff: (buff 4096), index: uint } }) on success. 
+;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff +(define-read-only (read-uint16 (ctx { txbuff: (buff 4096), index: uint})) + (let ((data (get txbuff ctx)) + (base (get index ctx))) + (ok {uint16: (buff-to-uint-le (unwrap-panic (as-max-len? (unwrap! (slice? data base (+ base u2)) (err ERR-OUT-OF-BOUNDS)) u2))), + ctx: { txbuff: data, index: (+ u2 base)}}))) + +;; Reads the next four bytes from txbuff as a little-endian 32-bit integer, and updates the index. +;; Returns (ok { uint32: uint, ctx: { txbuff: (buff 4096), index: uint } }) on success. +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff +(define-read-only (read-uint32 (ctx { txbuff: (buff 4096), index: uint})) + (let ((data (get txbuff ctx)) + (base (get index ctx))) + (ok {uint32: (buff-to-uint-le (unwrap-panic (as-max-len? (unwrap! (slice? data base (+ base u4)) (err ERR-OUT-OF-BOUNDS)) u4))), + ctx: { txbuff: data, index: (+ u4 base)}}))) + +;; Reads the next eight bytes from txbuff as a little-endian 64-bit integer, and updates the index. +;; Returns (ok { uint64: uint, ctx: { txbuff: (buff 4096), index: uint } }) on success. +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff +(define-read-only (read-uint64 (ctx { txbuff: (buff 4096), index: uint})) + (let ((data (get txbuff ctx)) + (base (get index ctx))) + (ok {uint64: (buff-to-uint-le (unwrap-panic (as-max-len? (unwrap! (slice? data base (+ base u8)) (err ERR-OUT-OF-BOUNDS)) u8))), + ctx: { txbuff: data, index: (+ u8 base)}}))) + +;; Reads the next varint from txbuff, and updates the index. +;; Returns (ok { varint: uint, ctx: { txbuff: (buff 4096), index: uint } }) on success +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff. +(define-read-only (read-varint (ctx { txbuff: (buff 4096), index: uint})) + (let ((ptr (get index ctx)) + (tx (get txbuff ctx)) + (byte (buff-to-uint-le (unwrap! (element-at tx ptr) + (err ERR-OUT-OF-BOUNDS))))) + (if (<= byte u252) + ;; given byte is the varint + (ok { varint: byte, ctx: { txbuff: tx, index: (+ u1 ptr)}}) + (if (is-eq byte u253) + (let ( + ;; next two bytes is the varint + (parsed-u16 (try! (read-uint16 { txbuff: tx, index: (+ u1 ptr)})))) + (ok { varint: (get uint16 parsed-u16), ctx: (get ctx parsed-u16)})) + (if (is-eq byte u254) + (let ( + ;; next four bytes is the varint + (parsed-u32 (try! (read-uint32 { txbuff: tx, index: (+ u1 ptr)})))) + (ok { varint: (get uint32 parsed-u32), ctx: (get ctx parsed-u32)})) + (let ( + ;; next eight bytes is the varint + (parsed-u64 (try! (read-uint64 { txbuff: tx, index: (+ u1 ptr)})))) + (ok { varint: (get uint64 parsed-u64), ctx: (get ctx parsed-u64)}))))))) + +;; Reads a varint-prefixed byte slice from txbuff, and updates the index to point to the byte after the varint and slice. +;; Returns (ok { varslice: (buff 4096), ctx: { txbuff: (buff 4096), index: uint } }) on success, where varslice has the length of the varint prefix. +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff. +(define-read-only (read-varslice (old-ctx { txbuff: (buff 4096), index: uint})) + (let ((parsed (try! (read-varint old-ctx))) + (ctx (get ctx parsed)) + (slice-start (get index ctx)) + (target-index (+ slice-start (get varint parsed))) + (txbuff (get txbuff ctx))) + (ok {varslice: (unwrap! (slice? txbuff slice-start target-index) (err ERR-OUT-OF-BOUNDS)), + ctx: { txbuff: txbuff, index: target-index}}))) + +(define-private (reverse-buff16 (input (buff 16))) + (unwrap-panic (slice? (unwrap-panic (to-consensus-buff? 
(buff-to-uint-le input))) u1 u17))) + +(define-read-only (reverse-buff32 (input (buff 32))) + (unwrap-panic (as-max-len? (concat + (reverse-buff16 (unwrap-panic (as-max-len? (unwrap-panic (slice? input u16 u32)) u16))) + (reverse-buff16 (unwrap-panic (as-max-len? (unwrap-panic (slice? input u0 u16)) u16)))) u32))) + +;; Reads a little-endian hash -- consume the next 32 bytes, and reverse them. +;; Returns (ok { hashslice: (buff 32), ctx: { txbuff: (buff 4096), index: uint } }) on success, and updates the index. +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff. +(define-read-only (read-hashslice (old-ctx { txbuff: (buff 4096), index: uint})) + (let ((slice-start (get index old-ctx)) + (target-index (+ u32 slice-start)) + (txbuff (get txbuff old-ctx)) + (hash-le (unwrap-panic + (as-max-len? (unwrap! + (slice? txbuff slice-start target-index) (err ERR-OUT-OF-BOUNDS)) u32)))) + (ok {hashslice: (reverse-buff32 hash-le), + ctx: { txbuff: txbuff, index: target-index}}))) + +;; Inner fold method to read the next tx input from txbuff. +;; The index in ctx will be updated to point to the next tx input if all goes well (or to the start of the outputs) +;; Returns (ok { ... }) on success. +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff. +;; Returns (err ERR-VARSLICE-TOO-LONG) if we find a scriptSig that's too long to parse. +;; Returns (err ERR-TOO-MANY-TXINS) if there are more than eight inputs to read. +(define-read-only (read-next-txin (ignored bool) + (result (response {ctx: { txbuff: (buff 4096), index: uint }, + remaining: uint, + txins: (list 8 {outpoint: { + hash: (buff 32), + index: uint}, + scriptSig: (buff 256), ;; just big enough to hold a 2-of-3 multisig script + sequence: uint})} + uint))) + (let ((state (unwrap! result result))) + (let ((remaining (get remaining state)) + (ctx (get ctx state)) + (parsed-hash (try! (read-hashslice ctx))) + (parsed-index (try! (read-uint32 (get ctx parsed-hash)))) + (parsed-scriptSig (try! (read-varslice (get ctx parsed-index)))) + (parsed-sequence (try! (read-uint32 (get ctx parsed-scriptSig)))) + (new-ctx (get ctx parsed-sequence))) + (ok {ctx: new-ctx, + remaining: (- remaining u1), + txins: (unwrap! + (as-max-len? + (append (get txins state) { outpoint: { + hash: (get hashslice parsed-hash), + index: (get uint32 parsed-index) }, + scriptSig: (unwrap! (as-max-len? (get varslice parsed-scriptSig) u256) (err ERR-VARSLICE-TOO-LONG)), + sequence: (get uint32 parsed-sequence)}) u8) + (err ERR-TOO-MANY-TXINS))})) + )) + +;; Read a transaction's inputs. +;; Returns (ok { txins: (list { ... }), remaining: uint, ctx: { txbuff: (buff 4096), index: uint } }) on success, and updates the index in ctx to point to the start of the tx outputs. +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff. +;; Returns (err ERR-VARSLICE-TOO-LONG) if we find a scriptSig that's too long to parse. +;; Returns (err ERR-TOO-MANY-TXINS) if there are more than eight inputs to read. +(define-read-only (read-txins (ctx { txbuff: (buff 4096), index: uint})) + (let ((parsed-num-txins (try! (read-varint ctx))) + (num-txins (get varint parsed-num-txins)) + (new-ctx (get ctx parsed-num-txins))) + (if (> num-txins u8) + (err ERR-TOO-MANY-TXINS) + (fold read-next-txin (bool-list-of-len num-txins) (ok { ctx: new-ctx, remaining: num-txins, txins: (list)}))))) + +;; Read the next transaction output, and update the index in ctx to point to the next output. +;; Returns (ok { ... 
}) on success +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff. +;; Returns (err ERR-VARSLICE-TOO-LONG) if we find a scriptPubKey that's too long to parse. +;; Returns (err ERR-TOO-MANY-TXOUTS) if there are more than eight outputs to read. +(define-read-only (read-next-txout (ignored bool) + (result (response {ctx: { txbuff: (buff 4096), index: uint }, + txouts: (list 8 {value: uint, + scriptPubKey: (buff 128)})} + uint))) + (let ((state (unwrap! result result)) + (parsed-value (try! (read-uint64 (get ctx state)))) + (parsed-script (try! (read-varslice (get ctx parsed-value)))) + (new-ctx (get ctx parsed-script))) + (ok {ctx: new-ctx, + txouts: (unwrap! + (as-max-len? + (append (get txouts state) + { value: (get uint64 parsed-value), + scriptPubKey: (unwrap! (as-max-len? (get varslice parsed-script) u128) (err ERR-VARSLICE-TOO-LONG))}) u8) + (err ERR-TOO-MANY-TXOUTS))}))) + +;; Read all transaction outputs in a transaction. Update the index to point to the first byte after the outputs, if all goes well. +;; Returns (ok { txouts: (list { ... }), remaining: uint, ctx: { txbuff: (buff 4096), index: uint } }) on success, and updates the index in ctx to point to the start of the tx outputs. +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff. +;; Returns (err ERR-VARSLICE-TOO-LONG) if we find a scriptPubKey that's too long to parse. +;; Returns (err ERR-TOO-MANY-TXOUTS) if there are more than eight outputs to read. +(define-read-only (read-txouts (ctx { txbuff: (buff 4096), index: uint})) + (let ((parsed-num-txouts (try! (read-varint ctx))) + (num-txouts (get varint parsed-num-txouts)) + (new-ctx (get ctx parsed-num-txouts))) + (if (> num-txouts u8) + (err ERR-TOO-MANY-TXOUTS) + (fold read-next-txout (bool-list-of-len num-txouts) (ok { ctx: new-ctx, txouts: (list)}))))) + +;; Read the stack item of the witness field, and update the index in ctx to point to the next item. +(define-read-only (read-next-item (ignored bool) + (result (response {ctx: { txbuff: (buff 4096), index: uint }, + items: (list 8 (buff 128))} + uint))) + (let ((state (unwrap! result result)) + (parsed-item (try! (read-varslice (get ctx state)))) + (new-ctx (get ctx parsed-item))) + (ok {ctx: new-ctx, + items: (unwrap! + (as-max-len? + (append (get items state) (unwrap! (as-max-len? (get varslice parsed-item) u128) (err ERR-VARSLICE-TOO-LONG))) u8) + (err ERR-TOO-MANY-WITNESSES))}))) + +;; Read the next witness data, and update the index in ctx to point to the next witness. +(define-read-only (read-next-witness (ignored bool) + (result (response + { ctx: {txbuff: (buff 4096), index: uint}, witnesses: (list 8 (list 8 (buff 128))) } uint))) + (let ((state (unwrap! result result)) + (parsed-num-items (try! (read-varint (get ctx state)))) + (ctx (get ctx parsed-num-items)) + (varint (get varint parsed-num-items))) + (if (> varint u0) + ;; read all stack items for current txin and add to witnesses. + (let ((parsed-items (try! (fold read-next-item (bool-list-of-len varint) (ok { ctx: ctx, items: (list)}))))) + (ok { + witnesses: (unwrap-panic (as-max-len? (append (get witnesses state) (get items parsed-items)) u8)), + ctx: (get ctx parsed-items) + })) + ;; txin has not witness data, add empty list to witnesses. + (ok { + witnesses: (unwrap-panic (as-max-len? (append (get witnesses state) (list)) u8)), + ctx: ctx + })))) + +;; Read all witness data in a transaction. Update the index to point to the end of the tx, if all goes well. 
+;; Returns (ok {witnesses: (list 8 (list 8 (buff 128))), ctx: { txbuff: (buff 4096), index: uint } }) on success, and updates the index in ctx to point after the end of the tx. +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff. +;; Returns (err ERR-VARSLICE-TOO-LONG) if we find a scriptPubKey that's too long to parse. +;; Returns (err ERR-TOO-MANY-WITNESSES) if there are more than eight witness data or stack items to read. +(define-read-only (read-witnesses (ctx { txbuff: (buff 4096), index: uint }) (num-txins uint)) + (fold read-next-witness (bool-list-of-len num-txins) (ok { ctx: ctx, witnesses: (list) }))) + +;; +;; Parses a Bitcoin transaction, with up to 8 inputs and 8 outputs, with scriptSigs of up to 256 bytes each, and with scriptPubKeys up to 128 bytes. +;; It will also calculate and return the TXID if calculate-txid is set to true. +;; Returns a tuple structured as follows on success: +;; (ok { +;; version: uint, ;; tx version +;; segwit-marker: uint, +;; segwit-version: uint, +;; txid: (optional (buff 32)) +;; ins: (list 8 +;; { +;; outpoint: { ;; pointer to the utxo this input consumes +;; hash: (buff 32), +;; index: uint +;; }, +;; scriptSig: (buff 256), ;; spending condition script +;; sequence: uint +;; }), +;; outs: (list 8 +;; { +;; value: uint, ;; satoshis sent +;; scriptPubKey: (buff 128) ;; parse this to get an address +;; }), +;; witnesses: (list 8 (list 8 (buff 128))), +;; locktime: uint +;; }) +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff. +;; Returns (err ERR-VARSLICE-TOO-LONG) if we find a scriptPubKey or scriptSig that's too long to parse. +;; Returns (err ERR-TOO-MANY-TXOUTS) if there are more than eight inputs to read. +;; Returns (err ERR-TOO-MANY-TXINS) if there are more than eight outputs to read. +;; Returns (err ERR-NOT-SEGWIT-TRANSACTION) if tx is not a segwit transaction. +;; Returns (err ERR-LEFTOVER-DATA) if the tx buffer contains leftover data at the end. +(define-read-only (parse-wtx (tx (buff 4096)) (calculate-txid bool)) + (let ((ctx { txbuff: tx, index: u0}) + (parsed-version (try! (read-uint32 ctx))) + (parsed-segwit-marker (try! (read-uint8 (get ctx parsed-version)))) + (parsed-segwit-version (try! (read-uint8 (get ctx parsed-segwit-marker)))) + (parsed-txins (try! (read-txins (get ctx parsed-segwit-version)))) + (parsed-txouts (try! (read-txouts (get ctx parsed-txins)))) + (parsed-witnesses (try! (read-witnesses (get ctx parsed-txouts) (len (get txins parsed-txins))))) + (parsed-locktime (try! (read-uint32 (get ctx parsed-witnesses)))) + ) + (asserts! (and (is-eq (get uint8 parsed-segwit-marker) u0) (is-eq (get uint8 parsed-segwit-version) u1)) (err ERR-NOT-SEGWIT-TRANSACTION)) + (asserts! (is-eq (len tx) (get index (get ctx parsed-locktime))) (err ERR-LEFTOVER-DATA)) + (ok {version: (get uint32 parsed-version), + segwit-marker: (get uint8 parsed-segwit-marker), + segwit-version: (get uint8 parsed-segwit-version), + ins: (get txins parsed-txins), + outs: (get txouts parsed-txouts), + txid: (if calculate-txid + (some (reverse-buff32 (sha256 (sha256 + (concat + (unwrap-panic (slice? tx u0 u4)) + (concat + (unwrap-panic (slice? tx (get index (get ctx parsed-segwit-version)) (get index (get ctx parsed-txouts)))) + (unwrap-panic (slice? 
tx (get index (get ctx parsed-witnesses)) (len tx))))))))) + none), + witnesses: (get witnesses parsed-witnesses), + locktime: (get uint32 parsed-locktime) + }))) + +;; +;; Parses a Bitcoin transaction, with up to 8 inputs and 8 outputs, with scriptSigs of up to 256 bytes each, and with scriptPubKeys up to 128 bytes. +;; Returns a tuple structured as follows on success: +;; (ok { +;; version: uint, ;; tx version +;; ins: (list 8 +;; { +;; outpoint: { ;; pointer to the utxo this input consumes +;; hash: (buff 32), +;; index: uint +;; }, +;; scriptSig: (buff 256), ;; spending condition script +;; sequence: uint +;; }), +;; outs: (list 8 +;; { +;; value: uint, ;; satoshis sent +;; scriptPubKey: (buff 128) ;; parse this to get an address +;; }), +;; locktime: uint +;; }) +;; Returns (err ERR-OUT-OF-BOUNDS) if we read past the end of txbuff. +;; Returns (err ERR-VARSLICE-TOO-LONG) if we find a scriptPubKey or scriptSig that's too long to parse. +;; Returns (err ERR-TOO-MANY-TXOUTS) if there are more than eight inputs to read. +;; Returns (err ERR-TOO-MANY-TXINS) if there are more than eight outputs to read. +;; Returns (err ERR-LEFTOVER-DATA) if the tx buffer contains leftover data at the end. +(define-read-only (parse-tx (tx (buff 4096))) + (let ((ctx { txbuff: tx, index: u0}) + (parsed-version (try! (read-uint32 ctx))) + (parsed-txins (try! (read-txins (get ctx parsed-version)))) + (parsed-txouts (try! (read-txouts (get ctx parsed-txins)))) + (parsed-locktime (try! (read-uint32 (get ctx parsed-txouts))))) + ;; check if it is a non-segwit transaction? + ;; at least check what happens + (asserts! (is-eq (len tx) (get index (get ctx parsed-locktime))) (err ERR-LEFTOVER-DATA)) + (ok {version: (get uint32 parsed-version), + ins: (get txins parsed-txins), + outs: (get txouts parsed-txouts), + locktime: (get uint32 parsed-locktime)}))) + +;; Parse a Bitcoin block header. +;; Returns a tuple structured as folowed on success: +;; (ok { +;; version: uint, ;; block version, +;; parent: (buff 32), ;; parent block hash, +;; merkle-root: (buff 32), ;; merkle root for all this block's transactions +;; timestamp: uint, ;; UNIX epoch timestamp of this block, in seconds +;; nbits: uint, ;; compact block difficulty representation +;; nonce: uint ;; PoW solution +;; }) +(define-read-only (parse-block-header (headerbuff (buff 80))) + (let ((ctx { txbuff: headerbuff, index: u0}) + (parsed-version (try! (read-uint32 ctx))) + (parsed-parent-hash (try! (read-hashslice (get ctx parsed-version)))) + (parsed-merkle-root (try! (read-hashslice (get ctx parsed-parent-hash)))) + (parsed-timestamp (try! (read-uint32 (get ctx parsed-merkle-root)))) + (parsed-nbits (try! (read-uint32 (get ctx parsed-timestamp)))) + (parsed-nonce (try! (read-uint32 (get ctx parsed-nbits))))) + (ok {version: (get uint32 parsed-version), + parent: (get hashslice parsed-parent-hash), + merkle-root: (get hashslice parsed-merkle-root), + timestamp: (get uint32 parsed-timestamp), + nbits: (get uint32 parsed-nbits), + nonce: (get uint32 parsed-nonce)}))) + +;; MOCK section +(define-constant DEBUG-MODE true) + +(define-map mock-burnchain-header-hashes uint (buff 32)) + +(define-public (mock-add-burnchain-block-header-hash (burn-height uint) (hash (buff 32))) + (ok (map-set mock-burnchain-header-hashes burn-height hash))) + +(define-read-only (get-bc-h-hash (bh uint)) + (if DEBUG-MODE (map-get? mock-burnchain-header-hashes bh) (get-burn-block-info? 
header-hash bh))) + +;; END MOCK section + +;; Verify that a block header hashes to a burnchain header hash at a given height. +;; Returns true if so; false if not. +(define-read-only (verify-block-header (headerbuff (buff 80)) (expected-block-height uint)) + (match (get-bc-h-hash expected-block-height) + bhh (is-eq bhh (reverse-buff32 (sha256 (sha256 headerbuff)))) + false)) + +;; Get the txid of a transaction, but little-endian. +;; This is the reverse of what you see on block explorers. +(define-read-only (get-reversed-txid (tx (buff 4096))) + (sha256 (sha256 tx))) + +;; Get the txid of a transaction. +;; This is what you see on block explorers. +(define-read-only (get-txid (tx (buff 4096))) + (reverse-buff32 (sha256 (sha256 tx)))) + +;; Determine if the ith bit in a uint is set to 1 +(define-read-only (is-bit-set (val uint) (bit uint)) + (> (bit-and val (bit-shift-left u1 bit)) u0)) + +;; Verify the next step of a Merkle proof. +;; This hashes cur-hash against the ctr-th hash in proof-hashes, and uses that as the next cur-hash. +;; The path is a bitfield describing the walk from the txid up to the merkle root: +;; * if the ith bit is 0, then cur-hash is hashed before the next proof-hash (cur-hash is "left"). +;; * if the ith bit is 1, then the next proof-hash is hashed before cur-hash (cur-hash is "right"). +;; The proof verifies if cur-hash is equal to root-hash, and we're out of proof-hashes to check. +;; Note, ctr is expected to be < (len proof-hashes), verified can be true only if ctr + 1 == (len proof-hashes). +(define-private (inner-merkle-proof-verify (ctr uint) (state { path: uint, root-hash: (buff 32), proof-hashes: (list 14 (buff 32)), tree-depth: uint, cur-hash: (buff 32), verified: bool})) + (let ((path (get path state)) + (is-left (is-bit-set path ctr)) + (proof-hashes (get proof-hashes state)) + (cur-hash (get cur-hash state)) + (root-hash (get root-hash state)) + + (h1 (if is-left (unwrap-panic (element-at proof-hashes ctr)) cur-hash)) + (h2 (if is-left cur-hash (unwrap-panic (element-at proof-hashes ctr)))) + (next-hash (sha256 (sha256 (concat h1 h2)))) + (is-verified (and (is-eq (+ u1 ctr) (len proof-hashes)) (is-eq next-hash root-hash)))) + (merge state { cur-hash: next-hash, verified: is-verified}))) + +;; Verify a Merkle proof, given the _reversed_ txid of a transaction, the merkle root of its block, and a proof consisting of: +;; * The index in the block where the transaction can be found (starting from 0), +;; * The list of hashes that link the txid to the merkle root, +;; * The depth of the block's merkle tree (required because Bitcoin does not identify merkle tree nodes as being leaves or intermediates). +;; The _reversed_ txid is required because that's the order (little-endian) processes them in. +;; The tx-index is required because it tells us the left/right traversals we'd make if we were walking down the tree from root to transaction, +;; and is thus used to deduce the order in which to hash the intermediate hashes with one another to link the txid to the merkle root. +;; Returns (ok true) if the proof is valid. +;; Returns (ok false) if the proof is invalid. +;; Returns (err ERR-PROOF-TOO-SHORT) if the proof's hashes aren't long enough to link the txid to the merkle root. 
+(define-read-only (verify-merkle-proof (reversed-txid (buff 32)) (merkle-root (buff 32)) (proof { tx-index: uint, hashes: (list 14 (buff 32)), tree-depth: uint})) + (if (> (get tree-depth proof) (len (get hashes proof))) + (err ERR-PROOF-TOO-SHORT) + (ok + (get verified + (fold inner-merkle-proof-verify + (unwrap-panic (slice? (list u0 u1 u2 u3 u4 u5 u6 u7 u8 u9 u10 u11 u12 u13) u0 (get tree-depth proof))) + { path: (+ (pow u2 (get tree-depth proof)) (get tx-index proof)), root-hash: merkle-root, proof-hashes: (get hashes proof), cur-hash: reversed-txid, tree-depth: (get tree-depth proof), verified: false}))))) + +;; Helper for wtxid commitments + +;; Gets the scriptPubKey in the last output that follows the 0x6a24aa21a9ed pattern regardless of its content +;; as per BIP-0141 (https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki#commitment-structure) +(define-read-only (get-commitment-scriptPubKey (outs (list 8 { value: uint, scriptPubKey: (buff 128) }))) + (fold inner-get-commitment-scriptPubKey outs 0x)) + +(define-read-only (inner-get-commitment-scriptPubKey (out { value: uint, scriptPubKey: (buff 128) }) (result (buff 128))) + (let ((commitment (get scriptPubKey out))) + (if (is-commitment-pattern commitment) commitment result))) + +;; Returns false, if scriptPubKey does not have the commitment prefix. +(define-read-only (is-commitment-pattern (scriptPubKey (buff 128))) + (asserts! (is-eq (unwrap! (slice? scriptPubKey u0 u6) false) 0x6a24aa21a9ed) false)) + +;; +;; Top-level verification functions +;; + +;; Determine whether or not a Bitcoin transaction without witnesses +;; was mined in a prior Bitcoin block. +;; It takes the block height, the transaction, the block header and a merkle proof, and determines that: +;; * the block header corresponds to the block that was mined at the given Bitcoin height +;; * the transaction's merkle proof links it to the block header's merkle root. + +;; To verify that the merkle root is part of the block header there are two options: +;; a) read the merkle root from the header buffer +;; b) build the header buffer from its parts including the merkle root +;; +;; The merkle proof is a list of sibling merkle tree nodes that allow us to calculate the parent node from two children nodes in each merkle tree level, +;; the depth of the block's merkle tree, and the index in the block in which the given transaction can be found (starting from 0). +;; The first element in hashes must be the given transaction's sibling transaction's ID. This and the given transaction's txid are hashed to +;; calculate the parent hash in the merkle tree, which is then hashed with the *next* hash in the proof, and so on and so forth, until the final +;; hash can be compared against the block header's merkle root field. The tx-index tells us in which order to hash each pair of siblings. +;; Note that the proof hashes -- including the sibling txid -- must be _little-endian_ hashes, because this is how Bitcoin generates them. +;; This is the reverse of what you'd see in a block explorer! +;; +;; Returns (ok true) if the proof checks out. +;; Returns (ok false) if not. +;; Returns (err ERR-PROOF-TOO-SHORT) if the proof doesn't contain enough intermediate hash nodes in the merkle tree. +(define-read-only (was-tx-mined-compact (height uint) (tx (buff 4096)) + (header (buff 80)) + (proof { tx-index: uint, hashes: (list 14 (buff 32)), tree-depth: uint})) + (let ((block (unwrap! 
(parse-block-header header) (err ERR-BAD-HEADER)))) + (was-tx-mined-internal height tx header (get merkle-root block) proof))) + +;; Private function to verify block header and merkle proof. +;; This function must only be called with the merkle root of the provided header. +;; Use was-tx-mined-compact with header as a buffer or +;; was-tx-mined with header as a tuple. +;; Returns txid if tx was mined else err u1 if the header is invalid or err u2 if the proof is invalid. +(define-private (was-tx-mined-internal (height uint) (tx (buff 4096)) (header (buff 80)) (merkle-root (buff 32)) (proof { tx-index: uint, hashes: (list 14 (buff 32)), tree-depth: uint})) + (if (verify-block-header header height) + (let ((reversed-txid (get-reversed-txid tx)) + (txid (reverse-buff32 reversed-txid))) + ;; verify merkle proof + (asserts! + (or + (is-eq merkle-root txid) ;; true, if the transaction is the only transaction + (try! (verify-merkle-proof reversed-txid (reverse-buff32 merkle-root) proof))) + (err ERR-INVALID-MERKLE-PROOF)) + (ok txid)) + (err ERR-HEADER-HEIGHT-MISMATCH))) + + +;; Determine whether or not a Bitcoin transaction +;; with witnesses was mined in a prior Bitcoin block. +;; It takes +;; a) the bitcoin block height, the transaction "tx" with witness data, +;; the bitcoin block header, the tx index in the block and +;; b) the depth of merkle proof of the block and +;; c) the merkle proof of the wtxid "wproof", its root "witness-merkle-proof", +;; the witness reserved value and +;; d) the coinbase transaction "ctx" without witnesses (non-segwit) and its merkle proof "cproof". +;; +;; It determines that: +;; * the block header corresponds to the block that was mined at the given Bitcoin height +;; * the coinbase tx was mined and it contains the commitment to the wtxids +;; * the wtxid of the tx is part of the commitment. +;; +;; The tree depth for wproof and cproof are the same. +;; The coinbase tx index is always 0. +;; +;; It returns (ok wtxid), if it was mined. +(define-read-only (was-segwit-tx-mined-compact + (height uint) + (wtx (buff 4096)) + (header (buff 80)) + (tx-index uint) + (tree-depth uint) + (wproof (list 14 (buff 32))) + (witness-merkle-root (buff 32)) + (witness-reserved-value (buff 32)) + (ctx (buff 1024)) + (cproof (list 14 (buff 32)))) + (begin + ;; verify that the coinbase tx is correct + (try! (was-tx-mined-compact height ctx header { tx-index: u0, hashes: cproof, tree-depth: tree-depth })) + (let ( + (witness-out (get-commitment-scriptPubKey (get outs (try! (parse-tx ctx))))) + (final-hash (sha256 (sha256 (concat witness-merkle-root witness-reserved-value)))) + (reversed-wtxid (get-reversed-txid wtx)) + (wtxid (reverse-buff32 reversed-wtxid)) + ) + ;; verify wtxid commitment + (asserts! (is-eq witness-out (concat 0x6a24aa21a9ed final-hash)) (err ERR-INVALID-COMMITMENT)) + ;; verify witness merkle tree + (asserts! (try! (verify-merkle-proof reversed-wtxid witness-merkle-root + { tx-index: tx-index, hashes: wproof, tree-depth: tree-depth })) (err ERR-WITNESS-TX-NOT-IN-COMMITMENT)) + (ok wtxid)))) diff --git a/components/clarinet-format/tests/golden/comments.clar b/components/clarinet-format/tests/golden/comments.clar new file mode 100644 index 000000000..57c76ad7f --- /dev/null +++ b/components/clarinet-format/tests/golden/comments.clar @@ -0,0 +1,16 @@ +;; max_line_length: 80, indentation: 2 +;; comment +(define-read-only (get-offer (id uint) (w uint)) (map-get? 
offers-map id) +) +(define-read-only (get-offer) (ok 1)) +;; top comment +;; @ignore-formatting +(define-constant something (+ 1 1)) ;; eol comment + +(define-read-only (something-else) + (begin (+ 1 1) (ok true) + )) + +(define-public (something-else (a uint)) + (begin + (+ 1 1) (ok true))) diff --git a/components/clarinet-format/tests/golden/if.clar b/components/clarinet-format/tests/golden/if.clar new file mode 100644 index 000000000..3d9c00c6d --- /dev/null +++ b/components/clarinet-format/tests/golden/if.clar @@ -0,0 +1,3 @@ +;; max_line_length: 80, indentation: 4 +(let + (ids-to-recipient (if (is-eq no-to-recipient u0) (list ) (unwrap-panic (slice? new-available-ids (- (len new-available-ids) no-to-recipient) (len new-available-ids)))))) diff --git a/components/clarinet-format/tests/golden/match-or.clar b/components/clarinet-format/tests/golden/match-or.clar new file mode 100644 index 000000000..2d4c06df1 --- /dev/null +++ b/components/clarinet-format/tests/golden/match-or.clar @@ -0,0 +1,22 @@ +;; max_line_length: 80, indentation: 2 +;; Determines if a character is a vowel (a, e, i, o, u, and y). +(define-private (is-vowel (char (buff 1))) + (or + (is-eq char 0x61) ;; a + (is-eq char 0x65) ;; e + (is-eq char 0x69) ;; i + (is-eq char 0x6f) ;; o + (is-eq char 0x75) ;; u + (is-eq char 0x79) ;; y + ) +) + +;; pre comment +(define-private (something) + (match opt value (ok (handle-new-value value)) (ok 1)) +) + +(define-read-only (is-borroweable-isolated (asset principal)) + (match (index-of? (contract-call? .pool-reserve-data get-borroweable-isolated-read) asset) + res true + false)) diff --git a/components/clarinet-format/tests/golden/nested_map.clar b/components/clarinet-format/tests/golden/nested_map.clar new file mode 100644 index 000000000..442a96992 --- /dev/null +++ b/components/clarinet-format/tests/golden/nested_map.clar @@ -0,0 +1,27 @@ +;; max_line_length: 80, indentation: 2 +(define-public (mng-name-register) + (map-set name-properties + { + name: name, namespace: namespace + } + { + registered-at: (some burn-block-height), + imported-at: none, + hashed-salted-fqn-preorder: (some hashed-salted-fqn), + preordered-by: (some send-to), + ;; Updated this to be u0, so that renewals are handled through the namespace manager + renewal-height: u0, + stx-burn: u0, + owner: send-to, + } + ) + (print + { + topic: "new-name", + owner: send-to, + name: {name: name, namespace: namespace}, + id: id-to-be-minted, + properties: (map-get? name-properties {name: name, namespace: namespace}) + } + ) +) diff --git a/components/clarinet-format/tests/golden/test.clar b/components/clarinet-format/tests/golden/test.clar new file mode 100644 index 000000000..2116b8d23 --- /dev/null +++ b/components/clarinet-format/tests/golden/test.clar @@ -0,0 +1,14 @@ +;; max_line_length: 80, indentation: 2 +;; private functions +;; #[allow(unchecked_data)] +(define-private (complete-individual-deposits-helper (deposit {txid: (buff 32), vout-index: uint, amount: uint, recipient: principal, burn-hash: (buff 32), burn-height: uint, sweep-txid: (buff 32)}) (helper-response (response uint uint))) + (match helper-response + index + (begin + (try! (unwrap! 
(complete-deposit-wrapper (get txid deposit) (get vout-index deposit) (get amount deposit) (get recipient deposit) (get burn-hash deposit) (get burn-height deposit) (get sweep-txid deposit)) (err (+ ERR_DEPOSIT_INDEX_PREFIX (+ u10 index))))) + (ok (+ index u1)) + ) + err-response + (err err-response) + ) +) diff --git a/components/clarinet-format/tests/golden/traits.clar b/components/clarinet-format/tests/golden/traits.clar new file mode 100644 index 000000000..a311ba316 --- /dev/null +++ b/components/clarinet-format/tests/golden/traits.clar @@ -0,0 +1,5 @@ +;; max_line_length: 80, indentation: 2 +(use-trait token-a-trait 'SPAXYA5XS51713FDTQ8H94EJ4V579CXMTRNBZKSF.token-a.token-trait) +(define-public (forward-get-balance (user principal) (contract )) + (begin + (ok (contract-of contract)))) ;; returns the principal of the contract implementing diff --git a/components/clarinet-format/tests/golden/tuple.clar b/components/clarinet-format/tests/golden/tuple.clar new file mode 100644 index 000000000..f20d86272 --- /dev/null +++ b/components/clarinet-format/tests/golden/tuple.clar @@ -0,0 +1,19 @@ +;; max_line_length: 80, indentation: 2 +(define-public (set-user-reserve + (user principal) + (asset principal) ;; comment + (state + (tuple + (principal-borrow-balance uint) + (last-variable-borrow-cumulative-index uint) + (origination-fee uint) + (stable-borrow-rate uint) + (last-updated-block uint) ;; comment + (use-as-collateral bool) + ) + )) + (begin + (asserts! (is-lending-pool contract-caller) ERR_UNAUTHORIZED) + (contract-call? .pool-reserve-data set-user-reserve-data user asset state) + ) +) diff --git a/test.clar b/test.clar new file mode 100644 index 000000000..955ad2847 --- /dev/null +++ b/test.clar @@ -0,0 +1,9 @@ +;; comment +(slice? "blockstack" u5 u10) ;; Returns (some "stack") +(slice? (list 1 2 3 4 5) u5 u9) ;; Returns none +(slice? (list 1 2 3 4 5) u3 u4) ;; Returns (some (4)) +(slice? "abcd" u1 u3) ;; Returns (some "bc") +(slice? "abcd" u2 u2) ;; Returns (some "") +(slice? "abcd" u3 u1) ;; Returns none +;; whatever +;;asdf asdf