diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 7ed164826..81a61a060 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -55,6 +55,8 @@ At the same time, developers may also add end-to-end tests with sqllogictest. Yo
 `tests/sql` and write sqllogictest to run SQLs in RisingLight and to verify implementation correctness.
 All the files suffix with `.slt` but not prefix with `_` in `tests/sql` will be automatically included in the end-to-end tests.
 
+See [SQLLogicTest and SQLPlannerTest](docs/05-e2e-tests.md) for more information.
+
 You'll need `cargo install cargo-nextest` to run tests.
 
 ## Running Test and Checks
diff --git a/Cargo.lock b/Cargo.lock
index 65bcdfe79..0626de68d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2,6 +2,21 @@
 # It is not intended for manual editing.
 version = 3
 
+[[package]]
+name = "addr2line"
+version = "0.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b"
+dependencies = [
+ "gimli",
+]
+
+[[package]]
+name = "adler"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+
 [[package]]
 name = "aho-corasick"
 version = "0.7.18"
@@ -25,6 +40,9 @@ name = "anyhow"
 version = "1.0.56"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4361135be9122e0870de935d7c439aef945b9f9ddd4199a553b5270b49c82a27"
+dependencies = [
+ "backtrace",
+]
 
 [[package]]
 name = "arrayvec"
@@ -120,6 +138,21 @@ version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
 
+[[package]]
+name = "backtrace"
+version = "0.3.65"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "11a17d453482a265fd5f8479f2a3f405566e6ca627837aaddb85af8b1ab8ef61"
+dependencies = [
+ "addr2line",
+ "cc",
+ "cfg-if 1.0.0",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "rustc-demangle",
+]
+
 [[package]]
 name = "binary-heap-plus"
 version = "0.4.1"
@@ -384,7 +417,9 @@ dependencies = [
  "encode_unicode",
  "libc",
  "once_cell",
+ "regex",
  "terminal_size",
+ "unicode-width",
  "winapi",
 ]
 
@@ -892,6 +927,12 @@ dependencies = [
  "wasi 0.10.2+wasi-snapshot-preview1",
 ]
 
+[[package]]
+name = "gimli"
+version = "0.26.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4"
+
 [[package]]
 name = "glob"
 version = "0.3.0"
@@ -1046,6 +1087,12 @@ dependencies = [
  "termcolor",
 ]
 
+[[package]]
+name = "linked-hash-map"
+version = "0.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3"
+
 [[package]]
 name = "linux-raw-sys"
 version = "0.0.42"
@@ -1159,6 +1206,15 @@ dependencies = [
  "syn",
 ]
 
+[[package]]
+name = "miniz_oxide"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6f5c75688da582b8ffc1f1799e9db273f32133c49e048f614d22ec3256773ccc"
+dependencies = [
+ "adler",
+]
+
 [[package]]
 name = "minstant"
 version = "0.1.1"
@@ -1290,6 +1346,15 @@ version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3"
 
+[[package]]
+name = "object"
+version = "0.28.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424"
+dependencies = [
+ "memchr",
+]
+
 [[package]]
 name = "once_cell"
 version = "1.10.0"
@@ -1761,6 +1826,17 @@ dependencies = [
  "tracing-subscriber",
 ]
 
+[[package]]
+name = "risinglight_plannertest"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "async-trait",
+ "risinglight",
+ "sqlplannertest",
+ "tokio",
+]
+
 [[package]]
 name = "risinglight_proto"
 version = "0.1.3"
@@ -1795,6 +1871,12 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "rustc-demangle"
+version = "0.1.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342"
+
 [[package]]
 name = "rustc_version"
 version = "0.4.0"
@@ -1928,6 +2010,18 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "serde_yaml"
+version = "0.8.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "707d15895415db6628332b737c838b88c598522e4dc70647e59b72312924aebc"
+dependencies = [
+ "indexmap",
+ "ryu",
+ "serde",
+ "yaml-rust",
+]
+
 [[package]]
 name = "sharded-slab"
 version = "0.1.4"
@@ -1946,6 +2040,12 @@ dependencies = [
  "libc",
 ]
 
+[[package]]
+name = "similar"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2e24979f63a11545f5f2c60141afe249d4f19f84581ea2138065e400941d83d3"
+
 [[package]]
 name = "skeptic"
 version = "0.13.7"
@@ -2011,6 +2111,23 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "sqlplannertest"
+version = "0.1.0"
+source = "git+https://github.com/risinglightdb/sqlplannertest-rs#426fdd91e58cec6fa0ad7a1de3d075c9b30faabd"
+dependencies = [
+ "anyhow",
+ "async-trait",
+ "console",
+ "futures-util",
+ "glob",
+ "libtest-mimic",
+ "serde",
+ "serde_yaml",
+ "similar",
+ "tokio",
+]
+
 [[package]]
 name = "stable_deref_trait"
 version = "1.2.0"
@@ -2624,3 +2741,12 @@ checksum = "30b31594f29d27036c383b53b59ed3476874d518f0efb151b27a4c275141390e"
 dependencies = [
  "tap",
 ]
+
+[[package]]
+name = "yaml-rust"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85"
+dependencies = [
+ "linked-hash-map",
+]
diff --git a/Cargo.toml b/Cargo.toml
index d31539c06..af55f9057 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -84,7 +84,8 @@ lto = 'thin'
 [workspace]
 members = [
   "proto",
-  "tests/sqllogictest"
+  "tests/sqllogictest",
+  "tests/sqlplannertest"
 ]
 
 [patch.crates-io]
diff --git a/Makefile b/Makefile
index d23088442..62123cd3b 100644
--- a/Makefile
+++ b/Makefile
@@ -26,6 +26,9 @@ test:
 
 check: fmt_check clippy_check build test docs_check
 
+apply_planner_test:
+	cargo run -p risinglight_plannertest --bin apply-planner-test
+
 clean:
 	cargo clean
 	rm -rf $(TPCH_DBGEN_PATH)
diff --git a/docs/05-e2e-tests.md b/docs/05-e2e-tests.md
new file mode 100644
index 000000000..4e65bd0ed
--- /dev/null
+++ b/docs/05-e2e-tests.md
@@ -0,0 +1,64 @@
+# SQLLogicTest and SQLPlannerTest
+
+RisingLight uses two test frameworks for end-to-end testing.
+
+## SQLLogicTest
+
+SQLLogicTest runs a special `slt` file and compares the actual results against the expected output.
+The test cases are stored under the `tests/sql` folder.
+
+For example, let's look at `order_by.slt`:
+
+```
+statement ok
+create table t(v1 int not null, v2 int not null)
+
+statement ok
+insert into t values(1, 1), (4, 2), (3, 3), (10, 12), (2, 5)
+
+query I
+select v1 from t order by v1 asc
+----
+1
+2
+3
+4
+10
+```
+
+The first three test cases in this file check:
+* whether `create table` works
+* whether `insert` works
+* whether selecting data from the table produces the expected result
+
+We use `statement ok` to ensure a statement runs successfully, and `query` to compare the query result against the expected output after the `----` line (the `I` indicates a single integer result column).
+
+When running `make test`, the test runner will run all files under the `tests/sql` folder.
+
+## SQLPlannerTest
+
+SQLPlannerTest is a regression test framework. We write yaml files to describe the cases we want to test (see the example below).
+The test cases are stored in the `tests/planner_test` folder. Use the following command:
+
+```
+make apply_planner_test
+```
+
+to generate a `.planner.sql` file containing the explain results for each yaml file.
+
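+Each yaml entry specifies the SQL to run, an optional description (`desc`), setup statements (`before`), and
+the tasks to perform. For example, `tests/planner_test/count.yml` added in this PR contains:
+
+```
+- sql: |
+    explain select count(*) from t
+  desc: count(*) is special
+  before:
+    - create table t(v int)
+  tasks:
+    - print
+```
+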
+Generally, we compare the explain results before and after a commit to see how the change affects the
+optimizer's output. These tests do not check the correctness of query results -- they only track how the
+explain output changes across a PR.
\ No newline at end of file
diff --git a/src/storage/error.rs b/src/storage/error.rs
index 25bf5c904..8c31f6443 100644
--- a/src/storage/error.rs
+++ b/src/storage/error.rs
@@ -76,7 +76,7 @@ impl From<Arc<TracedStorageError>> for TracedStorageError {
 
 /// [`StorageResult`] with backtrace.
 #[derive(Error)]
-#[error("{source:?}\n{backtrace:#}")]
+#[error("{source:?}\n{backtrace}")]
 pub struct TracedStorageError {
     #[from]
     source: StorageError,
diff --git a/src/storage/secondary/delete_vector.rs b/src/storage/secondary/delete_vector.rs
index 8341d1ab9..4c5921a60 100644
--- a/src/storage/secondary/delete_vector.rs
+++ b/src/storage/secondary/delete_vector.rs
@@ -3,6 +3,7 @@
 use std::path::Path;
 
 use bitvec::prelude::BitVec;
+use futures::pin_mut;
 use itertools::Itertools;
 use prost::Message;
 use risinglight_proto::rowset::DeleteRecord;
@@ -42,9 +43,10 @@ impl DeleteVector {
     }
 
     pub async fn write_all(
-        file: &mut tokio::fs::File,
+        file: impl tokio::io::AsyncWrite,
         deletes: &[DeleteRecord],
     ) -> StorageResult<()> {
+        pin_mut!(file);
         let mut data = Vec::new();
         for delete in deletes {
             delete.encode_length_delimited(&mut data)?;
diff --git a/src/storage/secondary/manifest.rs b/src/storage/secondary/manifest.rs
index f1260a4c7..8b8ea9dcf 100644
--- a/src/storage/secondary/manifest.rs
+++ b/src/storage/secondary/manifest.rs
@@ -76,11 +76,19 @@ pub enum ManifestOperation {
 
 /// Handles all reads and writes to a manifest file
 pub struct Manifest {
-    file: tokio::fs::File,
+    file: Option<tokio::fs::File>,
     enable_fsync: bool,
 }
 
 impl Manifest {
+    /// Create a mock manifest
+    pub fn new_mock() -> Self {
+        Self {
+            file: None,
+            enable_fsync: false,
+        }
+    }
+
     pub async fn open(path: impl AsRef<Path>, enable_fsync: bool) -> StorageResult<Self> {
         let file = OpenOptions::default()
             .read(true)
@@ -88,13 +96,22 @@ impl Manifest {
             .create(true)
             .open(path.as_ref())
             .await?;
-        Ok(Self { file, enable_fsync })
+        Ok(Self {
+            file: Some(file),
+            enable_fsync,
+        })
     }
 
     pub async fn replay(&mut self) -> StorageResult<Vec<ManifestOperation>> {
+        let file = if let Some(file) = &mut self.file {
+            file
+        } else {
+            return Ok(vec![]);
+        };
+
         let mut data = String::new();
-        self.file.seek(SeekFrom::Start(0)).await?;
-        let mut reader = BufReader::new(&mut self.file);
+        file.seek(SeekFrom::Start(0)).await?;
+        let mut reader = BufReader::new(file);
 
         // TODO: don't read all to memory
         reader.read_to_string(&mut data).await?;
@@ -131,15 +148,21 @@ impl Manifest {
     }
 
     pub async fn append(&mut self, entries: &[ManifestOperation]) -> StorageResult<()> {
+        let file = if let Some(file) = &mut self.file {
+            file
+        } else {
+            return Ok(());
+        };
+
         let mut json = Vec::new();
         serde_json::to_writer(&mut json, &ManifestOperation::Begin)?;
         for entry in entries {
             serde_json::to_writer(&mut json, entry)?;
         }
         serde_json::to_writer(&mut json, &ManifestOperation::End)?;
-        self.file.write_all(&json).await?;
+        file.write_all(&json).await?;
         if self.enable_fsync {
-            self.file.sync_data().await?;
+            file.sync_data().await?;
         }
         Ok(())
     }
diff --git a/src/storage/secondary/options.rs b/src/storage/secondary/options.rs
index a077d7ed8..0807384b2 100644
--- a/src/storage/secondary/options.rs
+++ b/src/storage/secondary/options.rs
@@ -56,6 +56,9 @@ pub struct StorageOptions {
 
     /// Whether record first_key of each block into block_index
     pub record_first_key: bool,
+
+    /// Whether to disable all disk operations, only for test use
+    pub disable_all_disk_operation: bool,
 }
 
 impl StorageOptions {
@@ -74,12 +77,13 @@ impl StorageOptions {
             checksum_type: ChecksumType::Crc32,
             is_rle: false,
             record_first_key: false,
+            disable_all_disk_operation: false,
         }
     }
 
-    pub fn default_for_test(path: PathBuf) -> Self {
+    pub fn default_for_test() -> Self {
         Self {
-            path,
+            path: PathBuf::from("_inaccessible_directory"),
             cache_size: 1024,
             target_rowset_size: 1 << 20,       // 1MB
             target_block_size: 16 * (1 << 10), // 16KB
@@ -87,6 +91,7 @@ impl StorageOptions {
             checksum_type: ChecksumType::None,
             is_rle: false,
             record_first_key: false,
+            disable_all_disk_operation: true,
         }
     }
 }
diff --git a/src/storage/secondary/storage.rs b/src/storage/secondary/storage.rs
index 2faa34c63..6e41d8e6f 100644
--- a/src/storage/secondary/storage.rs
+++ b/src/storage/secondary/storage.rs
@@ -22,21 +22,27 @@ impl SecondaryStorage {
         let catalog = RootCatalog::new();
         let tables = HashMap::new();
 
-        // create folder if not exist
-        if fs::metadata(&options.path).await.is_err() {
-            info!("create db directory at {:?}", options.path);
-            fs::create_dir(&options.path).await?;
-        }
+        if !options.disable_all_disk_operation {
+            // create folder if not exist
+            if fs::metadata(&options.path).await.is_err() {
+                info!("create db directory at {:?}", options.path);
+                fs::create_dir(&options.path).await?;
+            }
 
-        // create DV folder if not exist
-        let dv_directory = options.path.join("dv");
-        if fs::metadata(&dv_directory).await.is_err() {
-            fs::create_dir(&dv_directory).await?;
+            // create DV folder if not exist
+            let dv_directory = options.path.join("dv");
+            if fs::metadata(&dv_directory).await.is_err() {
+                fs::create_dir(&dv_directory).await?;
+            }
         }
 
         let enable_fsync = !matches!(options.io_backend, IOBackend::InMemory(_));
 
-        let mut manifest = Manifest::open(options.path.join("manifest.json"), enable_fsync).await?;
+        let mut manifest = if options.disable_all_disk_operation {
+            Manifest::new_mock()
+        } else {
+            Manifest::open(options.path.join("manifest.json"), enable_fsync).await?
+        };
 
         let manifest_ops = manifest.replay().await?;
 
@@ -105,20 +111,22 @@ impl SecondaryStorage {
 
         let mut changeset = vec![];
 
-        // vacuum unused RowSets
-        let mut dir = fs::read_dir(&options.path).await?;
-        while let Some(entry) = dir.next_entry().await? {
-            if entry.path().is_dir() {
-                if let Some((table_id, rowset_id)) =
-                    entry.file_name().to_str().unwrap().split_once('_')
-                {
-                    if let (Ok(table_id), Ok(rowset_id)) =
-                        (table_id.parse::<u32>(), rowset_id.parse::<u32>())
+        if !options.disable_all_disk_operation {
+            // vacuum unused RowSets
+            let mut dir = fs::read_dir(&options.path).await?;
+            while let Some(entry) = dir.next_entry().await? {
+                if entry.path().is_dir() {
+                    if let Some((table_id, rowset_id)) =
+                        entry.file_name().to_str().unwrap().split_once('_')
                     {
-                        if !rowsets_to_open.contains_key(&(table_id, rowset_id)) {
-                            fs::remove_dir_all(entry.path())
-                                .await
-                                .expect("failed to vacuum unused rowsets");
+                        if let (Ok(table_id), Ok(rowset_id)) =
+                            (table_id.parse::<u32>(), rowset_id.parse::<u32>())
+                        {
+                            if !rowsets_to_open.contains_key(&(table_id, rowset_id)) {
+                                fs::remove_dir_all(entry.path())
+                                    .await
+                                    .expect("failed to vacuum unused rowsets");
+                            }
                         }
                     }
                 }
diff --git a/src/storage/secondary/transaction.rs b/src/storage/secondary/transaction.rs
index 2f8b1bd84..ea934f834 100644
--- a/src/storage/secondary/transaction.rs
+++ b/src/storage/secondary/transaction.rs
@@ -138,13 +138,27 @@ impl SecondaryTransaction {
         let mut dvs = vec![];
         for (rowset_id, deletes) in delete_split_map {
             let dv_id = self.table.generate_dv_id();
-            let mut file = tokio::fs::OpenOptions::default()
-                .write(true)
-                .create_new(true)
-                .open(self.table.get_dv_path(rowset_id, dv_id))
-                .await?;
-            DeleteVector::write_all(&mut file, &deletes).await?;
-            file.sync_data().await?;
+            use bytes::Bytes;
+
+            use super::IOBackend;
+            let path = self.table.get_dv_path(rowset_id, dv_id);
+            match &self.table.storage_options.io_backend {
+                IOBackend::InMemory(map) => {
+                    let mut buf = vec![];
+                    DeleteVector::write_all(&mut buf, &deletes).await?;
+                    let mut guard = map.lock();
+                    guard.insert(path, Bytes::from(buf));
+                }
+                _ => {
+                    let mut file = tokio::fs::OpenOptions::default()
+                        .write(true)
+                        .create_new(true)
+                        .open(path)
+                        .await?;
+                    DeleteVector::write_all(&mut file, &deletes).await?;
+                    file.sync_data().await?;
+                }
+            }
             dvs.push(DeleteVector::new(dv_id, rowset_id, deletes));
         }
 
@@ -310,7 +324,10 @@ impl SecondaryTransaction {
         if self.mem.is_none() {
             let rowset_id = self.table.generate_rowset_id();
             let directory = self.table.get_rowset_path(rowset_id);
-            tokio::fs::create_dir(&directory).await?;
+
+            if !self.table.storage_options.disable_all_disk_operation {
+                tokio::fs::create_dir(&directory).await?;
+            }
 
             self.mem = Some(SecondaryMemRowsetImpl::new(
                 self.table.columns.clone(),
diff --git a/src/storage/secondary/version_manager.rs b/src/storage/secondary/version_manager.rs
index 2e751fb13..81666ce72 100644
--- a/src/storage/secondary/version_manager.rs
+++ b/src/storage/secondary/version_manager.rs
@@ -344,7 +344,9 @@ impl VersionManager {
                 .path
                 .join(format!("{}_{}", table_id, rowset_id));
             info!("vacuum {}_{}", table_id, rowset_id);
-            tokio::fs::remove_dir_all(path).await?;
+            if !self.storage_options.disable_all_disk_operation {
+                tokio::fs::remove_dir_all(path).await?;
+            }
         }
 
         Ok(())
diff --git a/tests/planner_test/count.planner.sql b/tests/planner_test/count.planner.sql
new file mode 100644
index 000000000..dd8587a3b
--- /dev/null
+++ b/tests/planner_test/count.planner.sql
@@ -0,0 +1,32 @@
+-- count(*) is special
+explain select count(*) from t
+
+/*
+PhysicalProjection:
+    InputRef #0
+  PhysicalSimpleAgg:
+      count(InputRef #0) -> INT
+    PhysicalTableScan:
+        table #0,
+        columns [],
+        with_row_handler: true,
+        is_sorted: false,
+        expr: None
+*/
+
+-- count(*) with projection
+explain select count(*) + 1 from t
+
+/*
+PhysicalProjection:
+    (InputRef #0 + 1)
+  PhysicalSimpleAgg:
+      count(InputRef #0) -> INT
+    PhysicalTableScan:
+        table #0,
+        columns [],
+        with_row_handler: true,
+        is_sorted: false,
+        expr: None
+*/
+
diff --git a/tests/planner_test/count.yml b/tests/planner_test/count.yml
new file mode 100644
index 000000000..104c3b9de
--- /dev/null
+++ b/tests/planner_test/count.yml
@@ -0,0 +1,15 @@
+- sql: |
+    explain select count(*) from t
+  desc: count(*) is special
+  before:
+    - create table t(v int)
+  tasks:
+    - print
+
+- sql: |
+    explain select count(*) + 1 from t
+  desc: count(*) with projection
+  before:
+    - create table t(v int)
+  tasks:
+    - print
diff --git a/tests/planner_test/tpch.planner.sql b/tests/planner_test/tpch.planner.sql
new file mode 100644
index 000000000..295447fab
--- /dev/null
+++ b/tests/planner_test/tpch.planner.sql
@@ -0,0 +1,413 @@
+-- prepare
+CREATE TABLE NATION  (
+    N_NATIONKEY  INTEGER NOT NULL,
+    N_NAME       CHAR(25) NOT NULL,
+    N_REGIONKEY  INTEGER NOT NULL,
+    N_COMMENT    VARCHAR(152)
+);
+
+CREATE TABLE REGION  (
+    R_REGIONKEY  INTEGER NOT NULL,
+    R_NAME       CHAR(25) NOT NULL,
+    R_COMMENT    VARCHAR(152)
+);
+
+CREATE TABLE PART  (
+    P_PARTKEY     INTEGER NOT NULL,
+    P_NAME        VARCHAR(55) NOT NULL,
+    P_MFGR        CHAR(25) NOT NULL,
+    P_BRAND       CHAR(10) NOT NULL,
+    P_TYPE        VARCHAR(25) NOT NULL,
+    P_SIZE        INTEGER NOT NULL,
+    P_CONTAINER   CHAR(10) NOT NULL,
+    P_RETAILPRICE DECIMAL(15,2) NOT NULL,
+    P_COMMENT     VARCHAR(23) NOT NULL
+);
+
+CREATE TABLE SUPPLIER (
+    S_SUPPKEY     INTEGER NOT NULL,
+    S_NAME        CHAR(25) NOT NULL,
+    S_ADDRESS     VARCHAR(40) NOT NULL,
+    S_NATIONKEY   INTEGER NOT NULL,
+    S_PHONE       CHAR(15) NOT NULL,
+    S_ACCTBAL     DECIMAL(15,2) NOT NULL,
+    S_COMMENT     VARCHAR(101) NOT NULL
+);
+
+CREATE TABLE PARTSUPP (
+    PS_PARTKEY     INTEGER NOT NULL,
+    PS_SUPPKEY     INTEGER NOT NULL,
+    PS_AVAILQTY    INTEGER NOT NULL,
+    PS_SUPPLYCOST  DECIMAL(15,2)  NOT NULL,
+    PS_COMMENT     VARCHAR(199) NOT NULL
+);
+
+CREATE TABLE CUSTOMER (
+    C_CUSTKEY     INTEGER NOT NULL,
+    C_NAME        VARCHAR(25) NOT NULL,
+    C_ADDRESS     VARCHAR(40) NOT NULL,
+    C_NATIONKEY   INTEGER NOT NULL,
+    C_PHONE       CHAR(15) NOT NULL,
+    C_ACCTBAL     DECIMAL(15,2)   NOT NULL,
+    C_MKTSEGMENT  CHAR(10) NOT NULL,
+    C_COMMENT     VARCHAR(117) NOT NULL
+);
+
+CREATE TABLE ORDERS (
+    O_ORDERKEY       INTEGER NOT NULL,
+    O_CUSTKEY        INTEGER NOT NULL,
+    O_ORDERSTATUS    CHAR(1) NOT NULL,
+    O_TOTALPRICE     DECIMAL(15,2) NOT NULL,
+    O_ORDERDATE      DATE NOT NULL,
+    O_ORDERPRIORITY  CHAR(15) NOT NULL,  
+    O_CLERK          CHAR(15) NOT NULL, 
+    O_SHIPPRIORITY   INTEGER NOT NULL,
+    O_COMMENT        VARCHAR(79) NOT NULL
+);
+
+CREATE TABLE LINEITEM (
+    L_ORDERKEY      INTEGER NOT NULL,
+    L_PARTKEY       INTEGER NOT NULL,
+    L_SUPPKEY       INTEGER NOT NULL,
+    L_LINENUMBER    INTEGER NOT NULL,
+    L_QUANTITY      DECIMAL(15,2) NOT NULL,
+    L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL,
+    L_DISCOUNT      DECIMAL(15,2) NOT NULL,
+    L_TAX           DECIMAL(15,2) NOT NULL,
+    L_RETURNFLAG    CHAR(1) NOT NULL,
+    L_LINESTATUS    CHAR(1) NOT NULL,
+    L_SHIPDATE      DATE NOT NULL,
+    L_COMMITDATE    DATE NOT NULL,
+    L_RECEIPTDATE   DATE NOT NULL,
+    L_SHIPINSTRUCT  CHAR(25) NOT NULL,
+    L_SHIPMODE      CHAR(10) NOT NULL,
+    L_COMMENT       VARCHAR(44) NOT NULL
+);
+
+/*
+
+*/
+
+-- tpch-q1: TPC-H Q1
+explain select
+    l_returnflag,
+    l_linestatus,
+    sum(l_quantity) as sum_qty,
+    sum(l_extendedprice) as sum_base_price,
+    sum(l_extendedprice * (1 - l_discount)) as sum_disc_price,
+    sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge,
+    avg(l_quantity) as avg_qty,
+    avg(l_extendedprice) as avg_price,
+    avg(l_discount) as avg_disc,
+    count(*) as count_order
+from
+    lineitem
+where
+    l_shipdate <= date '1998-12-01' - interval '71' day
+group by
+    l_returnflag,
+    l_linestatus
+order by
+    l_returnflag,
+    l_linestatus;
+
+/*
+PhysicalOrder:
+    [InputRef #0 (asc), InputRef #1 (asc)]
+  PhysicalProjection:
+      InputRef #0
+      InputRef #1
+      InputRef #2 (alias to sum_qty)
+      InputRef #3 (alias to sum_base_price)
+      InputRef #4 (alias to sum_disc_price)
+      InputRef #5 (alias to sum_charge)
+      (InputRef #2 / InputRef #7) (alias to avg_qty)
+      (InputRef #3 / InputRef #9) (alias to avg_price)
+      (InputRef #10 / InputRef #11) (alias to avg_disc)
+      InputRef #12 (alias to count_order)
+    PhysicalHashAgg:
+        InputRef #1
+        InputRef #2
+        sum(InputRef #3) -> NUMERIC(15,2)
+        sum(InputRef #4) -> NUMERIC(15,2)
+        sum((InputRef #4 * (1 - InputRef #5))) -> NUMERIC(15,2) (null)
+        sum(((InputRef #4 * (1 - InputRef #5)) * (1 + InputRef #6))) -> NUMERIC(15,2) (null)
+        sum(InputRef #3) -> NUMERIC(15,2)
+        count(InputRef #3) -> INT
+        sum(InputRef #4) -> NUMERIC(15,2)
+        count(InputRef #4) -> INT
+        sum(InputRef #5) -> NUMERIC(15,2)
+        count(InputRef #5) -> INT
+        count(InputRef #0) -> INT
+      PhysicalTableScan:
+          table #7,
+          columns [10, 8, 9, 4, 5, 6, 7],
+          with_row_handler: false,
+          is_sorted: false,
+          expr: LtEq(InputRef #0, Date(Date(10490)) (const))
+*/
+
+-- tpch-q3: TPC-H Q3
+explain select
+    l_orderkey,
+    sum(l_extendedprice * (1 - l_discount)) as revenue,
+    o_orderdate,
+    o_shippriority
+from
+    customer,
+    orders,
+    lineitem
+where
+    c_mktsegment = 'BUILDING'
+    and c_custkey = o_custkey
+    and l_orderkey = o_orderkey
+    and o_orderdate < date '1995-03-15'
+    and l_shipdate > date '1995-03-15'
+group by
+    l_orderkey,
+    o_orderdate,
+    o_shippriority
+order by
+    revenue desc,
+    o_orderdate
+limit 10;
+
+/*
+PhysicalTopN: offset: 0, limit: 10, order by [InputRef #1 (desc), InputRef #2 (asc)]
+  PhysicalProjection:
+      InputRef #0
+      InputRef #3 (alias to revenue)
+      InputRef #1
+      InputRef #2
+    PhysicalHashAgg:
+        InputRef #6
+        InputRef #4
+        InputRef #5
+        sum((InputRef #8 * (1 - InputRef #9))) -> NUMERIC(15,2) (null)
+      PhysicalHashJoin:
+          op Inner,
+          predicate: Eq(InputRef #3, InputRef #6)
+        PhysicalHashJoin:
+            op Inner,
+            predicate: Eq(InputRef #1, InputRef #2)
+          PhysicalTableScan:
+              table #5,
+              columns [6, 0],
+              with_row_handler: false,
+              is_sorted: false,
+              expr: Eq(InputRef #0, String("BUILDING") (const))
+          PhysicalTableScan:
+              table #6,
+              columns [1, 0, 4, 7],
+              with_row_handler: false,
+              is_sorted: false,
+              expr: Lt(InputRef #2, Date(Date(9204)) (const))
+        PhysicalTableScan:
+            table #7,
+            columns [0, 10, 5, 6],
+            with_row_handler: false,
+            is_sorted: false,
+            expr: Gt(InputRef #1, Date(Date(9204)) (const))
+*/
+
+-- tpch-q5: TPC-H Q5
+explain select
+    n_name,
+    sum(l_extendedprice * (1 - l_discount)) as revenue
+from
+    customer,
+    orders,
+    lineitem,
+    supplier,
+    nation,
+    region
+where
+    c_custkey = o_custkey
+    and l_orderkey = o_orderkey
+    and l_suppkey = s_suppkey
+    and c_nationkey = s_nationkey
+    and s_nationkey = n_nationkey
+    and n_regionkey = r_regionkey
+    and r_name = 'AFRICA'
+    and o_orderdate >= date '1994-01-01'
+    and o_orderdate < date '1994-01-01' + interval '1' year
+group by
+    n_name
+order by
+    revenue desc;
+
+/*
+PhysicalOrder:
+    [InputRef #1 (desc)]
+  PhysicalProjection:
+      InputRef #0
+      InputRef #1 (alias to revenue)
+    PhysicalHashAgg:
+        InputRef #13
+        sum((InputRef #7 * (1 - InputRef #8))) -> NUMERIC(15,2) (null)
+      PhysicalHashJoin:
+          op Inner,
+          predicate: Eq(InputRef #12, InputRef #14)
+        PhysicalHashJoin:
+            op Inner,
+            predicate: Eq(InputRef #10, InputRef #11)
+          PhysicalHashJoin:
+              op Inner,
+              predicate: And(Eq(InputRef #6, InputRef #9), Eq(InputRef #1, InputRef #10))
+            PhysicalHashJoin:
+                op Inner,
+                predicate: Eq(InputRef #3, InputRef #5)
+              PhysicalHashJoin:
+                  op Inner,
+                  predicate: Eq(InputRef #0, InputRef #2)
+                PhysicalTableScan:
+                    table #5,
+                    columns [0, 3],
+                    with_row_handler: false,
+                    is_sorted: false,
+                    expr: None
+                PhysicalTableScan:
+                    table #6,
+                    columns [1, 0, 4],
+                    with_row_handler: false,
+                    is_sorted: false,
+                    expr: And(GtEq(InputRef #2, Date(Date(8766)) (const)), Lt(InputRef #2, Date(Date(9131)) (const)))
+              PhysicalTableScan:
+                  table #7,
+                  columns [0, 2, 5, 6],
+                  with_row_handler: false,
+                  is_sorted: false,
+                  expr: None
+            PhysicalTableScan:
+                table #3,
+                columns [0, 3],
+                with_row_handler: false,
+                is_sorted: false,
+                expr: None
+          PhysicalTableScan:
+              table #0,
+              columns [0, 2, 1],
+              with_row_handler: false,
+              is_sorted: false,
+              expr: None
+        PhysicalTableScan:
+            table #1,
+            columns [0, 1],
+            with_row_handler: false,
+            is_sorted: false,
+            expr: Eq(InputRef #1, String("AFRICA") (const))
+*/
+
+-- tpch-q6
+explain select
+    sum(l_extendedprice * l_discount) as revenue
+from
+    lineitem
+where
+    l_shipdate >= date '1994-01-01'
+    and l_shipdate < date '1994-01-01' + interval '1' year
+    and l_discount between 0.08 - 0.01 and 0.08 + 0.01
+    and l_quantity < 24;
+
+/*
+PhysicalProjection:
+    InputRef #0 (alias to revenue)
+  PhysicalSimpleAgg:
+      sum((InputRef #3 * InputRef #1)) -> NUMERIC(15,2) (null)
+    PhysicalTableScan:
+        table #7,
+        columns [10, 6, 4, 5],
+        with_row_handler: false,
+        is_sorted: false,
+        expr: And(And(And(GtEq(InputRef #0, Date(Date(8766)) (const)), Lt(InputRef #0, Date(Date(9131)) (const))), And(GtEq(InputRef #1, Decimal(0.07) (const)), LtEq(InputRef #1, Decimal(0.09) (const)))), Lt(InputRef #2, Decimal(24) (const)))
+*/
+
+-- tpch-q10: TPC-H Q10
+explain select
+    c_custkey,
+    c_name,
+    sum(l_extendedprice * (1 - l_discount)) as revenue,
+    c_acctbal,
+    n_name,
+    c_address,
+    c_phone,
+    c_comment
+from
+    customer,
+    orders,
+    lineitem,
+    nation
+where
+    c_custkey = o_custkey
+    and l_orderkey = o_orderkey
+    and o_orderdate >= date '1993-10-01'
+    and o_orderdate < date '1993-10-01' + interval '3' month
+    and l_returnflag = 'R'
+    and c_nationkey = n_nationkey
+group by
+    c_custkey,
+    c_name,
+    c_acctbal,
+    c_phone,
+    n_name,
+    c_address,
+    c_comment
+order by
+    revenue desc
+limit 20;
+
+/*
+PhysicalTopN: offset: 0, limit: 20, order by [InputRef #2 (desc)]
+  PhysicalProjection:
+      InputRef #0
+      InputRef #1
+      InputRef #7 (alias to revenue)
+      InputRef #2
+      InputRef #4
+      InputRef #5
+      InputRef #3
+      InputRef #6
+    PhysicalHashAgg:
+        InputRef #0
+        InputRef #2
+        InputRef #3
+        InputRef #5
+        InputRef #15
+        InputRef #4
+        InputRef #6
+        sum((InputRef #12 * (1 - InputRef #13))) -> NUMERIC(15,2) (null)
+      PhysicalHashJoin:
+          op Inner,
+          predicate: Eq(InputRef #1, InputRef #14)
+        PhysicalHashJoin:
+            op Inner,
+            predicate: Eq(InputRef #8, InputRef #10)
+          PhysicalHashJoin:
+              op Inner,
+              predicate: Eq(InputRef #0, InputRef #7)
+            PhysicalTableScan:
+                table #5,
+                columns [0, 3, 1, 5, 2, 4, 7],
+                with_row_handler: false,
+                is_sorted: false,
+                expr: None
+            PhysicalTableScan:
+                table #6,
+                columns [1, 0, 4],
+                with_row_handler: false,
+                is_sorted: false,
+                expr: And(GtEq(InputRef #2, Date(Date(8674)) (const)), Lt(InputRef #2, Date(Date(8766)) (const)))
+          PhysicalTableScan:
+              table #7,
+              columns [0, 8, 5, 6],
+              with_row_handler: false,
+              is_sorted: false,
+              expr: Eq(InputRef #1, String("R") (const))
+        PhysicalTableScan:
+            table #0,
+            columns [0, 1],
+            with_row_handler: false,
+            is_sorted: false,
+            expr: None
+*/
+
diff --git a/tests/planner_test/tpch.yml b/tests/planner_test/tpch.yml
new file mode 100644
index 000000000..2fcfc3942
--- /dev/null
+++ b/tests/planner_test/tpch.yml
@@ -0,0 +1,229 @@
+- id: prepare
+  sql: |
+    CREATE TABLE NATION  (
+        N_NATIONKEY  INTEGER NOT NULL,
+        N_NAME       CHAR(25) NOT NULL,
+        N_REGIONKEY  INTEGER NOT NULL,
+        N_COMMENT    VARCHAR(152)
+    );
+
+    CREATE TABLE REGION  (
+        R_REGIONKEY  INTEGER NOT NULL,
+        R_NAME       CHAR(25) NOT NULL,
+        R_COMMENT    VARCHAR(152)
+    );
+
+    CREATE TABLE PART  (
+        P_PARTKEY     INTEGER NOT NULL,
+        P_NAME        VARCHAR(55) NOT NULL,
+        P_MFGR        CHAR(25) NOT NULL,
+        P_BRAND       CHAR(10) NOT NULL,
+        P_TYPE        VARCHAR(25) NOT NULL,
+        P_SIZE        INTEGER NOT NULL,
+        P_CONTAINER   CHAR(10) NOT NULL,
+        P_RETAILPRICE DECIMAL(15,2) NOT NULL,
+        P_COMMENT     VARCHAR(23) NOT NULL
+    );
+
+    CREATE TABLE SUPPLIER (
+        S_SUPPKEY     INTEGER NOT NULL,
+        S_NAME        CHAR(25) NOT NULL,
+        S_ADDRESS     VARCHAR(40) NOT NULL,
+        S_NATIONKEY   INTEGER NOT NULL,
+        S_PHONE       CHAR(15) NOT NULL,
+        S_ACCTBAL     DECIMAL(15,2) NOT NULL,
+        S_COMMENT     VARCHAR(101) NOT NULL
+    );
+
+    CREATE TABLE PARTSUPP (
+        PS_PARTKEY     INTEGER NOT NULL,
+        PS_SUPPKEY     INTEGER NOT NULL,
+        PS_AVAILQTY    INTEGER NOT NULL,
+        PS_SUPPLYCOST  DECIMAL(15,2)  NOT NULL,
+        PS_COMMENT     VARCHAR(199) NOT NULL
+    );
+
+    CREATE TABLE CUSTOMER (
+        C_CUSTKEY     INTEGER NOT NULL,
+        C_NAME        VARCHAR(25) NOT NULL,
+        C_ADDRESS     VARCHAR(40) NOT NULL,
+        C_NATIONKEY   INTEGER NOT NULL,
+        C_PHONE       CHAR(15) NOT NULL,
+        C_ACCTBAL     DECIMAL(15,2)   NOT NULL,
+        C_MKTSEGMENT  CHAR(10) NOT NULL,
+        C_COMMENT     VARCHAR(117) NOT NULL
+    );
+
+    CREATE TABLE ORDERS (
+        O_ORDERKEY       INTEGER NOT NULL,
+        O_CUSTKEY        INTEGER NOT NULL,
+        O_ORDERSTATUS    CHAR(1) NOT NULL,
+        O_TOTALPRICE     DECIMAL(15,2) NOT NULL,
+        O_ORDERDATE      DATE NOT NULL,
+        O_ORDERPRIORITY  CHAR(15) NOT NULL,  
+        O_CLERK          CHAR(15) NOT NULL, 
+        O_SHIPPRIORITY   INTEGER NOT NULL,
+        O_COMMENT        VARCHAR(79) NOT NULL
+    );
+
+    CREATE TABLE LINEITEM (
+        L_ORDERKEY      INTEGER NOT NULL,
+        L_PARTKEY       INTEGER NOT NULL,
+        L_SUPPKEY       INTEGER NOT NULL,
+        L_LINENUMBER    INTEGER NOT NULL,
+        L_QUANTITY      DECIMAL(15,2) NOT NULL,
+        L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL,
+        L_DISCOUNT      DECIMAL(15,2) NOT NULL,
+        L_TAX           DECIMAL(15,2) NOT NULL,
+        L_RETURNFLAG    CHAR(1) NOT NULL,
+        L_LINESTATUS    CHAR(1) NOT NULL,
+        L_SHIPDATE      DATE NOT NULL,
+        L_COMMITDATE    DATE NOT NULL,
+        L_RECEIPTDATE   DATE NOT NULL,
+        L_SHIPINSTRUCT  CHAR(25) NOT NULL,
+        L_SHIPMODE      CHAR(10) NOT NULL,
+        L_COMMENT       VARCHAR(44) NOT NULL
+    );
+
+- id: tpch-q1
+  sql: |
+    explain select
+        l_returnflag,
+        l_linestatus,
+        sum(l_quantity) as sum_qty,
+        sum(l_extendedprice) as sum_base_price,
+        sum(l_extendedprice * (1 - l_discount)) as sum_disc_price,
+        sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge,
+        avg(l_quantity) as avg_qty,
+        avg(l_extendedprice) as avg_price,
+        avg(l_discount) as avg_disc,
+        count(*) as count_order
+    from
+        lineitem
+    where
+        l_shipdate <= date '1998-12-01' - interval '71' day
+    group by
+        l_returnflag,
+        l_linestatus
+    order by
+        l_returnflag,
+        l_linestatus;
+  desc: TPC-H Q1
+  before: ["*prepare"]
+  tasks:
+    - print
+
+- id: tpch-q3
+  sql: |
+    explain select
+        l_orderkey,
+        sum(l_extendedprice * (1 - l_discount)) as revenue,
+        o_orderdate,
+        o_shippriority
+    from
+        customer,
+        orders,
+        lineitem
+    where
+        c_mktsegment = 'BUILDING'
+        and c_custkey = o_custkey
+        and l_orderkey = o_orderkey
+        and o_orderdate < date '1995-03-15'
+        and l_shipdate > date '1995-03-15'
+    group by
+        l_orderkey,
+        o_orderdate,
+        o_shippriority
+    order by
+        revenue desc,
+        o_orderdate
+    limit 10;
+  desc: TPC-H Q3
+  before: ["*prepare"]
+  tasks:
+    - print
+
+- id: tpch-q5
+  sql: |
+    explain select
+        n_name,
+        sum(l_extendedprice * (1 - l_discount)) as revenue
+    from
+        customer,
+        orders,
+        lineitem,
+        supplier,
+        nation,
+        region
+    where
+        c_custkey = o_custkey
+        and l_orderkey = o_orderkey
+        and l_suppkey = s_suppkey
+        and c_nationkey = s_nationkey
+        and s_nationkey = n_nationkey
+        and n_regionkey = r_regionkey
+        and r_name = 'AFRICA'
+        and o_orderdate >= date '1994-01-01'
+        and o_orderdate < date '1994-01-01' + interval '1' year
+    group by
+        n_name
+    order by
+        revenue desc;
+  desc: TPC-H Q5
+  before: ["*prepare"]
+  tasks:
+    - print
+
+- id: tpch-q6
+  sql: |
+    explain select
+        sum(l_extendedprice * l_discount) as revenue
+    from
+        lineitem
+    where
+        l_shipdate >= date '1994-01-01'
+        and l_shipdate < date '1994-01-01' + interval '1' year
+        and l_discount between 0.08 - 0.01 and 0.08 + 0.01
+        and l_quantity < 24;
+  before: ["*prepare"]
+  tasks:
+    - print
+
+- id: tpch-q10
+  sql: |
+    explain select
+        c_custkey,
+        c_name,
+        sum(l_extendedprice * (1 - l_discount)) as revenue,
+        c_acctbal,
+        n_name,
+        c_address,
+        c_phone,
+        c_comment
+    from
+        customer,
+        orders,
+        lineitem,
+        nation
+    where
+        c_custkey = o_custkey
+        and l_orderkey = o_orderkey
+        and o_orderdate >= date '1993-10-01'
+        and o_orderdate < date '1993-10-01' + interval '3' month
+        and l_returnflag = 'R'
+        and c_nationkey = n_nationkey
+    group by
+        c_custkey,
+        c_name,
+        c_acctbal,
+        c_phone,
+        n_name,
+        c_address,
+        c_comment
+    order by
+        revenue desc
+    limit 20;
+  desc: TPC-H Q10
+  before: ["*prepare"]
+  tasks:
+    - print
diff --git a/tests/sqllogictest/src/lib.rs b/tests/sqllogictest/src/lib.rs
index eeb1c10ce..4ff4a013f 100644
--- a/tests/sqllogictest/src/lib.rs
+++ b/tests/sqllogictest/src/lib.rs
@@ -6,7 +6,6 @@ use std::sync::Arc;
 use risinglight::array::*;
 use risinglight::storage::SecondaryStorageOptions;
 use risinglight::{Database, Error};
-use tempfile::tempdir;
 
 pub async fn test_mem(name: &str) {
     init_logger();
@@ -28,12 +27,7 @@ pub async fn test_mem(name: &str) {
 
 pub async fn test_disk(name: &str) {
     init_logger();
-    // still need a temp_dir to write manifest
-    let temp_dir = tempdir().unwrap();
-    let db = Database::new_on_disk(SecondaryStorageOptions::default_for_test(
-        temp_dir.path().to_path_buf(),
-    ))
-    .await;
+    let db = Database::new_on_disk(SecondaryStorageOptions::default_for_test()).await;
     let db = Arc::new(db);
     let mut tester = sqllogictest::Runner::new(DatabaseWrapper { db: db.clone() });
     tester.enable_testdir();
diff --git a/tests/sqlplannertest/Cargo.toml b/tests/sqlplannertest/Cargo.toml
new file mode 100644
index 000000000..846c01321
--- /dev/null
+++ b/tests/sqlplannertest/Cargo.toml
@@ -0,0 +1,16 @@
+[package]
+name = "risinglight_plannertest"
+version = "0.1.0"
+edition = "2021"
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+anyhow = { version = "1", features = ["backtrace"] }
+async-trait = "0.1"
+risinglight = { path = "../.." }
+sqlplannertest = { git = "https://github.com/risinglightdb/sqlplannertest-rs" }
+tokio = { version = "1", features = ["rt", "rt-multi-thread", "macros", "fs"] }
+
+[[test]]
+name = "plannertest"
+harness = false
diff --git a/tests/sqlplannertest/src/bin/apply-planner-test.rs b/tests/sqlplannertest/src/bin/apply-planner-test.rs
new file mode 100644
index 000000000..61204a19d
--- /dev/null
+++ b/tests/sqlplannertest/src/bin/apply-planner-test.rs
@@ -0,0 +1,15 @@
+use std::path::Path;
+
+use anyhow::Result;
+
+#[tokio::main]
+async fn main() -> Result<()> {
+    sqlplannertest::planner_test_apply(
+        Path::new(env!("CARGO_MANIFEST_DIR"))
+            .join("..")
+            .join("planner_test"),
+        || async { Ok(risinglight_plannertest::DatabaseWrapper::default()) },
+    )
+    .await?;
+    Ok(())
+}
diff --git a/tests/sqlplannertest/src/lib.rs b/tests/sqlplannertest/src/lib.rs
new file mode 100644
index 000000000..9decde0f6
--- /dev/null
+++ b/tests/sqlplannertest/src/lib.rs
@@ -0,0 +1,30 @@
+// Copyright 2022 RisingLight Project Authors. Licensed under Apache-2.0.
+
+use anyhow::Error;
+use risinglight::array::*;
+use risinglight::storage::SecondaryStorageOptions;
+use risinglight::Database;
+use sqlplannertest::ParsedTestCase;
+
+#[derive(Default)]
+pub struct DatabaseWrapper;
+
+#[async_trait::async_trait]
+impl sqlplannertest::PlannerTestRunner for DatabaseWrapper {
+    async fn run(&mut self, test_case: &ParsedTestCase) -> Result<String, Error> {
+        if !test_case.tasks.is_empty() {
+            let db = Database::new_on_disk(SecondaryStorageOptions::default_for_test()).await;
+            for sql in &test_case.before_sql {
+                db.run(sql).await?;
+            }
+            let chunks = db.run(&test_case.sql).await?;
+            let output = chunks
+                .iter()
+                .map(datachunk_to_sqllogictest_string)
+                .collect();
+            Ok(output)
+        } else {
+            Ok(String::new())
+        }
+    }
+}
diff --git a/tests/sqlplannertest/tests/plannertest.rs b/tests/sqlplannertest/tests/plannertest.rs
new file mode 100644
index 000000000..b54fe4340
--- /dev/null
+++ b/tests/sqlplannertest/tests/plannertest.rs
@@ -0,0 +1,13 @@
+use std::path::Path;
+
+use anyhow::Result;
+
+fn main() -> Result<()> {
+    sqlplannertest::planner_test_runner(
+        Path::new(env!("CARGO_MANIFEST_DIR"))
+            .join("..")
+            .join("planner_test"),
+        || async { Ok(risinglight_plannertest::DatabaseWrapper::default()) },
+    )?;
+    Ok(())
+}