From e08b908bd4c7050165f890f487980fa38978e102 Mon Sep 17 00:00:00 2001 From: ananclub Date: Wed, 19 Oct 2016 13:13:31 +0800 Subject: [PATCH 01/48] fix windows dynamic build crash --- backup.go | 6 +++--- db.go | 50 +++++++++++++++++++++++++------------------------- dynflag.go | 2 +- iterator.go | 2 +- options.go | 17 ++++++----------- slice.go | 3 ++- 6 files changed, 38 insertions(+), 42 deletions(-) diff --git a/backup.go b/backup.go index a6673ff8..4b37379c 100644 --- a/backup.go +++ b/backup.go @@ -89,7 +89,7 @@ func OpenBackupEngine(opts *Options, path string) (*BackupEngine, error) { be := C.rocksdb_backup_engine_open(opts.c, cpath, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return &BackupEngine{ @@ -110,7 +110,7 @@ func (b *BackupEngine) CreateNewBackup(db *DB) error { C.rocksdb_backup_engine_create_new_backup(b.c, db.c, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } @@ -138,7 +138,7 @@ func (b *BackupEngine) RestoreDBFromLatestBackup(dbDir, walDir string, ro *Resto C.rocksdb_backup_engine_restore_db_from_latest_backup(b.c, cDbDir, cWalDir, ro.c, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil diff --git a/db.go b/db.go index 10c7cb1a..ee8ee34d 100644 --- a/db.go +++ b/db.go @@ -31,7 +31,7 @@ func OpenDb(opts *Options, name string) (*DB, error) { defer C.free(unsafe.Pointer(cName)) db := C.rocksdb_open(opts.c, cName, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return &DB{ @@ -50,7 +50,7 @@ func OpenDbForReadOnly(opts *Options, name string, errorIfLogFileExist bool) (*D defer C.free(unsafe.Pointer(cName)) db := C.rocksdb_open_for_read_only(opts.c, cName, boolToChar(errorIfLogFileExist), &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return &DB{ @@ -103,7 +103,7 @@ func OpenDbColumnFamilies( &cErr, ) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, nil, errors.New(C.GoString(cErr)) } @@ -165,7 +165,7 @@ func OpenDbForReadOnlyColumnFamilies( &cErr, ) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, nil, errors.New(C.GoString(cErr)) } @@ -191,7 +191,7 @@ func ListColumnFamilies(opts *Options, name string) ([]string, error) { defer C.free(unsafe.Pointer(cName)) cNames := C.rocksdb_list_column_families(opts.c, cName, &cLen, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } namesLen := int(cLen) @@ -223,7 +223,7 @@ func (db *DB) Get(opts *ReadOptions, key []byte) (*Slice, error) { ) cValue := C.rocksdb_get(db.c, opts.c, cKey, C.size_t(len(key)), &cValLen, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return NewSlice(cValue, cValLen), nil @@ -238,13 +238,13 @@ func (db *DB) GetBytes(opts *ReadOptions, key []byte) ([]byte, error) { ) cValue := C.rocksdb_get(db.c, opts.c, cKey, C.size_t(len(key)), &cValLen, &cErr) if cErr != nil { - defer 
C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } if cValue == nil { return nil, nil } - defer C.free(unsafe.Pointer(cValue)) + defer C.rocksdb_free(unsafe.Pointer(cValue)) return C.GoBytes(unsafe.Pointer(cValue), C.int(cValLen)), nil } @@ -257,7 +257,7 @@ func (db *DB) GetCF(opts *ReadOptions, cf *ColumnFamilyHandle, key []byte) (*Sli ) cValue := C.rocksdb_get_cf(db.c, opts.c, cf.c, cKey, C.size_t(len(key)), &cValLen, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return NewSlice(cValue, cValLen), nil @@ -272,7 +272,7 @@ func (db *DB) Put(opts *WriteOptions, key, value []byte) error { ) C.rocksdb_put(db.c, opts.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -287,7 +287,7 @@ func (db *DB) PutCF(opts *WriteOptions, cf *ColumnFamilyHandle, key, value []byt ) C.rocksdb_put_cf(db.c, opts.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -301,7 +301,7 @@ func (db *DB) Delete(opts *WriteOptions, key []byte) error { ) C.rocksdb_delete(db.c, opts.c, cKey, C.size_t(len(key)), &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -315,7 +315,7 @@ func (db *DB) DeleteCF(opts *WriteOptions, cf *ColumnFamilyHandle, key []byte) e ) C.rocksdb_delete_cf(db.c, opts.c, cf.c, cKey, C.size_t(len(key)), &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -330,7 +330,7 @@ func (db *DB) Merge(opts *WriteOptions, key []byte, value []byte) error { ) C.rocksdb_merge(db.c, opts.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -346,7 +346,7 @@ func (db *DB) MergeCF(opts *WriteOptions, cf *ColumnFamilyHandle, key []byte, va ) C.rocksdb_merge_cf(db.c, opts.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -357,7 +357,7 @@ func (db *DB) Write(opts *WriteOptions, batch *WriteBatch) error { var cErr *C.char C.rocksdb_write(db.c, opts.c, batch.c, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -388,7 +388,7 @@ func (db *DB) GetProperty(propName string) string { cprop := C.CString(propName) defer C.free(unsafe.Pointer(cprop)) cValue := C.rocksdb_property_value(db.c, cprop) - defer C.free(unsafe.Pointer(cValue)) + defer C.rocksdb_free(unsafe.Pointer(cValue)) return C.GoString(cValue) } @@ -397,7 +397,7 @@ func (db *DB) GetPropertyCF(propName string, cf *ColumnFamilyHandle) string { cProp := C.CString(propName) defer C.free(unsafe.Pointer(cProp)) cValue := C.rocksdb_property_value_cf(db.c, cf.c, cProp) - defer C.free(unsafe.Pointer(cValue)) + defer 
C.rocksdb_free(unsafe.Pointer(cValue)) return C.GoString(cValue) } @@ -410,7 +410,7 @@ func (db *DB) CreateColumnFamily(opts *Options, name string) (*ColumnFamilyHandl defer C.free(unsafe.Pointer(cName)) cHandle := C.rocksdb_create_column_family(db.c, opts.c, cName, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return NewNativeColumnFamilyHandle(cHandle), nil @@ -421,7 +421,7 @@ func (db *DB) DropColumnFamily(c *ColumnFamilyHandle) error { var cErr *C.char C.rocksdb_drop_column_family(db.c, c.c, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -551,7 +551,7 @@ func (db *DB) Flush(opts *FlushOptions) error { var cErr *C.char C.rocksdb_flush(db.c, opts.c, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -562,7 +562,7 @@ func (db *DB) DisableFileDeletions() error { var cErr *C.char C.rocksdb_disable_file_deletions(db.c, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -573,7 +573,7 @@ func (db *DB) EnableFileDeletions(force bool) error { var cErr *C.char C.rocksdb_enable_file_deletions(db.c, boolToChar(force), &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -603,7 +603,7 @@ func DestroyDb(name string, opts *Options) error { defer C.free(unsafe.Pointer(cName)) C.rocksdb_destroy_db(opts.c, cName, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -618,7 +618,7 @@ func RepairDb(name string, opts *Options) error { defer C.free(unsafe.Pointer(cName)) C.rocksdb_repair_db(opts.c, cName, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil diff --git a/dynflag.go b/dynflag.go index 0b8a3ba4..ccda9ae8 100644 --- a/dynflag.go +++ b/dynflag.go @@ -2,5 +2,5 @@ package gorocksdb -// #cgo LDFLAGS: -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy +// #cgo LDFLAGS: -lrocksdb_je import "C" diff --git a/iterator.go b/iterator.go index fd94b776..fb0c5bea 100644 --- a/iterator.go +++ b/iterator.go @@ -106,7 +106,7 @@ func (iter *Iterator) Err() error { var cErr *C.char C.rocksdb_iter_get_error(iter.c, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil diff --git a/options.go b/options.go index 3b532ed4..2f55f17f 100644 --- a/options.go +++ b/options.go @@ -18,6 +18,8 @@ const ( SnappyCompression = CompressionType(C.rocksdb_snappy_compression) ZLibCompression = CompressionType(C.rocksdb_zlib_compression) Bz2Compression = CompressionType(C.rocksdb_bz2_compression) + Lz4Compression = CompressionType(C.rocksdb_lz4_compression) + Lz4hcCompression = CompressionType(C.rocksdb_lz4hc_compression) ) // CompactionStyle specifies the compaction style. @@ -456,9 +458,6 @@ func (opts *Options) SetMaxBytesForLevelMultiplierAdditional(value []int) { // if it would make the total compaction cover more than // (expanded_compaction_factor * targetFileSizeLevel()) many bytes. 
// Default: 25 -func (opts *Options) SetExpandedCompactionFactor(value int) { - C.rocksdb_options_set_expanded_compaction_factor(opts.c, C.int(value)) -} // SetSourceCompactionFactor sets the maximum number of bytes // in all source files to be compacted in a single compaction run. @@ -468,17 +467,13 @@ func (opts *Options) SetExpandedCompactionFactor(value int) { // for compaction to exceed // (source_compaction_factor * targetFileSizeLevel()) many bytes. // Default: 1 -func (opts *Options) SetSourceCompactionFactor(value int) { - C.rocksdb_options_set_source_compaction_factor(opts.c, C.int(value)) -} + // SetMaxGrandparentOverlapFactor sets the maximum bytes // of overlaps in grandparent (i.e., level+2) before we // stop building a single file in a level->level+1 compaction. // Default: 10 -func (opts *Options) SetMaxGrandparentOverlapFactor(value int) { - C.rocksdb_options_set_max_grandparent_overlap_factor(opts.c, C.int(value)) -} + // SetDisableDataSync enable/disable data sync. // @@ -512,7 +507,7 @@ func (opts *Options) SetUseFsync(value bool) { // Default: empty func (opts *Options) SetDbLogDir(value string) { cvalue := C.CString(value) - defer C.free(unsafe.Pointer(cvalue)) + defer C.rocksdb_free(unsafe.Pointer(cvalue)) C.rocksdb_options_set_db_log_dir(opts.c, cvalue) } @@ -524,7 +519,7 @@ func (opts *Options) SetDbLogDir(value string) { // Default: empty func (opts *Options) SetWalDir(value string) { cvalue := C.CString(value) - defer C.free(unsafe.Pointer(cvalue)) + defer C.rocksdb_free(unsafe.Pointer(cvalue)) C.rocksdb_options_set_wal_dir(opts.c, cvalue) } diff --git a/slice.go b/slice.go index acc69b41..2254924d 100644 --- a/slice.go +++ b/slice.go @@ -1,6 +1,7 @@ package gorocksdb // #include +// #include "rocksdb/c.h" import "C" import "unsafe" @@ -29,7 +30,7 @@ func (s *Slice) Size() int { // Free frees the slice data. 
func (s *Slice) Free() { if !s.freed { - C.free(unsafe.Pointer(s.data)) + C.rocksdb_free(unsafe.Pointer(s.data)) s.freed = true } } From eff08f01b5a54828981789ba9faca9545d2e40a1 Mon Sep 17 00:00:00 2001 From: ananclub Date: Thu, 23 Nov 2017 09:28:52 +0800 Subject: [PATCH 02/48] use rocksdb_free for release --- checkpoint.go | 2 +- db.go | 10 +++++----- dynflag.go | 2 +- options.go | 4 ++-- slice.go | 3 +-- sst_file_writer.go | 6 +++--- transaction.go | 10 +++++----- transactiondb.go | 10 +++++----- 8 files changed, 23 insertions(+), 24 deletions(-) diff --git a/checkpoint.go b/checkpoint.go index a7d2bf40..4a6436d2 100644 --- a/checkpoint.go +++ b/checkpoint.go @@ -43,7 +43,7 @@ func (checkpoint *Checkpoint) CreateCheckpoint(checkpoint_dir string, log_size_f C.rocksdb_checkpoint_create(checkpoint.c, cDir, C.uint64_t(log_size_for_flush), &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil diff --git a/db.go b/db.go index 89953110..76fd131d 100644 --- a/db.go +++ b/db.go @@ -244,7 +244,7 @@ func (db *DB) GetBytes(opts *ReadOptions, key []byte) ([]byte, error) { if cValue == nil { return nil, nil } - defer C.rocksdb_free(unsafe.Pointer(cValue)) + defer C.free(unsafe.Pointer(cValue)) return C.GoBytes(unsafe.Pointer(cValue), C.int(cValLen)), nil } @@ -394,7 +394,7 @@ func (db *DB) GetProperty(propName string) string { cprop := C.CString(propName) defer C.free(unsafe.Pointer(cprop)) cValue := C.rocksdb_property_value(db.c, cprop) - defer C.rocksdb_free(unsafe.Pointer(cValue)) + defer C.free(unsafe.Pointer(cValue)) return C.GoString(cValue) } @@ -617,7 +617,7 @@ func (db *DB) IngestExternalFile(filePaths []string, opts *IngestExternalFileOpt ) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -647,7 +647,7 @@ func (db *DB) IngestExternalFileCF(handle *ColumnFamilyHandle, filePaths []strin ) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -662,7 +662,7 @@ func (db *DB) NewCheckpoint() (*Checkpoint, error) { db.c, &cErr, ) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } diff --git a/dynflag.go b/dynflag.go index b57e9895..70072bb0 100644 --- a/dynflag.go +++ b/dynflag.go @@ -1,4 +1,4 @@ package gorocksdb -// #cgo LDFLAGS: -lrocksdb_je +// #cgo LDFLAGS: -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy import "C" diff --git a/options.go b/options.go index f7ac80c7..f756cddb 100644 --- a/options.go +++ b/options.go @@ -564,7 +564,7 @@ func (opts *Options) SetUseFsync(value bool) { // Default: empty func (opts *Options) SetDbLogDir(value string) { cvalue := C.CString(value) - defer C.rocksdb_free(unsafe.Pointer(cvalue)) + defer C.free(unsafe.Pointer(cvalue)) C.rocksdb_options_set_db_log_dir(opts.c, cvalue) } @@ -576,7 +576,7 @@ func (opts *Options) SetDbLogDir(value string) { // Default: empty func (opts *Options) SetWalDir(value string) { cvalue := C.CString(value) - defer C.rocksdb_free(unsafe.Pointer(cvalue)) + defer C.free(unsafe.Pointer(cvalue)) C.rocksdb_options_set_wal_dir(opts.c, cvalue) } diff --git a/slice.go b/slice.go index 9c10a612..d8b7a2e9 100644 --- a/slice.go +++ b/slice.go @@ -1,7 +1,6 @@ package gorocksdb // #include -// #include "rocksdb/c.h" import "C" import "unsafe" @@ -37,7 +36,7 @@ func (s 
*Slice) Size() int { // Free frees the slice data. func (s *Slice) Free() { if !s.freed { - C.rocksdb_free(unsafe.Pointer(s.data)) + C.free(unsafe.Pointer(s.data)) s.freed = true } } diff --git a/sst_file_writer.go b/sst_file_writer.go index 0f4689c2..54f2c139 100644 --- a/sst_file_writer.go +++ b/sst_file_writer.go @@ -30,7 +30,7 @@ func (w *SSTFileWriter) Open(path string) error { defer C.free(unsafe.Pointer(cPath)) C.rocksdb_sstfilewriter_open(w.c, cPath, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -44,7 +44,7 @@ func (w *SSTFileWriter) Add(key, value []byte) error { var cErr *C.char C.rocksdb_sstfilewriter_add(w.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -55,7 +55,7 @@ func (w *SSTFileWriter) Finish() error { var cErr *C.char C.rocksdb_sstfilewriter_finish(w.c, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil diff --git a/transaction.go b/transaction.go index ae31afd3..48c7a9ef 100644 --- a/transaction.go +++ b/transaction.go @@ -26,7 +26,7 @@ func (transaction *Transaction) Commit() error { ) C.rocksdb_transaction_commit(transaction.c, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -40,7 +40,7 @@ func (transaction *Transaction) Rollback() error { C.rocksdb_transaction_rollback(transaction.c, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -57,7 +57,7 @@ func (transaction *Transaction) Get(opts *ReadOptions, key []byte) (*Slice, erro transaction.c, opts.c, cKey, C.size_t(len(key)), &cValLen, &cErr, ) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return NewSlice(cValue, cValLen), nil @@ -74,7 +74,7 @@ func (transaction *Transaction) Put(key, value []byte) error { transaction.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr, ) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -88,7 +88,7 @@ func (transaction *Transaction) Delete(key []byte) error { ) C.rocksdb_transaction_delete(transaction.c, cKey, C.size_t(len(key)), &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil diff --git a/transactiondb.go b/transactiondb.go index f5d2fd70..cfdeac9c 100644 --- a/transactiondb.go +++ b/transactiondb.go @@ -30,7 +30,7 @@ func OpenTransactionDb( db := C.rocksdb_transactiondb_open( opts.c, transactionDBOpts.c, cName, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return &TransactionDB{ @@ -83,7 +83,7 @@ func (db *TransactionDB) Get(opts *ReadOptions, key []byte) (*Slice, error) { db.c, opts.c, cKey, C.size_t(len(key)), &cValLen, &cErr, ) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return 
NewSlice(cValue, cValLen), nil @@ -100,7 +100,7 @@ func (db *TransactionDB) Put(opts *WriteOptions, key, value []byte) error { db.c, opts.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr, ) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -114,7 +114,7 @@ func (db *TransactionDB) Delete(opts *WriteOptions, key []byte) error { ) C.rocksdb_transactiondb_delete(db.c, opts.c, cKey, C.size_t(len(key)), &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -129,7 +129,7 @@ func (db *TransactionDB) NewCheckpoint() (*Checkpoint, error) { db.c, &cErr, ) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } From fc04b7411a081f2f7203e797886170f878003dfb Mon Sep 17 00:00:00 2001 From: olebedev Date: Wed, 31 Jan 2018 11:30:21 +0500 Subject: [PATCH 03/48] add wal iterator --- db.go | 10 ++++ db_test.go | 4 +- wal_iterator.go | 44 ++++++++++++++++ wal_iterator_test.go | 119 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 175 insertions(+), 2 deletions(-) mode change 100644 => 100755 db.go mode change 100644 => 100755 db_test.go create mode 100755 wal_iterator.go create mode 100755 wal_iterator_test.go diff --git a/db.go b/db.go old mode 100644 new mode 100755 index 2b67f354..f448d336 --- a/db.go +++ b/db.go @@ -377,6 +377,16 @@ func (db *DB) NewIteratorCF(opts *ReadOptions, cf *ColumnFamilyHandle) *Iterator return NewNativeIterator(unsafe.Pointer(cIter)) } +func (db *DB) GetUpdatesSince (seq_number uint64) *WalIterator { + var cErr *C.char + cIter := C.rocksdb_get_updates_since(db.c, C.uint64_t(seq_number), nil, &cErr) + return NewNativeWalIterator(unsafe.Pointer(cIter)) +} + +func (db *DB) GetLatestSequenceNumber () uint64 { + return uint64(C.rocksdb_get_latest_sequence_number(db.c)) +} + // NewSnapshot creates a new snapshot of the database. 
func (db *DB) NewSnapshot() *Snapshot { cSnap := C.rocksdb_create_snapshot(db.c) diff --git a/db_test.go b/db_test.go old mode 100644 new mode 100755 index 7d4d5ff2..423a71e6 --- a/db_test.go +++ b/db_test.go @@ -97,8 +97,8 @@ func newTestDB(t *testing.T, name string, applyOpts func(opts *Options)) *DB { opts := NewDefaultOptions() // test the ratelimiter - rateLimiter := NewRateLimiter(1024, 100*1000, 10) - opts.SetRateLimiter(rateLimiter) + //rateLimiter := NewRateLimiter(1024, 100*1000, 10) + //opts.SetRateLimiter(rateLimiter) opts.SetCreateIfMissing(true) if applyOpts != nil { applyOpts(opts) diff --git a/wal_iterator.go b/wal_iterator.go new file mode 100755 index 00000000..bc801bd8 --- /dev/null +++ b/wal_iterator.go @@ -0,0 +1,44 @@ +package gorocksdb +// #include +// #include "rocksdb/c.h" +import "C" +import ( + "unsafe" +) + +type WalIterator struct { + c *C.rocksdb_wal_iterator_t +} + +func NewNativeWalIterator(c unsafe.Pointer) *WalIterator { + return &WalIterator{(*C.rocksdb_wal_iterator_t)(c)} +} + +func (iter *WalIterator) Valid() bool { + return C.rocksdb_wal_iter_valid(iter.c) != 0 +} + +func (iter *WalIterator) Next() { + C.rocksdb_wal_iter_next(iter.c) +} + +func (iter *WalIterator) Status() string { + var cErr *C.char + C.rocksdb_wal_iter_status(iter.c, &cErr) + if cErr != nil { + defer C.free(unsafe.Pointer(cErr)) + return C.GoString(cErr) + } + return "unknown" +} + +func (iter *WalIterator) Destroy() { + C.rocksdb_wal_iter_destroy(iter.c) + iter.c = nil +} + +func (iter *WalIterator) Batch() (*WriteBatch, uint64) { + var cSeq C.uint64_t + cB := C.rocksdb_wal_iter_get_batch(iter.c, &cSeq) + return NewNativeWriteBatch(cB), uint64(cSeq) +} \ No newline at end of file diff --git a/wal_iterator_test.go b/wal_iterator_test.go new file mode 100755 index 00000000..0d214f68 --- /dev/null +++ b/wal_iterator_test.go @@ -0,0 +1,119 @@ +package gorocksdb + +import ( + "testing" + + "github.com/facebookgo/ensure" + "fmt" + "time" + "math/rand" + + "io/ioutil" +) + +func SlowWriter (db *DB, count int, name string, cf *ColumnFamilyHandle) { + wo := NewDefaultWriteOptions() + for i:=0; i WRITE ERROR", err.Error()) + } else { + //fmt.Printf("> %d %s\n", i, key) + } + time.Sleep(time.Duration(rand.Int()%10)) // 0..9.99ms + if (i+1)%100 == 0 { + fmt.Printf("generated %d records\n", i+1) + time.Sleep(time.Second) + } + } + fmt.Println(">i think i am done", name) +} + +func TestWalIterator(t *testing.T) { + dir, err := ioutil.TempDir("", "gorocksdb-wal-cf") + fmt.Println("DIR", dir) + if err!=nil { + t.Fail() + t.Log(err.Error()) + return + } + var cf_names = []string{"default", "one", "two", "three"} + + opts := NewDefaultOptions() + opts.SetCreateIfMissing(true) + opts.SetCreateIfMissingColumnFamilies(true) + opts.SetWALTtlSeconds(1) + + var cfopts = []*Options{opts, opts, opts, opts} + + db, handles, err := OpenDbColumnFamilies(opts, dir, cf_names, cfopts) + if err!=nil { + t.Fail() + t.Log(err.Error()) + return + } + //db := newTestDB(t, "TestWalIterator", nil) + + _ = handles + + //wo := NewDefaultWriteOptions() + //db.Put(wo, []byte("start_key"), []byte("value")) + count := 1<<10 + go SlowWriter(db, count>>2, "one", handles[1]) + go SlowWriter(db, count>>2, "two", handles[2]) + go SlowWriter(db, count>>2, "three", handles[3]) + go SlowWriter(db, count>>2, "default", handles[0]) + var i int + var seq uint64 + var iter *WalIterator + cfCount := [4]int{0,0,0,0} + for iseq { + seq = newSeq + //fmt.Printf("< %d ", seq) + for bi := batch.NewIterator(); bi.Next(); { + rec := bi.Record() + 
fmt.Printf("%d '%s' (%d)\n", rec.CF, string(rec.Key), seq) + i++ + cfCount[rec.CF]++ + } + } else { + seq++ // :( + } + //fmt.Println() + batch.Destroy() + } + if iter!=nil { + iter.Destroy() + } + fmt.Println(">2,count>>2,count>>2,count>>2,}) + + for i:=0; i Date: Mon, 12 Nov 2018 13:21:10 +0800 Subject: [PATCH 04/48] add wrapper for GetOptionsFromString --- options.go | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/options.go b/options.go index 187c2f5c..39adfe98 100644 --- a/options.go +++ b/options.go @@ -3,7 +3,10 @@ package gorocksdb // #include "rocksdb/c.h" // #include "gorocksdb.h" import "C" -import "unsafe" +import ( + "errors" + "unsafe" +) // CompressionType specifies the block compression. // DB contents are stored in a set of blocks, each of which holds a @@ -82,6 +85,30 @@ func NewNativeOptions(c *C.rocksdb_options_t) *Options { return &Options{c: c} } +// GetOptionsFromString creates a Options object from existing opt and string. +// If base is nil, a default opt create by NewDefaultOptions will be used as base opt. +func GetOptionsFromString(base *Options, optStr string) (*Options, error) { + if base == nil { + base = NewDefaultOptions() + defer base.Destroy() + } + + var ( + cErr *C.char + cOptStr = C.CString(optStr) + ) + defer C.free(unsafe.Pointer(cOptStr)) + + newOpt := NewDefaultOptions() + C.rocksdb_get_options_from_string(base.c, cOptStr, newOpt.c, &cErr) + if cErr != nil { + defer C.free(unsafe.Pointer(cErr)) + return nil, errors.New(C.GoString(cErr)) + } + + return newOpt, nil +} + // ------------------- // Parameters that affect behavior From e0ef1dd3f0d154dc47bafa2d76426c155380fd72 Mon Sep 17 00:00:00 2001 From: Bertrand Mermet Date: Sat, 15 Dec 2018 02:25:59 +0100 Subject: [PATCH 05/48] Add GetStatisticsString method to Options --- options.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/options.go b/options.go index 187c2f5c..2bfd1b50 100644 --- a/options.go +++ b/options.go @@ -937,6 +937,13 @@ func (opts *Options) SetFIFOCompactionOptions(value *FIFOCompactionOptions) { C.rocksdb_options_set_fifo_compaction_options(opts.c, value.c) } +// GetStatisticsString returns the statistics as a string. +func (opts *Options) GetStatisticsString() string { + sString := C.rocksdb_options_statistics_get_string(opts.c) + defer C.free(unsafe.Pointer(sString)) + return C.GoString(sString) +} + // SetRateLimiter sets the rate limiter of the options. // Use to control write rate of flush and compaction. Flush has higher // priority than compaction. Rate limiting is disabled if nullptr. 
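A minimal usage sketch for the two Options helpers introduced above: GetOptionsFromString (PATCH 04/48) and GetStatisticsString (PATCH 05/48). The import path and the EnableStatistics call are assumptions for illustration and are not part of these patches; statistics only accumulate once they are enabled on the Options used to open the database.

package main

import (
	"fmt"
	"log"

	"github.com/tecbot/gorocksdb" // assumed import path
)

func main() {
	// Derive an Options object from a RocksDB option string; passing nil
	// as the base makes GetOptionsFromString start from NewDefaultOptions.
	opts, err := gorocksdb.GetOptionsFromString(nil, "write_buffer_size=67108864")
	if err != nil {
		log.Fatal(err)
	}
	defer opts.Destroy()

	opts.SetCreateIfMissing(true)
	opts.EnableStatistics() // assumed helper wrapping rocksdb_options_enable_statistics

	db, err := gorocksdb.OpenDb(opts, "/tmp/gorocksdb-example")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// ... reads and writes would go here ...

	// Dump the statistics string exposed by PATCH 05/48.
	fmt.Println(opts.GetStatisticsString())
}
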
From 28210cc645ec431d90b978ed11d62037d5f68cfe Mon Sep 17 00:00:00 2001 From: N3205 Date: Mon, 24 Dec 2018 10:46:25 +0800 Subject: [PATCH 06/48] fix memory leak in deleting filterpolicy and mergeoperator value --- gorocksdb.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/gorocksdb.c b/gorocksdb.c index c8258376..efebbe51 100644 --- a/gorocksdb.c +++ b/gorocksdb.c @@ -37,7 +37,9 @@ rocksdb_filterpolicy_t* gorocksdb_filterpolicy_create(uintptr_t idx) { (const char *(*)(void*))(gorocksdb_filterpolicy_name)); } -void gorocksdb_filterpolicy_delete_filter(void* state, const char* v, size_t s) { } +void gorocksdb_filterpolicy_delete_filter(void* state, const char* v, size_t s) { + free((char*)v); +} /* Merge Operator */ @@ -51,7 +53,9 @@ rocksdb_mergeoperator_t* gorocksdb_mergeoperator_create(uintptr_t idx) { (const char* (*)(void*))(gorocksdb_mergeoperator_name)); } -void gorocksdb_mergeoperator_delete_value(void* id, const char* v, size_t s) { } +void gorocksdb_mergeoperator_delete_value(void* id, const char* v, size_t s) { + free((char*)v); +} /* Slice Transform */ From f1ea5ee273da916895a81910ec123986c3609404 Mon Sep 17 00:00:00 2001 From: Paul Mach Date: Wed, 9 Jan 2019 19:33:46 -0500 Subject: [PATCH 07/48] fix GetApproximateSizes --- db.go | 22 ++++++++++++++++++---- db_test.go | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+), 4 deletions(-) diff --git a/db.go b/db.go index fdfc8186..5533d13c 100644 --- a/db.go +++ b/db.go @@ -543,12 +543,19 @@ func (db *DB) GetApproximateSizes(ranges []Range) []uint64 { cStartLens := make([]C.size_t, len(ranges)) cLimitLens := make([]C.size_t, len(ranges)) for i, r := range ranges { - cStarts[i] = byteToChar(r.Start) + cStarts[i] = (*C.char)(C.CBytes(r.Start)) cStartLens[i] = C.size_t(len(r.Start)) - cLimits[i] = byteToChar(r.Limit) + cLimits[i] = (*C.char)(C.CBytes(r.Limit)) cLimitLens[i] = C.size_t(len(r.Limit)) } + defer func() { + for i := range ranges { + C.free(unsafe.Pointer(cStarts[i])) + C.free(unsafe.Pointer(cLimits[i])) + } + }() + C.rocksdb_approximate_sizes( db.c, C.int(len(ranges)), @@ -577,12 +584,19 @@ func (db *DB) GetApproximateSizesCF(cf *ColumnFamilyHandle, ranges []Range) []ui cStartLens := make([]C.size_t, len(ranges)) cLimitLens := make([]C.size_t, len(ranges)) for i, r := range ranges { - cStarts[i] = byteToChar(r.Start) + cStarts[i] = (*C.char)(C.CBytes(r.Start)) cStartLens[i] = C.size_t(len(r.Start)) - cLimits[i] = byteToChar(r.Limit) + cLimits[i] = (*C.char)(C.CBytes(r.Limit)) cLimitLens[i] = C.size_t(len(r.Limit)) } + defer func() { + for i := range ranges { + C.free(unsafe.Pointer(cStarts[i])) + C.free(unsafe.Pointer(cLimits[i])) + } + }() + C.rocksdb_approximate_sizes_cf( db.c, cf.c, diff --git a/db_test.go b/db_test.go index c689b35e..b08171bd 100644 --- a/db_test.go +++ b/db_test.go @@ -172,3 +172,42 @@ func TestDBMultiGet(t *testing.T) { ensure.DeepEqual(t, values[2].Data(), givenVal2) ensure.DeepEqual(t, values[3].Data(), givenVal3) } + +func TestDBGetApproximateSizes(t *testing.T) { + db := newTestDB(t, "TestDBGetApproximateSizes", nil) + defer db.Close() + + // no ranges + sizes := db.GetApproximateSizes(nil) + ensure.DeepEqual(t, len(sizes), 0) + + // range will nil start and limit + sizes = db.GetApproximateSizes([]Range{{Start: nil, Limit: nil}}) + ensure.DeepEqual(t, sizes, []uint64{0}) + + // valid range + sizes = db.GetApproximateSizes([]Range{{Start: []byte{0x00}, Limit: []byte{0xFF}}}) + ensure.DeepEqual(t, sizes, []uint64{0}) +} + +func 
TestDBGetApproximateSizesCF(t *testing.T) { + db := newTestDB(t, "TestDBGetApproximateSizesCF", nil) + defer db.Close() + + o := NewDefaultOptions() + + cf, err := db.CreateColumnFamily(o, "other") + ensure.Nil(t, err) + + // no ranges + sizes := db.GetApproximateSizesCF(cf, nil) + ensure.DeepEqual(t, len(sizes), 0) + + // range will nil start and limit + sizes = db.GetApproximateSizesCF(cf, []Range{{Start: nil, Limit: nil}}) + ensure.DeepEqual(t, sizes, []uint64{0}) + + // valid range + sizes = db.GetApproximateSizesCF(cf, []Range{{Start: []byte{0x00}, Limit: []byte{0xFF}}}) + ensure.DeepEqual(t, sizes, []uint64{0}) +} From 65e4f3a3cd8777a7568e6cfaf2ddaec34f81da2c Mon Sep 17 00:00:00 2001 From: Rupert Chen Date: Mon, 28 Jan 2019 14:24:20 -0500 Subject: [PATCH 08/48] Add support for opening a db with TTL support --- db.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/db.go b/db.go index fdfc8186..23fb0a3c 100644 --- a/db.go +++ b/db.go @@ -42,6 +42,25 @@ func OpenDb(opts *Options, name string) (*DB, error) { }, nil } +// OpenDbWithTTL opens a database with TTL support with the specified options. +func OpenDbWithTTL(opts *Options, name string, ttl int) (*DB, error) { + var ( + cErr *C.char + cName = C.CString(name) + ) + defer C.free(unsafe.Pointer(cName)) + db := C.rocksdb_open_with_ttl(opts.c, cName, C.int(ttl), &cErr) + if cErr != nil { + defer C.free(unsafe.Pointer(cErr)) + return nil, errors.New(C.GoString(cErr)) + } + return &DB{ + name: name, + c: db, + opts: opts, + }, nil +} + // OpenDbForReadOnly opens a database with the specified options for readonly usage. func OpenDbForReadOnly(opts *Options, name string, errorIfLogFileExist bool) (*DB, error) { var ( From 41daeedb2dcebe7be379793d3cc9e3c5df37d786 Mon Sep 17 00:00:00 2001 From: Rupert Chen Date: Mon, 28 Jan 2019 15:01:57 -0500 Subject: [PATCH 09/48] Add *DB.SetOptions(). --- db.go | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/db.go b/db.go index fdfc8186..6d82b1de 100644 --- a/db.go +++ b/db.go @@ -596,6 +596,37 @@ func (db *DB) GetApproximateSizesCF(cf *ColumnFamilyHandle, ranges []Range) []ui return sizes } +// SetOptions dynamically changes options through the SetOptions API. +func (db *DB) SetOptions(keys, values []string) error { + num_keys := len(keys) + + if num_keys == 0 { + return nil + } + + cKeys := make([]*C.char, num_keys) + cValues := make([]*C.char, num_keys) + for i := range keys { + cKeys[i] = C.CString(keys[i]) + cValues[i] = C.CString(values[i]) + } + + var cErr *C.char + + C.rocksdb_set_options( + db.c, + C.int(num_keys), + &cKeys[0], + &cValues[0], + &cErr, + ) + if cErr != nil { + defer C.free(unsafe.Pointer(cErr)) + return errors.New(C.GoString(cErr)) + } + return nil +} + // LiveFileMetadata is a metadata which is associated with each SST file. 
type LiveFileMetadata struct { Name string From c32be751135a662ee5136cc9b6e71fe8f8382af3 Mon Sep 17 00:00:00 2001 From: linyuanjin Date: Thu, 25 Apr 2019 17:40:19 +0800 Subject: [PATCH 10/48] DeleteRange DeleteRangeCF --- write_batch.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/write_batch.go b/write_batch.go index 88b5aac8..bf2d2521 100644 --- a/write_batch.go +++ b/write_batch.go @@ -68,6 +68,18 @@ func (wb *WriteBatch) DeleteCF(cf *ColumnFamilyHandle, key []byte) { C.rocksdb_writebatch_delete_cf(wb.c, cf.c, cKey, C.size_t(len(key))) } +func (wb *WriteBatch) DeleteRange(beginKey []byte, endKey []byte) { + cBeginKey := byteToChar(beginKey) + cEndKey := byteToChar(endKey) + C.rocksdb_writebatch_delete_range(wb.c, cBeginKey, C.size_t(len(beginKey)), cEndKey, C.size_t(len(endKey))) +} + +func (wb *WriteBatch) DeleteRangeCF(cf *ColumnFamilyHandle, beginKey []byte, endKey []byte) { + cBeginKey := byteToChar(beginKey) + cEndKey := byteToChar(endKey) + C.rocksdb_writebatch_delete_range_cf(wb.c, cf.c, cBeginKey, C.size_t(len(beginKey)), cEndKey, C.size_t(len(endKey))) +} + // Data returns the serialized version of this batch. func (wb *WriteBatch) Data() []byte { var cSize C.size_t From 53f98ff36ad4a91c97c1390619e2799095690666 Mon Sep 17 00:00:00 2001 From: Jack Wakefield Date: Sat, 1 Dec 2018 19:22:14 +0000 Subject: [PATCH 11/48] Add pinned slices --- db.go | 14 ++++++++++++++ db_test.go | 17 +++++++++++++++-- slice.go | 28 ++++++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 2 deletions(-) diff --git a/db.go b/db.go index fdfc8186..08b16889 100644 --- a/db.go +++ b/db.go @@ -264,6 +264,20 @@ func (db *DB) GetCF(opts *ReadOptions, cf *ColumnFamilyHandle, key []byte) (*Sli return NewSlice(cValue, cValLen), nil } +// GetPinned returns the data associated with the key from the database. 
+func (db *DB) GetPinned(opts *ReadOptions, key []byte) (*PinnableSliceHandle, error) { + var ( + cErr *C.char + cKey = byteToChar(key) + ) + cHandle := C.rocksdb_get_pinned(db.c, opts.c, cKey, C.size_t(len(key)), &cErr) + if cErr != nil { + defer C.free(unsafe.Pointer(cErr)) + return nil, errors.New(C.GoString(cErr)) + } + return NewNativePinnableSliceHandle(cHandle), nil +} + // MultiGet returns the data associated with the passed keys from the database func (db *DB) MultiGet(opts *ReadOptions, keys ...[]byte) (Slices, error) { cKeys, cKeySizes := byteSlicesToCSlices(keys) diff --git a/db_test.go b/db_test.go index c689b35e..8dc0ec77 100644 --- a/db_test.go +++ b/db_test.go @@ -21,6 +21,7 @@ func TestDBCRUD(t *testing.T) { givenKey = []byte("hello") givenVal1 = []byte("world1") givenVal2 = []byte("world2") + givenVal3 = []byte("world2") wo = NewDefaultWriteOptions() ro = NewDefaultReadOptions() ) @@ -41,11 +42,23 @@ func TestDBCRUD(t *testing.T) { ensure.Nil(t, err) ensure.DeepEqual(t, v2.Data(), givenVal2) + // retrieve pinned + v3, err := db.GetPinned(ro, givenKey) + defer v3.Destroy() + ensure.Nil(t, err) + ensure.DeepEqual(t, v3.Data(), givenVal3) + // delete ensure.Nil(t, db.Delete(wo, givenKey)) - v3, err := db.Get(ro, givenKey) + v4, err := db.Get(ro, givenKey) ensure.Nil(t, err) - ensure.True(t, v3.Data() == nil) + ensure.True(t, v4.Data() == nil) + + // retrieve missing pinned + v5, err := db.GetPinned(ro, givenKey) + defer v5.Destroy() + ensure.Nil(t, err) + ensure.Nil(t, v5.Data()) } func TestDBCRUDDBPaths(t *testing.T) { diff --git a/slice.go b/slice.go index 22b48e09..bcdc4c16 100644 --- a/slice.go +++ b/slice.go @@ -1,6 +1,7 @@ package gorocksdb // #include +// #include "rocksdb/c.h" import "C" import "unsafe" @@ -48,3 +49,30 @@ func (s *Slice) Free() { s.freed = true } } + +// PinnableSliceHandle represents a handle to a PinnableSlice. +type PinnableSliceHandle struct { + c *C.rocksdb_pinnableslice_t +} + +// NewNativePinnableSliceHandle creates a PinnableSliceHandle object. +func NewNativePinnableSliceHandle(c *C.rocksdb_pinnableslice_t) *PinnableSliceHandle { + return &PinnableSliceHandle{c} +} + +// Data returns the data of the slice. +func (h *PinnableSliceHandle) Data() []byte { + if h.c == nil { + return nil + } + + var cValLen C.size_t + cValue := C.rocksdb_pinnableslice_value(h.c, &cValLen) + + return charToByte(cValue, cValLen) +} + +// Destroy calls the destructor of the underlying pinnable slice handle. +func (h *PinnableSliceHandle) Destroy() { + C.rocksdb_pinnableslice_destroy(h.c) +} From 3e9dad2cc5db159bdac5c3a9f2d78df32f99bcc0 Mon Sep 17 00:00:00 2001 From: Artem Yarulin Date: Fri, 24 May 2019 00:50:30 +0300 Subject: [PATCH 12/48] Correct default value for SetWriteBufferSize According to https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning#column-family-write-buffer-size default for `SetWriteBufferSize` should be `64MB` and not `4MB` --- options.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/options.go b/options.go index 0f1376ad..c0b430a3 100644 --- a/options.go +++ b/options.go @@ -333,7 +333,7 @@ func (opts *Options) OptimizeUniversalStyleCompaction(memtable_memory_budget uin // so you may wish to adjust this parameter to control memory usage. // Also, a larger write buffer will result in a longer recovery time // the next time the database is opened. 
-// Default: 4MB +// Default: 64MB func (opts *Options) SetWriteBufferSize(value int) { C.rocksdb_options_set_write_buffer_size(opts.c, C.size_t(value)) } From acc24fca25a7ee01ac2ef1715f4ef6589502746b Mon Sep 17 00:00:00 2001 From: linyuanjin Date: Fri, 24 May 2019 12:42:27 +0800 Subject: [PATCH 13/48] DeleteFileInRange --- db.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/db.go b/db.go index fdfc8186..07f285bb 100644 --- a/db.go +++ b/db.go @@ -688,6 +688,26 @@ func (db *DB) DeleteFile(name string) { C.rocksdb_delete_file(db.c, cName) } +func (db *DB) DeleteFileInRange(beginKey []byte, limitKey []byte) error { + cBeginKey := byteToChar(beginKey) + cLimitKey := byteToChar(limitKey) + + var cErr *C.char + + C.rocksdb_delete_file_in_range( + db.c, + cBeginKey, C.size_t(len(beginKey)), + cLimitKey, C.size_t(len(limitKey)), + &cErr, + ) + + if cErr != nil { + defer C.free(unsafe.Pointer(cErr)) + return errors.New(C.GoString(cErr)) + } + return nil +} + // IngestExternalFile loads a list of external SST files. func (db *DB) IngestExternalFile(filePaths []string, opts *IngestExternalFileOptions) error { cFilePaths := make([]*C.char, len(filePaths)) From ea499c04be20a0f679128aeec52822643ceded40 Mon Sep 17 00:00:00 2001 From: linyuanjin Date: Mon, 3 Jun 2019 15:10:38 +0800 Subject: [PATCH 14/48] DeleteFileInRange and comments --- db.go | 34 +++++++++++++++++++++++++++++----- write_batch.go | 15 +++++++++------ 2 files changed, 38 insertions(+), 11 deletions(-) diff --git a/db.go b/db.go index 09a79fb7..9a8d99b1 100644 --- a/db.go +++ b/db.go @@ -766,16 +766,40 @@ func (db *DB) DeleteFile(name string) { C.rocksdb_delete_file(db.c, cName) } -func (db *DB) DeleteFileInRange(beginKey []byte, limitKey []byte) error { - cBeginKey := byteToChar(beginKey) - cLimitKey := byteToChar(limitKey) +// DeleteFileInRange deletes SST files that contain keys between the Range, [r.Start, limitKey] +func (db *DB) DeleteFileInRange(r Range) error { + cStartKey := byteToChar(r.Start) + cLimitKey := byteToChar(r.Limit) var cErr *C.char C.rocksdb_delete_file_in_range( db.c, - cBeginKey, C.size_t(len(beginKey)), - cLimitKey, C.size_t(len(limitKey)), + cStartKey, C.size_t(len(r.Start)), + cLimitKey, C.size_t(len(r.Limit)), + &cErr, + ) + + if cErr != nil { + defer C.free(unsafe.Pointer(cErr)) + return errors.New(C.GoString(cErr)) + } + return nil +} + +// DeleteFileInRangeCF deletes SST files that contain keys between the Range, [r.Start, r.Limit], and +// belong to a given column family +func (db *DB) DeleteFileInRangeCF(cf *ColumnFamilyHandle, r Range) error { + cStartKey := byteToChar(r.Start) + cLimitKey := byteToChar(r.Limit) + + var cErr *C.char + + C.rocksdb_delete_file_in_range_cf( + db.c, + cf.c, + cStartKey, C.size_t(len(r.Start)), + cLimitKey, C.size_t(len(r.Limit)), &cErr, ) diff --git a/write_batch.go b/write_batch.go index bf2d2521..e66c5b56 100644 --- a/write_batch.go +++ b/write_batch.go @@ -68,16 +68,19 @@ func (wb *WriteBatch) DeleteCF(cf *ColumnFamilyHandle, key []byte) { C.rocksdb_writebatch_delete_cf(wb.c, cf.c, cKey, C.size_t(len(key))) } -func (wb *WriteBatch) DeleteRange(beginKey []byte, endKey []byte) { - cBeginKey := byteToChar(beginKey) +// DeleteRange deletes keys that are between [startKey, endKey) +func (wb *WriteBatch) DeleteRange(startKey []byte, endKey []byte) { + cStartKey := byteToChar(startKey) cEndKey := byteToChar(endKey) - C.rocksdb_writebatch_delete_range(wb.c, cBeginKey, C.size_t(len(beginKey)), cEndKey, C.size_t(len(endKey))) + 
C.rocksdb_writebatch_delete_range(wb.c, cStartKey, C.size_t(len(startKey)), cEndKey, C.size_t(len(endKey))) } -func (wb *WriteBatch) DeleteRangeCF(cf *ColumnFamilyHandle, beginKey []byte, endKey []byte) { - cBeginKey := byteToChar(beginKey) +// DeleteRangeCF deletes keys that are between [startKey, endKey) and +// belong to a given column family +func (wb *WriteBatch) DeleteRangeCF(cf *ColumnFamilyHandle, startKey []byte, endKey []byte) { + cStartKey := byteToChar(startKey) cEndKey := byteToChar(endKey) - C.rocksdb_writebatch_delete_range_cf(wb.c, cf.c, cBeginKey, C.size_t(len(beginKey)), cEndKey, C.size_t(len(endKey))) + C.rocksdb_writebatch_delete_range_cf(wb.c, cf.c, cStartKey, C.size_t(len(startKey)), cEndKey, C.size_t(len(endKey))) } // Data returns the serialized version of this batch. From ce1489d2122b66e9d1eaaee17b8987edf75ecaf9 Mon Sep 17 00:00:00 2001 From: linyuanjin Date: Mon, 3 Jun 2019 15:22:43 +0800 Subject: [PATCH 15/48] add test for DeleteRange --- write_batch_test.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/write_batch_test.go b/write_batch_test.go index d6000e57..72eeb36e 100644 --- a/write_batch_test.go +++ b/write_batch_test.go @@ -39,6 +39,18 @@ func TestWriteBatch(t *testing.T) { defer v2.Free() ensure.Nil(t, err) ensure.True(t, v2.Data() == nil) + + // DeleteRange test + wb.Clear() + wb.DeleteRange(givenKey1, givenKey2) + + // perform the batch + ensure.Nil(t, db.Write(wo, wb)) + + v1, err = db.Get(ro, givenKey1) + defer v1.Free() + ensure.Nil(t, err) + ensure.True(t, v1.Data() == nil) } func TestWriteBatchIterator(t *testing.T) { From 5444b9d08e6c521fea36d12ec3963cc499d08814 Mon Sep 17 00:00:00 2001 From: linyuanjin Date: Mon, 17 Jun 2019 13:48:33 +0800 Subject: [PATCH 16/48] change int to uint64 --- cache.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cache.go b/cache.go index ed708d7f..866326dc 100644 --- a/cache.go +++ b/cache.go @@ -9,7 +9,7 @@ type Cache struct { } // NewLRUCache creates a new LRU Cache object with the capacity given. -func NewLRUCache(capacity int) *Cache { +func NewLRUCache(capacity uint64) *Cache { return NewNativeCache(C.rocksdb_cache_create_lru(C.size_t(capacity))) } @@ -19,13 +19,13 @@ func NewNativeCache(c *C.rocksdb_cache_t) *Cache { } // GetUsage returns the Cache memory usage. -func (c *Cache) GetUsage() int { - return int(C.rocksdb_cache_get_usage(c.c)) +func (c *Cache) GetUsage() uint64 { + return uint64(C.rocksdb_cache_get_usage(c.c)) } // GetPinnedUsage returns the Cache pinned memory usage. -func (c *Cache) GetPinnedUsage() int { - return int(C.rocksdb_cache_get_pinned_usage(c.c)) +func (c *Cache) GetPinnedUsage() uint64 { + return uint64(C.rocksdb_cache_get_pinned_usage(c.c)) } // Destroy deallocates the Cache object. 
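A short sketch of the Cache API after the int-to-uint64 change in PATCH 16/48, which lets capacities above 2 GiB be expressed on platforms where Go's int is 32 bits. Wiring the cache into the block-based table options is an assumption for illustration; NewDefaultBlockBasedTableOptions, SetBlockCache, and SetBlockBasedTableFactory are not touched by the patches above.

package main

import (
	"fmt"

	"github.com/tecbot/gorocksdb" // assumed import path
)

func main() {
	// Capacity is now a uint64.
	cache := gorocksdb.NewLRUCache(64 << 20) // 64 MiB
	defer cache.Destroy()

	bbto := gorocksdb.NewDefaultBlockBasedTableOptions() // assumed helper
	bbto.SetBlockCache(cache)                            // assumed helper

	opts := gorocksdb.NewDefaultOptions()
	opts.SetCreateIfMissing(true)
	opts.SetBlockBasedTableFactory(bbto) // assumed helper

	// The usage counters are returned as uint64 as well.
	fmt.Println("usage:", cache.GetUsage(), "pinned:", cache.GetPinnedUsage())
}
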
From a5447a1345d8f4e10a5462195189926a6babf95c Mon Sep 17 00:00:00 2001 From: linyuanjin Date: Mon, 24 Jun 2019 13:20:18 +0800 Subject: [PATCH 17/48] add SetEnablePipelinedWrite --- options.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/options.go b/options.go index 0f1376ad..a717bf89 100644 --- a/options.go +++ b/options.go @@ -829,6 +829,13 @@ func (opts *Options) SetWalSizeLimitMb(value uint64) { C.rocksdb_options_set_WAL_size_limit_MB(opts.c, C.uint64_t(value)) } +// SetEnablePipelinedWrite enables pipelined write +// +// Default: false +func (opts *Options) SetEnablePipelinedWrite(value bool) { + C.rocksdb_options_set_enable_pipelined_write(opts.c, boolToChar(value)) +} + // SetManifestPreallocationSize sets the number of bytes // to preallocate (via fallocate) the manifest files. // From 96eadb84f109a95d78b6a8e603538e248283b7b9 Mon Sep 17 00:00:00 2001 From: linyuanjin Date: Mon, 24 Jun 2019 13:47:57 +0800 Subject: [PATCH 18/48] improve the PR of wal iterator --- db.go | 6 +-- db_test.go | 4 +- wal_iterator.go | 14 ++--- wal_iterator_test.go | 119 ------------------------------------------- 4 files changed, 13 insertions(+), 130 deletions(-) delete mode 100755 wal_iterator_test.go diff --git a/db.go b/db.go index d631388b..52c37a07 100755 --- a/db.go +++ b/db.go @@ -504,13 +504,13 @@ func (db *DB) NewIteratorCF(opts *ReadOptions, cf *ColumnFamilyHandle) *Iterator return NewNativeIterator(unsafe.Pointer(cIter)) } -func (db *DB) GetUpdatesSince (seq_number uint64) *WalIterator { +func (db *DB) GetUpdatesSince(seqNumber uint64) *WalIterator { var cErr *C.char - cIter := C.rocksdb_get_updates_since(db.c, C.uint64_t(seq_number), nil, &cErr) + cIter := C.rocksdb_get_updates_since(db.c, C.uint64_t(seqNumber), nil, &cErr) return NewNativeWalIterator(unsafe.Pointer(cIter)) } -func (db *DB) GetLatestSequenceNumber () uint64 { +func (db *DB) GetLatestSequenceNumber() uint64 { return uint64(C.rocksdb_get_latest_sequence_number(db.c)) } diff --git a/db_test.go b/db_test.go index 8f193f08..3c0df0a1 100755 --- a/db_test.go +++ b/db_test.go @@ -110,8 +110,8 @@ func newTestDB(t *testing.T, name string, applyOpts func(opts *Options)) *DB { opts := NewDefaultOptions() // test the ratelimiter - //rateLimiter := NewRateLimiter(1024, 100*1000, 10) - //opts.SetRateLimiter(rateLimiter) + rateLimiter := NewRateLimiter(1024, 100*1000, 10) + opts.SetRateLimiter(rateLimiter) opts.SetCreateIfMissing(true) if applyOpts != nil { applyOpts(opts) diff --git a/wal_iterator.go b/wal_iterator.go index bc801bd8..2415f6b1 100755 --- a/wal_iterator.go +++ b/wal_iterator.go @@ -1,8 +1,10 @@ package gorocksdb + // #include // #include "rocksdb/c.h" import "C" import ( + "errors" "unsafe" ) @@ -22,14 +24,14 @@ func (iter *WalIterator) Next() { C.rocksdb_wal_iter_next(iter.c) } -func (iter *WalIterator) Status() string { - var cErr *C.char +func (iter *WalIterator) Err() error { + var cErr *C.char C.rocksdb_wal_iter_status(iter.c, &cErr) if cErr != nil { defer C.free(unsafe.Pointer(cErr)) - return C.GoString(cErr) + return errors.New(C.GoString(cErr)) } - return "unknown" + return nil } func (iter *WalIterator) Destroy() { @@ -37,8 +39,8 @@ func (iter *WalIterator) Destroy() { iter.c = nil } -func (iter *WalIterator) Batch() (*WriteBatch, uint64) { +func (iter *WalIterator) GetBatch() (*WriteBatch, uint64) { var cSeq C.uint64_t cB := C.rocksdb_wal_iter_get_batch(iter.c, &cSeq) return NewNativeWriteBatch(cB), uint64(cSeq) -} \ No newline at end of file +} diff --git a/wal_iterator_test.go b/wal_iterator_test.go 
deleted file mode 100755 index 0d214f68..00000000 --- a/wal_iterator_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package gorocksdb - -import ( - "testing" - - "github.com/facebookgo/ensure" - "fmt" - "time" - "math/rand" - - "io/ioutil" -) - -func SlowWriter (db *DB, count int, name string, cf *ColumnFamilyHandle) { - wo := NewDefaultWriteOptions() - for i:=0; i WRITE ERROR", err.Error()) - } else { - //fmt.Printf("> %d %s\n", i, key) - } - time.Sleep(time.Duration(rand.Int()%10)) // 0..9.99ms - if (i+1)%100 == 0 { - fmt.Printf("generated %d records\n", i+1) - time.Sleep(time.Second) - } - } - fmt.Println(">i think i am done", name) -} - -func TestWalIterator(t *testing.T) { - dir, err := ioutil.TempDir("", "gorocksdb-wal-cf") - fmt.Println("DIR", dir) - if err!=nil { - t.Fail() - t.Log(err.Error()) - return - } - var cf_names = []string{"default", "one", "two", "three"} - - opts := NewDefaultOptions() - opts.SetCreateIfMissing(true) - opts.SetCreateIfMissingColumnFamilies(true) - opts.SetWALTtlSeconds(1) - - var cfopts = []*Options{opts, opts, opts, opts} - - db, handles, err := OpenDbColumnFamilies(opts, dir, cf_names, cfopts) - if err!=nil { - t.Fail() - t.Log(err.Error()) - return - } - //db := newTestDB(t, "TestWalIterator", nil) - - _ = handles - - //wo := NewDefaultWriteOptions() - //db.Put(wo, []byte("start_key"), []byte("value")) - count := 1<<10 - go SlowWriter(db, count>>2, "one", handles[1]) - go SlowWriter(db, count>>2, "two", handles[2]) - go SlowWriter(db, count>>2, "three", handles[3]) - go SlowWriter(db, count>>2, "default", handles[0]) - var i int - var seq uint64 - var iter *WalIterator - cfCount := [4]int{0,0,0,0} - for iseq { - seq = newSeq - //fmt.Printf("< %d ", seq) - for bi := batch.NewIterator(); bi.Next(); { - rec := bi.Record() - fmt.Printf("%d '%s' (%d)\n", rec.CF, string(rec.Key), seq) - i++ - cfCount[rec.CF]++ - } - } else { - seq++ // :( - } - //fmt.Println() - batch.Destroy() - } - if iter!=nil { - iter.Destroy() - } - fmt.Println(">2,count>>2,count>>2,count>>2,}) - - for i:=0; i Date: Mon, 24 Jun 2019 13:57:33 +0800 Subject: [PATCH 19/48] format code --- checkpoint_test.go | 3 ++- memory_usage.go | 2 +- util.go | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/checkpoint_test.go b/checkpoint_test.go index 9505740d..1ea10fdb 100644 --- a/checkpoint_test.go +++ b/checkpoint_test.go @@ -1,10 +1,11 @@ package gorocksdb import ( - "github.com/facebookgo/ensure" "io/ioutil" "os" "testing" + + "github.com/facebookgo/ensure" ) func TestCheckpoint(t *testing.T) { diff --git a/memory_usage.go b/memory_usage.go index 740b877d..e57046ab 100644 --- a/memory_usage.go +++ b/memory_usage.go @@ -55,4 +55,4 @@ func GetApproximateMemoryUsageByType(dbs []*DB, caches []*Cache) (*MemoryUsage, CacheTotal: uint64(C.rocksdb_approximate_memory_usage_get_cache_total(memoryUsage)), } return result, nil -} \ No newline at end of file +} diff --git a/util.go b/util.go index ae099cd3..b6637ec3 100644 --- a/util.go +++ b/util.go @@ -1,4 +1,5 @@ package gorocksdb + // #include import "C" From 20a9aacdcf5b1b94b5d622a71dfc6496e6d6cb33 Mon Sep 17 00:00:00 2001 From: linyuanjin Date: Tue, 25 Jun 2019 14:59:38 +0800 Subject: [PATCH 20/48] add err ret for GetUpdatesSince --- db.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/db.go b/db.go index 52c37a07..3b33cd7a 100755 --- a/db.go +++ b/db.go @@ -504,10 +504,14 @@ func (db *DB) NewIteratorCF(opts *ReadOptions, cf *ColumnFamilyHandle) *Iterator return NewNativeIterator(unsafe.Pointer(cIter)) } -func 
(db *DB) GetUpdatesSince(seqNumber uint64) *WalIterator { +func (db *DB) GetUpdatesSince(seqNumber uint64) (*WalIterator, error) { var cErr *C.char cIter := C.rocksdb_get_updates_since(db.c, C.uint64_t(seqNumber), nil, &cErr) - return NewNativeWalIterator(unsafe.Pointer(cIter)) + if cErr != nil { + defer C.free(unsafe.Pointer(cErr)) + return nil, errors.New(C.GoString(cErr)) + } + return NewNativeWalIterator(unsafe.Pointer(cIter)), nil } func (db *DB) GetLatestSequenceNumber() uint64 { From 283695771fbefa34fd0f19cde85b624a78c13027 Mon Sep 17 00:00:00 2001 From: Jelte Fennema Date: Mon, 6 May 2019 16:44:51 +0200 Subject: [PATCH 21/48] Add bindings for lowering CPU and IO priority of thread pools --- env.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/env.go b/env.go index 386335bc..dfdd583a 100644 --- a/env.go +++ b/env.go @@ -33,6 +33,22 @@ func (env *Env) SetHighPriorityBackgroundThreads(n int) { C.rocksdb_env_set_high_priority_background_threads(env.c, C.int(n)) } +func (env *Env) LowerThreadPoolIOPriority() { + C.rocksdb_env_lower_thread_pool_io_priority(env.c) +} + +func (env *Env) LowerHighPriorityThreadPoolIOPriority() { + C.rocksdb_env_lower_high_priority_thread_pool_io_priority(env.c) +} + +func (env *Env) LowerThreadPoolCPUPriority() { + C.rocksdb_env_lower_thread_pool_cpu_priority(env.c) +} + +func (env *Env) LowerHighPriorityThreadPoolCPUPriority() { + C.rocksdb_env_lower_high_priority_thread_pool_cpu_priority(env.c) +} + // Destroy deallocates the Env object. func (env *Env) Destroy() { C.rocksdb_env_destroy(env.c) From 014684fd5bee653ace75ff6d2d00b317e52a543a Mon Sep 17 00:00:00 2001 From: Jelte Fennema Date: Tue, 25 Jun 2019 13:40:55 +0200 Subject: [PATCH 22/48] Add some helpers for Slice usage --- db_test.go | 37 ++++++++++++++++++++++++++++--------- slice.go | 8 +++++++- 2 files changed, 35 insertions(+), 10 deletions(-) diff --git a/db_test.go b/db_test.go index 3c0df0a1..4ccc7aa8 100644 --- a/db_test.go +++ b/db_test.go @@ -19,9 +19,8 @@ func TestDBCRUD(t *testing.T) { var ( givenKey = []byte("hello") - givenVal1 = []byte("world1") - givenVal2 = []byte("world2") - givenVal3 = []byte("world2") + givenVal1 = []byte("") + givenVal2 = []byte("world1") wo = NewDefaultWriteOptions() ro = NewDefaultReadOptions() ) @@ -46,7 +45,7 @@ func TestDBCRUD(t *testing.T) { v3, err := db.GetPinned(ro, givenKey) defer v3.Destroy() ensure.Nil(t, err) - ensure.DeepEqual(t, v3.Data(), givenVal3) + ensure.DeepEqual(t, v3.Data(), givenVal2) // delete ensure.Nil(t, db.Delete(wo, givenKey)) @@ -58,7 +57,7 @@ func TestDBCRUD(t *testing.T) { v5, err := db.GetPinned(ro, givenKey) defer v5.Destroy() ensure.Nil(t, err) - ensure.Nil(t, v5.Data()) + ensure.True(t, v5.Data() == nil) } func TestDBCRUDDBPaths(t *testing.T) { @@ -75,12 +74,20 @@ func TestDBCRUDDBPaths(t *testing.T) { var ( givenKey = []byte("hello") - givenVal1 = []byte("world1") - givenVal2 = []byte("world2") + givenVal1 = []byte("") + givenVal2 = []byte("world1") + givenVal3 = []byte("world2") wo = NewDefaultWriteOptions() ro = NewDefaultReadOptions() ) + // retrieve before create + noexist, err := db.Get(ro, givenKey) + defer noexist.Free() + ensure.Nil(t, err) + ensure.False(t, noexist.Exists()) + ensure.DeepEqual(t, noexist.Data(), []byte(nil)) + // create ensure.Nil(t, db.Put(wo, givenKey, givenVal1)) @@ -88,6 +95,7 @@ func TestDBCRUDDBPaths(t *testing.T) { v1, err := db.Get(ro, givenKey) defer v1.Free() ensure.Nil(t, err) + ensure.True(t, v1.Exists()) ensure.DeepEqual(t, v1.Data(), givenVal1) // update @@ -95,13 
+103,24 @@ func TestDBCRUDDBPaths(t *testing.T) { v2, err := db.Get(ro, givenKey) defer v2.Free() ensure.Nil(t, err) + ensure.True(t, v2.Exists()) ensure.DeepEqual(t, v2.Data(), givenVal2) + // update + ensure.Nil(t, db.Put(wo, givenKey, givenVal3)) + v3, err := db.Get(ro, givenKey) + defer v3.Free() + ensure.Nil(t, err) + ensure.True(t, v3.Exists()) + ensure.DeepEqual(t, v3.Data(), givenVal3) + // delete ensure.Nil(t, db.Delete(wo, givenKey)) - v3, err := db.Get(ro, givenKey) + v4, err := db.Get(ro, givenKey) + defer v4.Free() ensure.Nil(t, err) - ensure.True(t, v3.Data() == nil) + ensure.False(t, v4.Exists()) + ensure.DeepEqual(t, v4.Data(), []byte(nil)) } func newTestDB(t *testing.T, name string, applyOpts func(opts *Options)) *DB { diff --git a/slice.go b/slice.go index bcdc4c16..b450daa3 100644 --- a/slice.go +++ b/slice.go @@ -32,7 +32,8 @@ func StringToSlice(data string) *Slice { return NewSlice(C.CString(data), C.size_t(len(data))) } -// Data returns the data of the slice. +// Data returns the data of the slice. If the key doesn't exist this will be a +// nil slice. func (s *Slice) Data() []byte { return charToByte(s.data, s.size) } @@ -42,6 +43,11 @@ func (s *Slice) Size() int { return int(s.size) } +// Exists returns if the key exists +func (s *Slice) Exists() bool { + return s.data != nil +} + // Free frees the slice data. func (s *Slice) Free() { if !s.freed { From 972a7642772c3b945969464fd9f6ec88f86863b8 Mon Sep 17 00:00:00 2001 From: Jelte Fennema Date: Tue, 25 Jun 2019 13:43:22 +0200 Subject: [PATCH 23/48] Use xenial for travis --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 79a7bb6c..e22ede6b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,3 +1,4 @@ +dist: xenial language: go go: - 1.11 From a72a84b502db7fbe63a08a41bc3f713eaf13aea3 Mon Sep 17 00:00:00 2001 From: Jelte Fennema Date: Tue, 25 Jun 2019 13:46:59 +0200 Subject: [PATCH 24/48] Add -ldl needed apparently --- dynflag.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dynflag.go b/dynflag.go index 81909317..501a4dd7 100644 --- a/dynflag.go +++ b/dynflag.go @@ -2,5 +2,5 @@ package gorocksdb -// #cgo LDFLAGS: -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy +// #cgo LDFLAGS: -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -ldl import "C" From 99b1b1efbc490c6b67a31cd986a577b1e77d1f87 Mon Sep 17 00:00:00 2001 From: Jelte Fennema Date: Tue, 25 Jun 2019 13:46:59 +0200 Subject: [PATCH 25/48] Add -ldl needed apparently --- dynflag.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dynflag.go b/dynflag.go index 70072bb0..0b5a7f76 100644 --- a/dynflag.go +++ b/dynflag.go @@ -1,4 +1,4 @@ package gorocksdb -// #cgo LDFLAGS: -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy +// #cgo LDFLAGS: -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -ldl import "C" From c0434aa25bf78bddcf82c81225b042fdfbfb5408 Mon Sep 17 00:00:00 2001 From: linyuanjin Date: Thu, 27 Jun 2019 12:04:30 +0800 Subject: [PATCH 26/48] add SetWALRecoveryMode --- options.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/options.go b/options.go index a717bf89..1d650ea5 100644 --- a/options.go +++ b/options.go @@ -60,6 +60,15 @@ const ( FatalInfoLogLevel = InfoLogLevel(4) ) +type WALRecoveryMode int + +const ( + TolerateCorruptedTailRecords = 0 + AbsoluteConsistency = 1 + PointInTime = 2 + SkipAnyCorruptedRecords = 3 +) + // Options represent all of the available options when opening a database with Open. 
type Options struct { c *C.rocksdb_options_t @@ -801,6 +810,14 @@ func (opts *Options) SetDisableAutoCompactions(value bool) { C.rocksdb_options_set_disable_auto_compactions(opts.c, C.int(btoi(value))) } +// SetWALRecoveryMode sets the recovery mode +// +// Recovery mode to control the consistency while replaying WAL +// Default: PointInTime +func (opts *Options) SetWALRecoveryMode(mode WALRecoveryMode) { + C.rocksdb_options_set_wal_recovery_mode(opts.c, C.int(mode)) +} + // SetWALTtlSeconds sets the WAL ttl in seconds. // // The following two options affect how archived logs will be deleted. From 7930eb8ac5373d0b3c3e55feff3c1554d00e45f9 Mon Sep 17 00:00:00 2001 From: linyuanjin Date: Thu, 27 Jun 2019 12:32:06 +0800 Subject: [PATCH 27/48] add SetWALRecoveryMode --- options.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/options.go b/options.go index 1d650ea5..af9154f6 100644 --- a/options.go +++ b/options.go @@ -63,10 +63,10 @@ const ( type WALRecoveryMode int const ( - TolerateCorruptedTailRecords = 0 - AbsoluteConsistency = 1 - PointInTime = 2 - SkipAnyCorruptedRecords = 3 + TolerateCorruptedTailRecordsRecovery = 0 + AbsoluteConsistencyRecovery = 1 + PointInTimeRecovery = 2 + SkipAnyCorruptedRecordsRecovery = 3 ) // Options represent all of the available options when opening a database with Open. @@ -813,7 +813,7 @@ func (opts *Options) SetDisableAutoCompactions(value bool) { // SetWALRecoveryMode sets the recovery mode // // Recovery mode to control the consistency while replaying WAL -// Default: PointInTime +// Default: PointInTimeRecovery func (opts *Options) SetWALRecoveryMode(mode WALRecoveryMode) { C.rocksdb_options_set_wal_recovery_mode(opts.c, C.int(mode)) } From b3e64112cd8c259cdf43d8bd81e5a5ab2f1bccdb Mon Sep 17 00:00:00 2001 From: linyuanjin Date: Mon, 8 Jul 2019 11:43:47 +0800 Subject: [PATCH 28/48] correct SetWALRecoveryMode documentation --- db.go | 2 +- options.go | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/db.go b/db.go index 3b33cd7a..72652e28 100755 --- a/db.go +++ b/db.go @@ -780,7 +780,7 @@ func (db *DB) DeleteFile(name string) { C.rocksdb_delete_file(db.c, cName) } -// DeleteFileInRange deletes SST files that contain keys between the Range, [r.Start, limitKey] +// DeleteFileInRange deletes SST files that contain keys between the Range, [r.Start, r.Limit] func (db *DB) DeleteFileInRange(r Range) error { cStartKey := byteToChar(r.Start) cLimitKey := byteToChar(r.Limit) diff --git a/options.go b/options.go index af9154f6..fb846d3b 100644 --- a/options.go +++ b/options.go @@ -63,10 +63,10 @@ const ( type WALRecoveryMode int const ( - TolerateCorruptedTailRecordsRecovery = 0 - AbsoluteConsistencyRecovery = 1 - PointInTimeRecovery = 2 - SkipAnyCorruptedRecordsRecovery = 3 + TolerateCorruptedTailRecordsRecovery = WALRecoveryMode(0) + AbsoluteConsistencyRecovery = WALRecoveryMode(1) + PointInTimeRecovery = WALRecoveryMode(2) + SkipAnyCorruptedRecordsRecovery = WALRecoveryMode(3) ) // Options represent all of the available options when opening a database with Open. 
@@ -813,7 +813,7 @@ func (opts *Options) SetDisableAutoCompactions(value bool) { // SetWALRecoveryMode sets the recovery mode // // Recovery mode to control the consistency while replaying WAL -// Default: PointInTimeRecovery +// Default: TolerateCorruptedTailRecordsRecovery func (opts *Options) SetWALRecoveryMode(mode WALRecoveryMode) { C.rocksdb_options_set_wal_recovery_mode(opts.c, C.int(mode)) } From f6e26c0303067d12afda7cc896adbbb15bc98e3c Mon Sep 17 00:00:00 2001 From: linyuanjin Date: Mon, 8 Jul 2019 11:54:04 +0800 Subject: [PATCH 29/48] improve documentation of WALIterator --- wal_iterator.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/wal_iterator.go b/wal_iterator.go index 2415f6b1..c836eab9 100755 --- a/wal_iterator.go +++ b/wal_iterator.go @@ -39,6 +39,9 @@ func (iter *WalIterator) Destroy() { iter.c = nil } +// C.rocksdb_wal_iter_get_batch in the official rocksdb c wrapper has a memory leak; +// see https://github.com/facebook/rocksdb/pull/5515 and +// https://github.com/facebook/rocksdb/issues/5536 func (iter *WalIterator) GetBatch() (*WriteBatch, uint64) { var cSeq C.uint64_t cB := C.rocksdb_wal_iter_get_batch(iter.c, &cSeq) From b3df346ca07206d9edb426d8eafd395bc51e375a Mon Sep 17 00:00:00 2001 From: Yumin Xia Date: Tue, 16 Jul 2019 17:06:28 -0700 Subject: [PATCH 30/48] remove compression library flags --- dynflag.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dynflag.go b/dynflag.go index 501a4dd7..18c18f40 100644 --- a/dynflag.go +++ b/dynflag.go @@ -2,5 +2,5 @@ package gorocksdb -// #cgo LDFLAGS: -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -ldl +// #cgo LDFLAGS: -lrocksdb -lstdc++ -lm -ldl import "C" From cbd8fd230ccae9d5ab6e13922fc14cab6c4f5f18 Mon Sep 17 00:00:00 2001 From: Kirill Abrosimov Date: Fri, 26 Jul 2019 11:45:02 +0300 Subject: [PATCH 31/48] added NewNoopPrefixTransform --- slice_transform.go | 5 +++++ slice_transform_test.go | 7 +++++++ 2 files changed, 12 insertions(+) diff --git a/slice_transform.go b/slice_transform.go index e66e4d84..67bd0b25 100644 --- a/slice_transform.go +++ b/slice_transform.go @@ -23,6 +23,11 @@ func NewFixedPrefixTransform(prefixLen int) SliceTransform { return NewNativeSliceTransform(C.rocksdb_slicetransform_create_fixed_prefix(C.size_t(prefixLen))) } +// NewNoopPrefixTransform creates a new no-op prefix transform. +func NewNoopPrefixTransform() SliceTransform { + return NewNativeSliceTransform(C.rocksdb_slicetransform_create_noop()) +} + // NewNativeSliceTransform creates a SliceTransform object.
func NewNativeSliceTransform(c *C.rocksdb_slicetransform_t) SliceTransform { return nativeSliceTransform{c} diff --git a/slice_transform_test.go b/slice_transform_test.go index 1c551183..d60c7326 100644 --- a/slice_transform_test.go +++ b/slice_transform_test.go @@ -35,6 +35,13 @@ func TestFixedPrefixTransformOpen(t *testing.T) { defer db.Close() } +func TestNewNoopPrefixTransform(t *testing.T) { + db := newTestDB(t, "TestNewNoopPrefixTransform", func(opts *Options) { + opts.SetPrefixExtractor(NewNoopPrefixTransform()) + }) + defer db.Close() +} + type testSliceTransform struct { initiated bool } From 66c8e724b220eda4721e7c0efe62d0d8127c1c2a Mon Sep 17 00:00:00 2001 From: Kirill Abrosimov Date: Fri, 26 Jul 2019 11:47:05 +0300 Subject: [PATCH 32/48] added SetMemTablePrefixBloomSizeRatio() --- options.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/options.go b/options.go index 586e1f04..c1a22251 100644 --- a/options.go +++ b/options.go @@ -1166,6 +1166,17 @@ func (opts *Options) SetAllowIngestBehind(value bool) { C.rocksdb_options_set_allow_ingest_behind(opts.c, boolToChar(value)) } +// SetMemTablePrefixBloomSizeRatio sets memtable_prefix_bloom_size_ratio +// if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0, +// create prefix bloom for memtable with the size of +// write_buffer_size * memtable_prefix_bloom_size_ratio. +// If it is larger than 0.25, it is sanitized to 0.25. +// +// Default: 0 (disable) +func (opts *Options) SetMemTablePrefixBloomSizeRatio(value float64) { + C.rocksdb_options_set_memtable_prefix_bloom_size_ratio(opts.c, C.double(value)) +} + // Destroy deallocates the Options object. func (opts *Options) Destroy() { C.rocksdb_options_destroy(opts.c) From c59f9365e2619083ee79f100e16f9c273a24170c Mon Sep 17 00:00:00 2001 From: Kirill Abrosimov Date: Fri, 26 Jul 2019 11:59:18 +0300 Subject: [PATCH 33/48] added SetOptimizeFiltersForHits() --- options.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/options.go b/options.go index c1a22251..1947aa0a 100644 --- a/options.go +++ b/options.go @@ -1177,6 +1177,25 @@ func (opts *Options) SetMemTablePrefixBloomSizeRatio(value float64) { C.rocksdb_options_set_memtable_prefix_bloom_size_ratio(opts.c, C.double(value)) } +// SetOptimizeFiltersForHits sets optimize_filters_for_hits +// This flag specifies that the implementation should optimize the filters +// mainly for cases where keys are found rather than also optimize for keys +// missed. This would be used in cases where the application knows that +// there are very few misses or the performance in the case of misses is not +// important. +// +// For now, this flag allows us to not store filters for the last level i.e +// the largest level which contains data of the LSM store. For keys which +// are hits, the filters in this level are not useful because we will search +// for the data anyway. NOTE: the filters in other levels are still useful +// even for key hit because they tell us whether to look in that level or go +// to the higher level. +// +// Default: false +func (opts *Options) SetOptimizeFiltersForHits(value bool) { + C.rocksdb_options_set_optimize_filters_for_hits(opts.c, C.boolToChar(value)) +} + // Destroy deallocates the Options object. 
func (opts *Options) Destroy() { C.rocksdb_options_destroy(opts.c) From 616329e3fa8aeddb6c4020e92eed50c8f01d3ada Mon Sep 17 00:00:00 2001 From: Kirill Abrosimov Date: Fri, 26 Jul 2019 12:05:04 +0300 Subject: [PATCH 34/48] fix build --- options.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/options.go b/options.go index 1947aa0a..d27e84f1 100644 --- a/options.go +++ b/options.go @@ -1193,7 +1193,7 @@ func (opts *Options) SetMemTablePrefixBloomSizeRatio(value float64) { // // Default: false func (opts *Options) SetOptimizeFiltersForHits(value bool) { - C.rocksdb_options_set_optimize_filters_for_hits(opts.c, C.boolToChar(value)) + C.rocksdb_options_set_optimize_filters_for_hits(opts.c, C.int(btoi(value))) } // Destroy deallocates the Options object. From 65294a35551cddc68edcf8aca52b211601e2fb8d Mon Sep 17 00:00:00 2001 From: Yumin Xia Date: Mon, 29 Jul 2019 15:21:50 -0700 Subject: [PATCH 35/48] remove ldflags if static flag is set --- staticflag_linux.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/staticflag_linux.go b/staticflag_linux.go index 6653c498..3af044ef 100644 --- a/staticflag_linux.go +++ b/staticflag_linux.go @@ -2,5 +2,5 @@ package gorocksdb -// #cgo LDFLAGS: -l:librocksdb.a -l:libstdc++.a -l:libz.a -l:libbz2.a -l:libsnappy.a -lm +// #cgo LDFLAGS: -l:librocksdb.a -l:libstdc++.a -lm -ldl import "C" From 739037077bd1f911b5c730d2c49f566b4bbc8aba Mon Sep 17 00:00:00 2001 From: bibby Date: Tue, 20 Aug 2019 17:33:44 -0400 Subject: [PATCH 36/48] Adding SetPrefixSameAsStart - this option was not exposed but is available in c.h - setting to true ensures a prefix-seek iterator only iterates over values matching the prefix ref: https://github.com/facebook/rocksdb/wiki/Prefix-Seek-API-Changes#prefix-seek-api --- options_read.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/options_read.go b/options_read.go index 997fa163..6a37cc48 100644 --- a/options_read.go +++ b/options_read.go @@ -48,6 +48,17 @@ func (opts *ReadOptions) SetVerifyChecksums(value bool) { C.rocksdb_readoptions_set_verify_checksums(opts.c, boolToChar(value)) } +// SetPrefixSameAsStart Enforce that the iterator only iterates over the same +// prefix as the seek. +// This option is effective only for prefix seeks, i.e. prefix_extractor is +// non-null for the column family and total_order_seek is false. Unlike +// iterate_upper_bound, prefix_same_as_start only works within a prefix +// but in both directions. +// Default: false +func (opts *ReadOptions) SetPrefixSameAsStart(value bool) { + C.rocksdb_readoptions_set_prefix_same_as_start(opts.c, boolToChar(value)) +} + // SetFillCache specify whether the "data block"/"index block"/"filter block" // read for this iteration should be cached in memory? // Callers may wish to set this field to false for bulk scans. From 8354395dda16f82ab7139407ee55206a4c5cd50c Mon Sep 17 00:00:00 2001 From: jacky Date: Wed, 21 Aug 2019 16:01:28 +0800 Subject: [PATCH 37/48] Add PutLogData support for WriteBatch object, for users to build a replication system with RocksDB WAL --- write_batch.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/write_batch.go b/write_batch.go index e66c5b56..f894427b 100644 --- a/write_batch.go +++ b/write_batch.go @@ -41,6 +41,12 @@ func (wb *WriteBatch) PutCF(cf *ColumnFamilyHandle, key, value []byte) { C.rocksdb_writebatch_put_cf(wb.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value))) } +// Append a blob of arbitrary size to the records in this batch. 
+func (wb *WriteBatch) PutLogData(blob []byte) { + cBlob := byteToChar(blob) + C.rocksdb_writebatch_put_log_data(wb.c, cBlob, C.size_t(len(blob))) +} + // Merge queues a merge of "value" with the existing value of "key". func (wb *WriteBatch) Merge(key, value []byte) { cKey := byteToChar(key) From 18a098be267ff80f9ab0c6e197ee7891867a199a Mon Sep 17 00:00:00 2001 From: bibby Date: Fri, 13 Sep 2019 15:12:58 -0400 Subject: [PATCH 38/48] Problem The current PartialMerge function accepts a left and right operand and is called many times by this function: https://github.com/DataDog/gorocksdb/blob/master/merge_operator.go#L100 This means implementors may have to deserialize / unmarshal those byte arrays into comparable objects for merging many times. Proposal: Split the interface. MergeOperator only requires that you implement `FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool)`. In addition, you can implement either `PartialMerge(key, leftOperand, rightOperand []byte) ([]byte, bool)` or `PartialMergeMulti(key []byte, operands [][]byte) ([]byte, bool)`. In `gorocksdb_mergeoperator_partial_merge_multi` I do a type switch to see if either PartialMerger or MultiMerger is implemented. This should result in the same functionality for all existing users, but give users the option to implement PartialMergeMulti if it is more efficient for their use case. Tests: I added tests that result in actual merges taking place. By setting a value, compacting, then doing multiple merge operations, the test ensures that both a PartialMerge and a FullMerge occur. --- merge_operator.go | 52 +++++++++++++--- merge_operator_test.go | 137 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 178 insertions(+), 11 deletions(-) diff --git a/merge_operator.go b/merge_operator.go index 33f83948..2de7f9ab 100644 --- a/merge_operator.go +++ b/merge_operator.go @@ -28,6 +28,14 @@ type MergeOperator interface { // internal corruption. This will be treated as an error by the library. FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool) + // The name of the MergeOperator. + Name() string +} + +// PartialMerger implements PartialMerge(key, leftOperand, rightOperand []byte) ([]byte, bool). +// When a MergeOperator implements this interface, PartialMerge will be called in addition +// to FullMerge for compactions across levels +type PartialMerger interface { // This function performs merge(left_op, right_op) // when both the operands are themselves merge operation types // that you would have passed to a db.Merge() call in the same order // (i.e.: db.Merge(key,left_op), followed by db.Merge(key,right_op)). // // PartialMerge should combine them into a single merge operation. // The return value should be constructed such that a call to // db.Merge(key, new_value) would yield the same result as a call // to db.Merge(key,left_op), followed by db.Merge(key,right_op). // // If it is impossible or infeasible to combine the two operations, return false. // The library will internally keep track of the operations, and apply them in the // correct order once a base-value (a Put/Delete/End-of-Database) is seen. PartialMerge(key, leftOperand, rightOperand []byte) ([]byte, bool) } - - // The name of the MergeOperator. - Name() string +// MultiMerger implements PartialMergeMulti(key []byte, operands [][]byte) ([]byte, bool). +// When a MergeOperator implements this interface, PartialMergeMulti will be called in addition +// to FullMerge for compactions across levels +type MultiMerger interface { + // PartialMergeMulti performs a merge on multiple operands + // when all of the operands are themselves merge operation types + // that you would have passed to a db.Merge() call in the same order + // (i.e.: db.Merge(key,operand[0]), followed by db.Merge(key,operand[1]), + // ... db.Merge(key, operand[n])). + // + // PartialMergeMulti should combine them into a single merge operation.
+ // The return value should be constructed such that a call to + // db.Merge(key, new_value) would yield the same result as a call + // to db.Merge(key,operand[0]), followed by db.Merge(key,operand[1]), + // ... db.Merge(key, operand[n])). + // + // If it is impossible or infeasible to combine the operations, return false. + // The library will internally keep track of the operations, and apply them in the + // correct order once a base-value (a Put/Delete/End-of-Database) is seen. + PartialMergeMulti(key []byte, operands [][]byte) ([]byte, bool) } // NewNativeMergeOperator creates a MergeOperator object. @@ -110,13 +137,22 @@ func gorocksdb_mergeoperator_partial_merge_multi(idx int, cKey *C.char, cKeyLen success := true merger := mergeOperators.Get(idx).(mergeOperatorWrapper).mergeOperator - leftOperand := operands[0] - for i := 1; i < int(cNumOperands); i++ { - newValue, success = merger.PartialMerge(key, leftOperand, operands[i]) - if !success { - break + + // check if this MergeOperator supports partial or multi merges + switch v := merger.(type) { + case MultiMerger: + newValue, success = v.PartialMergeMulti(key, operands) + case PartialMerger: + leftOperand := operands[0] + for i := 1; i < int(cNumOperands); i++ { + newValue, success = v.PartialMerge(key, leftOperand, operands[i]) + if !success { + break + } + leftOperand = newValue } - leftOperand = newValue + default: + success = false } newValueLen := len(newValue) diff --git a/merge_operator_test.go b/merge_operator_test.go index fd7e0887..9dad6f78 100644 --- a/merge_operator_test.go +++ b/merge_operator_test.go @@ -40,15 +40,146 @@ func TestMergeOperator(t *testing.T) { ensure.DeepEqual(t, v1.Data(), givenMerged) } +func TestPartialMergeOperator(t *testing.T) { + var ( + givenKey = []byte("hello") + startingVal = []byte("foo") + mergeVal1 = []byte("bar") + mergeVal2 = []byte("baz") + fMergeResult = []byte("foobarbaz") + pMergeResult = []byte("barbaz") + ) + + merger := &mockMergePartialOperator{ + fullMerge: func(key, existingValue []byte, operands [][]byte) ([]byte, bool) { + ensure.DeepEqual(&fatalAsError{t}, key, givenKey) + ensure.DeepEqual(&fatalAsError{t}, existingValue, startingVal) + ensure.DeepEqual(&fatalAsError{t}, operands[0], pMergeResult) + return fMergeResult, true + }, + partialMerge: func(key, leftOperand, rightOperand []byte) ([]byte, bool) { + ensure.DeepEqual(&fatalAsError{t}, key, givenKey) + ensure.DeepEqual(&fatalAsError{t}, leftOperand, mergeVal1) + ensure.DeepEqual(&fatalAsError{t}, rightOperand, mergeVal2) + return pMergeResult, true + }, + } + db := newTestDB(t, "TestMergeOperator", func(opts *Options) { + opts.SetMergeOperator(merger) + }) + defer db.Close() + + wo := NewDefaultWriteOptions() + defer wo.Destroy() + + // insert a starting value and compact to trigger merges + ensure.Nil(t, db.Put(wo, givenKey, startingVal)) + + // trigger a compaction to ensure that a merge is performed + db.CompactRange(Range{nil, nil}) + + // we expect these two operands to be passed to merge partial + ensure.Nil(t, db.Merge(wo, givenKey, mergeVal1)) + ensure.Nil(t, db.Merge(wo, givenKey, mergeVal2)) + + // trigger a compaction to ensure that a + // partial and full merge are performed + db.CompactRange(Range{nil, nil}) + + ro := NewDefaultReadOptions() + v1, err := db.Get(ro, givenKey) + defer v1.Free() + ensure.Nil(t, err) + ensure.DeepEqual(t, v1.Data(), fMergeResult) + +} + +func TestMergeMultiOperator(t *testing.T) { + var ( + givenKey = []byte("hello") + startingVal = []byte("foo") + mergeVal1 = []byte("bar") 
+ mergeVal2 = []byte("baz") + fMergeResult = []byte("foobarbaz") + pMergeResult = []byte("barbaz") + ) + + merger := &mockMergeMultiOperator{ + fullMerge: func(key, existingValue []byte, operands [][]byte) ([]byte, bool) { + ensure.DeepEqual(&fatalAsError{t}, key, givenKey) + ensure.DeepEqual(&fatalAsError{t}, existingValue, startingVal) + ensure.DeepEqual(&fatalAsError{t}, operands[0], pMergeResult) + return fMergeResult, true + }, + partialMergeMulti: func(key []byte, operands [][]byte) ([]byte, bool) { + ensure.DeepEqual(&fatalAsError{t}, key, givenKey) + ensure.DeepEqual(&fatalAsError{t}, operands[0], mergeVal1) + ensure.DeepEqual(&fatalAsError{t}, operands[1], mergeVal2) + return pMergeResult, true + }, + } + db := newTestDB(t, "TestMergeOperator", func(opts *Options) { + opts.SetMergeOperator(merger) + }) + defer db.Close() + + wo := NewDefaultWriteOptions() + defer wo.Destroy() + + // insert a starting value and compact to trigger merges + ensure.Nil(t, db.Put(wo, givenKey, startingVal)) + + // trigger a compaction to ensure that a merge is performed + db.CompactRange(Range{nil, nil}) + + // we expect these two operands to be passed to merge multi + ensure.Nil(t, db.Merge(wo, givenKey, mergeVal1)) + ensure.Nil(t, db.Merge(wo, givenKey, mergeVal2)) + + // trigger a compaction to ensure that a + // partial and full merge are performed + db.CompactRange(Range{nil, nil}) + + ro := NewDefaultReadOptions() + v1, err := db.Get(ro, givenKey) + defer v1.Free() + ensure.Nil(t, err) + ensure.DeepEqual(t, v1.Data(), fMergeResult) + +} + +// Mock Objects type mockMergeOperator struct { - fullMerge func(key, existingValue []byte, operands [][]byte) ([]byte, bool) - partialMerge func(key, leftOperand, rightOperand []byte) ([]byte, bool) + fullMerge func(key, existingValue []byte, operands [][]byte) ([]byte, bool) } func (m *mockMergeOperator) Name() string { return "gorocksdb.test" } func (m *mockMergeOperator) FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool) { return m.fullMerge(key, existingValue, operands) } -func (m *mockMergeOperator) PartialMerge(key, leftOperand, rightOperand []byte) ([]byte, bool) { + +type mockMergeMultiOperator struct { + fullMerge func(key, existingValue []byte, operands [][]byte) ([]byte, bool) + partialMergeMulti func(key []byte, operands [][]byte) ([]byte, bool) +} + +func (m *mockMergeMultiOperator) Name() string { return "gorocksdb.multi" } +func (m *mockMergeMultiOperator) FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool) { + return m.fullMerge(key, existingValue, operands) +} +func (m *mockMergeMultiOperator) PartialMergeMulti(key []byte, operands [][]byte) ([]byte, bool) { + return m.partialMergeMulti(key, operands) +} + +type mockMergePartialOperator struct { + fullMerge func(key, existingValue []byte, operands [][]byte) ([]byte, bool) + partialMerge func(key, leftOperand, rightOperand []byte) ([]byte, bool) +} + +func (m *mockMergePartialOperator) Name() string { return "gorocksdb.partial" } +func (m *mockMergePartialOperator) FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool) { + return m.fullMerge(key, existingValue, operands) +} +func (m *mockMergePartialOperator) PartialMerge(key, leftOperand, rightOperand []byte) ([]byte, bool) { return m.partialMerge(key, leftOperand, rightOperand) } From f0de349575ca7c30f05a6840f8bba18c6b95ca83 Mon Sep 17 00:00:00 2001 From: ENDOH takanao Date: Fri, 20 Sep 2019 12:05:54 +0900 Subject: [PATCH 39/48] Workaround for Linux 32-bit build --- db.go | 5 ++++- 1 file 
changed, 4 insertions(+), 1 deletion(-) diff --git a/db.go b/db.go index 3b33cd7a..9a88a4d4 100755 --- a/db.go +++ b/db.go @@ -216,7 +216,10 @@ func ListColumnFamilies(opts *Options, name string) ([]string, error) { } namesLen := int(cLen) names := make([]string, namesLen) - cNamesArr := (*[1 << 30]*C.char)(unsafe.Pointer(cNames))[:namesLen:namesLen] + // The maximum capacity of the following two slices is limited to (2^29)-1 to remain compatible + // with 32-bit platforms. The size of a `*C.char` (a pointer) is 4 Byte on a 32-bit system + // and (2^29)*4 == math.MaxInt32 + 1. -- See issue golang/go#13656 + cNamesArr := (*[(1 << 29) - 1]*C.char)(unsafe.Pointer(cNames))[:namesLen:namesLen] for i, n := range cNamesArr { names[i] = C.GoString(n) } From f43d6eebcd10c35dde7f41c211c90b5c5d1064e0 Mon Sep 17 00:00:00 2001 From: anan Date: Wed, 25 Sep 2019 14:14:55 +0800 Subject: [PATCH 40/48] use rocksdb_free to fix windows run crash --- db.go | 20 ++++++++++---------- memory_usage.go | 2 +- options.go | 4 ++-- slice.go | 2 +- slice_transform.go | 2 +- transaction.go | 2 +- wal_iterator.go | 2 +- 7 files changed, 17 insertions(+), 17 deletions(-) diff --git a/db.go b/db.go index 01824c4e..64735c61 100755 --- a/db.go +++ b/db.go @@ -51,7 +51,7 @@ func OpenDbWithTTL(opts *Options, name string, ttl int) (*DB, error) { defer C.free(unsafe.Pointer(cName)) db := C.rocksdb_open_with_ttl(opts.c, cName, C.int(ttl), &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return &DB{ @@ -267,7 +267,7 @@ func (db *DB) GetBytes(opts *ReadOptions, key []byte) ([]byte, error) { if cValue == nil { return nil, nil } - defer C.free(unsafe.Pointer(cValue)) + defer C.rocksdb_free(unsafe.Pointer(cValue)) return C.GoBytes(unsafe.Pointer(cValue), C.int(cValLen)), nil } @@ -294,7 +294,7 @@ func (db *DB) GetPinned(opts *ReadOptions, key []byte) (*PinnableSliceHandle, er ) cHandle := C.rocksdb_get_pinned(db.c, opts.c, cKey, C.size_t(len(key)), &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return NewNativePinnableSliceHandle(cHandle), nil @@ -323,7 +323,7 @@ func (db *DB) MultiGet(opts *ReadOptions, keys ...[]byte) (Slices, error) { for i, rocksErr := range rocksErrs { if rocksErr != nil { - defer C.free(unsafe.Pointer(rocksErr)) + defer C.rocksdb_free(unsafe.Pointer(rocksErr)) err := fmt.Errorf("getting %q failed: %v", string(keys[i]), C.GoString(rocksErr)) errs = append(errs, err) } @@ -375,7 +375,7 @@ func (db *DB) MultiGetCFMultiCF(opts *ReadOptions, cfs ColumnFamilyHandles, keys for i, rocksErr := range rocksErrs { if rocksErr != nil { - defer C.free(unsafe.Pointer(rocksErr)) + defer C.rocksdb_free(unsafe.Pointer(rocksErr)) err := fmt.Errorf("getting %q failed: %v", string(keys[i]), C.GoString(rocksErr)) errs = append(errs, err) } @@ -511,7 +511,7 @@ func (db *DB) GetUpdatesSince(seqNumber uint64) (*WalIterator, error) { var cErr *C.char cIter := C.rocksdb_get_updates_since(db.c, C.uint64_t(seqNumber), nil, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return NewNativeWalIterator(unsafe.Pointer(cIter)), nil @@ -538,7 +538,7 @@ func (db *DB) GetProperty(propName string) string { cprop := C.CString(propName) defer C.free(unsafe.Pointer(cprop)) cValue := C.rocksdb_property_value(db.c, cprop) - defer 
C.free(unsafe.Pointer(cValue)) + defer C.rocksdb_free(unsafe.Pointer(cValue)) return C.GoString(cValue) } @@ -685,7 +685,7 @@ func (db *DB) SetOptions(keys, values []string) error { &cErr, ) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -798,7 +798,7 @@ func (db *DB) DeleteFileInRange(r Range) error { ) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -821,7 +821,7 @@ func (db *DB) DeleteFileInRangeCF(cf *ColumnFamilyHandle, r Range) error { ) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil diff --git a/memory_usage.go b/memory_usage.go index e57046ab..7b9a6ad6 100644 --- a/memory_usage.go +++ b/memory_usage.go @@ -42,7 +42,7 @@ func GetApproximateMemoryUsageByType(dbs []*DB, caches []*Cache) (*MemoryUsage, var cErr *C.char memoryUsage := C.rocksdb_approximate_memory_usage_create(consumers, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } diff --git a/options.go b/options.go index 05e3fee7..5f80d1af 100644 --- a/options.go +++ b/options.go @@ -111,7 +111,7 @@ func GetOptionsFromString(base *Options, optStr string) (*Options, error) { newOpt := NewDefaultOptions() C.rocksdb_get_options_from_string(base.c, cOptStr, newOpt.c, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } @@ -991,7 +991,7 @@ func (opts *Options) SetFIFOCompactionOptions(value *FIFOCompactionOptions) { // GetStatisticsString returns the statistics as a string. func (opts *Options) GetStatisticsString() string { sString := C.rocksdb_options_statistics_get_string(opts.c) - defer C.free(unsafe.Pointer(sString)) + defer C.rocksdb_free(unsafe.Pointer(sString)) return C.GoString(sString) } diff --git a/slice.go b/slice.go index b450daa3..707a1f2e 100644 --- a/slice.go +++ b/slice.go @@ -51,7 +51,7 @@ func (s *Slice) Exists() bool { // Free frees the slice data. func (s *Slice) Free() { if !s.freed { - C.free(unsafe.Pointer(s.data)) + C.rocksdb_free(unsafe.Pointer(s.data)) s.freed = true } } diff --git a/slice_transform.go b/slice_transform.go index 67bd0b25..8b9b2362 100644 --- a/slice_transform.go +++ b/slice_transform.go @@ -25,7 +25,7 @@ func NewFixedPrefixTransform(prefixLen int) SliceTransform { // NewNoopPrefixTransform creates a new no-op prefix transform. func NewNoopPrefixTransform() SliceTransform { - return NewNativeSliceTransform(C.rocksdb_slicetransform_create_noop()) + return NewNativeSliceTransform(C.rocksdb_slicetransform_create_noop()) } // NewNativeSliceTransform creates a SliceTransform object. 
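Note on the idiom this patch standardizes: any pointer handed back by the RocksDB C API (error strings, property values, fetched values) was allocated inside the rocksdb library, so it must be released with rocksdb_free rather than C.free. In a Windows dynamic build the rocksdb DLL and the Go binary can be linked against different C runtimes with separate heaps, and freeing across that boundary crashes. A minimal sketch of the resulting error-handling helper, assuming this package's usual cgo preamble; convertErr itself is hypothetical and not part of this patch:

    // convertErr converts a *C.char error out-parameter set by a rocksdb_*
    // call into a Go error, releasing the C string with the allocator that
    // created it. Hypothetical helper, shown for illustration only.
    func convertErr(cErr *C.char) error {
        if cErr == nil {
            return nil
        }
        // Must be rocksdb_free, not C.free: the string lives on the
        // rocksdb library's heap.
        defer C.rocksdb_free(unsafe.Pointer(cErr))
        return errors.New(C.GoString(cErr))
    }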
diff --git a/transaction.go b/transaction.go index c0fc61d1..67c9ef09 100644 --- a/transaction.go +++ b/transaction.go @@ -74,7 +74,7 @@ func (transaction *Transaction) GetForUpdate(opts *ReadOptions, key []byte) (*Sl transaction.c, opts.c, cKey, C.size_t(len(key)), &cValLen, C.uchar(byte(1)) /*exclusive*/, &cErr, ) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return NewSlice(cValue, cValLen), nil diff --git a/wal_iterator.go b/wal_iterator.go index c836eab9..7805d7c9 100755 --- a/wal_iterator.go +++ b/wal_iterator.go @@ -28,7 +28,7 @@ func (iter *WalIterator) Err() error { var cErr *C.char C.rocksdb_wal_iter_status(iter.c, &cErr) if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) + defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil From bb1c9e93940ae6910c8d98c52557954af759f399 Mon Sep 17 00:00:00 2001 From: Thomas Adam Date: Mon, 30 Sep 2019 10:34:08 +0200 Subject: [PATCH 41/48] Revert "Add bindings for lowering CPU and IO priority of thread pools" --- env.go | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/env.go b/env.go index dfdd583a..386335bc 100644 --- a/env.go +++ b/env.go @@ -33,22 +33,6 @@ func (env *Env) SetHighPriorityBackgroundThreads(n int) { C.rocksdb_env_set_high_priority_background_threads(env.c, C.int(n)) } -func (env *Env) LowerThreadPoolIOPriority() { - C.rocksdb_env_lower_thread_pool_io_priority(env.c) -} - -func (env *Env) LowerHighPriorityThreadPoolIOPriority() { - C.rocksdb_env_lower_high_priority_thread_pool_io_priority(env.c) -} - -func (env *Env) LowerThreadPoolCPUPriority() { - C.rocksdb_env_lower_thread_pool_cpu_priority(env.c) -} - -func (env *Env) LowerHighPriorityThreadPoolCPUPriority() { - C.rocksdb_env_lower_high_priority_thread_pool_cpu_priority(env.c) -} - // Destroy deallocates the Env object. func (env *Env) Destroy() { C.rocksdb_env_destroy(env.c) From 55ce39d543e8c91e387b8585888fdf205b2746b1 Mon Sep 17 00:00:00 2001 From: bibby Date: Wed, 16 Oct 2019 15:36:33 -0400 Subject: [PATCH 42/48] Attempting to destroy the opts.cst will segfault Reference: https://github.com/facebook/rocksdb/issues/1095 The cst has already been consumed when associated with the prefix_extractor --- options.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/options.go b/options.go index 5f80d1af..07000215 100644 --- a/options.go +++ b/options.go @@ -1202,9 +1202,8 @@ func (opts *Options) Destroy() { if opts.ccmp != nil { C.rocksdb_comparator_destroy(opts.ccmp) } - if opts.cst != nil { - C.rocksdb_slicetransform_destroy(opts.cst) - } + // don't destroy the opts.cst here, it has already been + // associated with a PrefixExtractor and this will segfault if opts.ccf != nil { C.rocksdb_compactionfilter_destroy(opts.ccf) } From a1e313dd9526957c8989c40e4237adf3e291a499 Mon Sep 17 00:00:00 2001 From: Mohak Shah Date: Fri, 4 Oct 2019 18:04:40 +0530 Subject: [PATCH 43/48] Add CreateNewBackupFlush alongside CreateNewBackup CreateNewBackupFlush, when flush is set to true, flushes the WAL before creating the backup. When flush is set to false, it acts exactly like CreateNewBackup and backs up the WAL in a separate, non-atomic operation.
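A minimal usage sketch (the backup path is hypothetical, error handling is illustrative, and db is assumed to be an already-open *DB; NewDefaultOptions and OpenBackupEngine are the existing helpers in this package):

    opts := NewDefaultOptions()
    be, err := OpenBackupEngine(opts, "/tmp/rocksdb-backup") // hypothetical path
    if err != nil {
        log.Fatal(err)
    }
    defer be.Close()

    // flush=true flushes the WAL first, so the backup is taken in one
    // consistent step instead of copying the WAL separately.
    if err := be.CreateNewBackupFlush(db, true); err != nil {
        log.Fatal(err)
    }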
--- backup.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/backup.go b/backup.go index 4b37379c..8de6c698 100644 --- a/backup.go +++ b/backup.go @@ -104,11 +104,12 @@ func (b *BackupEngine) UnsafeGetBackupEngine() unsafe.Pointer { return unsafe.Pointer(b.c) } -// CreateNewBackup takes a new backup from db. -func (b *BackupEngine) CreateNewBackup(db *DB) error { +// CreateNewBackupFlush takes a new backup from db. If flush is set to true, +// it flushes the WAL before taking the backup. +func (b *BackupEngine) CreateNewBackupFlush(db *DB, flush bool) error { var cErr *C.char - C.rocksdb_backup_engine_create_new_backup(b.c, db.c, &cErr) + C.rocksdb_backup_engine_create_new_backup_flush(b.c, db.c, boolToChar(flush), &cErr) if cErr != nil { defer C.rocksdb_free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) @@ -117,6 +118,11 @@ func (b *BackupEngine) CreateNewBackup(db *DB) error { return nil } +// CreateNewBackup takes a new backup from db. +func (b *BackupEngine) CreateNewBackup(db *DB) error { + return b.CreateNewBackupFlush(db, false) +} + // GetInfo gets an object that gives information about // the backups that have already been taken func (b *BackupEngine) GetInfo() *BackupEngineInfo { From 1c5d8be374bdc1684bb180ed41602df7fa8ecae3 Mon Sep 17 00:00:00 2001 From: Mohak Shah Date: Mon, 7 Oct 2019 15:40:24 +0530 Subject: [PATCH 44/48] Add the method PurgeOldBackups to BackupEngine --- backup.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/backup.go b/backup.go index 8de6c698..87621dd9 100644 --- a/backup.go +++ b/backup.go @@ -150,6 +150,17 @@ func (b *BackupEngine) RestoreDBFromLatestBackup(dbDir, walDir string, ro *Resto return nil } +// PurgeOldBackups deletes all backups older than the latest 'n' backups +func (b *BackupEngine) PurgeOldBackups(n uint32) error { + var cErr *C.char + C.rocksdb_backup_engine_purge_old_backups(b.c, C.uint32_t(n), &cErr) + if cErr != nil { + defer C.rocksdb_free(unsafe.Pointer(cErr)) + return errors.New(C.GoString(cErr)) + } + return nil +} + // Close close the backup engine and cleans up state // The backups already taken remain on storage. func (b *BackupEngine) Close() { From 343ac0743cf872fc0016eb686fba94b082cf8c8e Mon Sep 17 00:00:00 2001 From: Kenny House Date: Tue, 5 Nov 2019 08:34:15 -0500 Subject: [PATCH 45/48] Add missing table options and full bloom filter --- filter_policy.go | 6 +++ options_block_based_table.go | 79 ++++++++++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+) diff --git a/filter_policy.go b/filter_policy.go index ac57fd99..a9c222b0 100644 --- a/filter_policy.go +++ b/filter_policy.go @@ -49,6 +49,12 @@ func NewBloomFilter(bitsPerKey int) FilterPolicy { return NewNativeFilterPolicy(C.rocksdb_filterpolicy_create_bloom(C.int(bitsPerKey))) } +// NewBloomFilterFull returns a new filter policy created with use_block_based_builder=false +// (use full or partitioned filter). +func NewBloomFilterFull(bitsPerKey int) FilterPolicy { + return NewNativeFilterPolicy(C.rocksdb_filterpolicy_create_bloom_full(C.int(bitsPerKey))) +} + // Hold references to filter policies. 
var filterPolicies = NewCOWList() diff --git a/options_block_based_table.go b/options_block_based_table.go index e91bed01..80244132 100644 --- a/options_block_based_table.go +++ b/options_block_based_table.go @@ -56,6 +56,14 @@ func (opts *BlockBasedTableOptions) SetCacheIndexAndFilterBlocks(value bool) { C.rocksdb_block_based_options_set_cache_index_and_filter_blocks(opts.c, boolToChar(value)) } +// SetCacheIndexAndFilterBlocksWithHighPriority sets cache index and filter +// blocks with high priority (if cache_index_and_filter_blocks is enabled). +// If set to true, depending on implementation of block cache, +// index and filter blocks may be less likely to be evicted than data blocks. +func (opts *BlockBasedTableOptions) SetCacheIndexAndFilterBlocksWithHighPriority(value bool) { + C.rocksdb_block_based_options_set_cache_index_and_filter_blocks_with_high_priority(opts.c, boolToChar(value)) +} + // SetPinL0FilterAndIndexBlocksInCache sets cache_index_and_filter_blocks. // If is true and the below is true (hash_index_allow_collision), then // filter and index blocks are stored in the cache, but a reference is @@ -65,6 +73,15 @@ func (opts *BlockBasedTableOptions) SetPinL0FilterAndIndexBlocksInCache(value bo C.rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache(opts.c, boolToChar(value)) } +// SetPinTopLevelIndexAndFilter set that if cache_index_and_filter_blocks is true, then +// the top-level index of partitioned filter and index blocks are stored in +// the cache, but a reference is held in the "table reader" object so the +// blocks are pinned and only evicted from cache when the table reader is +// freed. This is not limited to l0 in LSM tree. +func (opts *BlockBasedTableOptions) SetPinTopLevelIndexAndFilter(value bool) { + C.rocksdb_block_based_options_set_pin_top_level_index_and_filter(opts.c, boolToChar(value)) +} + // SetBlockSize sets the approximate size of user data packed per block. // Note that the block size specified here corresponds opts uncompressed data. // The actual size of the unit read from disk may be smaller if @@ -94,6 +111,39 @@ func (opts *BlockBasedTableOptions) SetBlockRestartInterval(blockRestartInterval C.rocksdb_block_based_options_set_block_restart_interval(opts.c, C.int(blockRestartInterval)) } +// SetIndexBlockRestartInterval is the same as SetBlockRestartInterval but used for the index block. +// Default: 1 +func (opts *BlockBasedTableOptions) SetIndexBlockRestartInterval(indexBlockRestartInterval int) { + C.rocksdb_block_based_options_set_index_block_restart_interval(opts.c, C.int(indexBlockRestartInterval)) +} + +// SetMetadataBlockSize sets the block size for partitioned metadata. +// Currently applied to indexes when +// kTwoLevelIndexSearch is used and to filters when partition_filters is used. +// Note: Since in the current implementation the filters and index partitions +// are aligned, an index/filter block is created when either index or filter +// block size reaches the specified limit. +// Note: this limit is currently applied to only index blocks; a filter +// partition is cut right after an index block is cut +// Default: 4096 +func (opts *BlockBasedTableOptions) SetMetadataBlockSize(metadataBlockSize uint64) { + C.rocksdb_block_based_options_set_metadata_block_size(opts.c, C.uint64_t(metadataBlockSize)) +} + +// SetPartitionFilters sets using partitioned full filters for each SST file. +// This option is incompatible with block-based filters. 
+// Note: currently this option requires kTwoLevelIndexSearch to be set as well. +// Default: false +func (opts *BlockBasedTableOptions) SetPartitionFilters(value bool) { + C.rocksdb_block_based_options_set_partition_filters(opts.c, boolToChar(value)) +} + +// SetUseDeltaEncoding sets using delta encoding to compress keys in blocks. +// ReadOptions::pin_data requires this option to be disabled. +func (opts *BlockBasedTableOptions) SetUseDeltaEncoding(value bool) { + C.rocksdb_block_based_options_set_use_delta_encoding(opts.c, boolToChar(value)) +} + // SetFilterPolicy sets the filter policy opts reduce disk reads. // Many applications will benefit from passing the result of // NewBloomFilterPolicy() here. @@ -141,6 +191,35 @@ func (opts *BlockBasedTableOptions) SetWholeKeyFiltering(value bool) { C.rocksdb_block_based_options_set_whole_key_filtering(opts.c, boolToChar(value)) } +// SetFormatVersion sets the format version. +// We currently have five versions: +// 0 -- This version is currently written out by all RocksDB's versions by +// default. Can be read by really old RocksDB's. Doesn't support changing +// checksum (default is CRC32). +// 1 -- Can be read by RocksDB's versions since 3.0. Supports non-default +// checksum, like xxHash. It is written by RocksDB when +// BlockBasedTableOptions::checksum is something other than kCRC32c. (version +// 0 is silently upconverted) +// 2 -- Can be read by RocksDB's versions since 3.10. Changes the way we +// encode compressed blocks with LZ4, BZip2 and Zlib compression. If you +// don't plan to run RocksDB before version 3.10, you should probably use +// this. +// 3 -- Can be read by RocksDB's versions since 5.15. Changes the way we +// encode the keys in index blocks. If you don't plan to run RocksDB before +// version 5.15, you should probably use this. +// This option only affects newly written tables. When reading existing +// tables, the information about version is read from the footer. +// 4 -- Can be read by RocksDB's versions since 5.16. Changes the way we +// encode the values in index blocks. If you don't plan to run RocksDB before +// version 5.16 and you are using index_block_restart_interval > 1, you should +// probably use this as it would reduce the index size. +// This option only affects newly written tables. When reading existing +// tables, the information about version is read from the footer. +// Default: 2 +func (opts *BlockBasedTableOptions) SetFormatVersion(version int) { + C.rocksdb_block_based_options_set_format_version(opts.c, C.int(version)) +} + // SetIndexType sets the index type used for this table. 
// kBinarySearch: // A space efficient index block that is optimized for From b74127cac9c798a756bc2904ed42b707ddea06a6 Mon Sep 17 00:00:00 2001 From: ferhat elmas Date: Fri, 15 Nov 2019 01:00:55 +0100 Subject: [PATCH 46/48] Adjust go versions in travis fixes #183 --- .travis.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index e22ede6b..9b331467 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,8 @@ dist: xenial language: go go: - - 1.11 + - 1.12.x + - 1.13.x - tip before_install: From bbc5fe767d886aa4bca5fb77b67a17da4b5ff57b Mon Sep 17 00:00:00 2001 From: Felix Date: Wed, 11 Dec 2019 15:43:39 +0100 Subject: [PATCH 47/48] add NewMemEnv --- env.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/env.go b/env.go index 386335bc..11e84ef8 100644 --- a/env.go +++ b/env.go @@ -13,6 +13,11 @@ func NewDefaultEnv() *Env { return NewNativeEnv(C.rocksdb_create_default_env()) } +// NewMemEnv creates MemEnv for in-memory testing. +func NewMemEnv() *Env { + return NewNativeEnv(C.rocksdb_create_mem_env()) +} + // NewNativeEnv creates a Environment object. func NewNativeEnv(c *C.rocksdb_env_t) *Env { return &Env{c} From 9721107d4a29f0c874dc43c997fbe921ca504390 Mon Sep 17 00:00:00 2001 From: Sumit Date: Tue, 25 Jan 2022 19:49:24 +0530 Subject: [PATCH 48/48] - fix for latest rocksdb --- db.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/db.go b/db.go index 64735c61..d67fbff3 100755 --- a/db.go +++ b/db.go @@ -592,6 +592,7 @@ func (db *DB) GetApproximateSizes(ranges []Range) []uint64 { cLimits := make([]*C.char, len(ranges)) cStartLens := make([]C.size_t, len(ranges)) cLimitLens := make([]C.size_t, len(ranges)) + cErrors := make([]*C.char, len(ranges)) for i, r := range ranges { cStarts[i] = (*C.char)(C.CBytes(r.Start)) cStartLens[i] = C.size_t(len(r.Start)) @@ -613,7 +614,8 @@ func (db *DB) GetApproximateSizes(ranges []Range) []uint64 { &cStartLens[0], &cLimits[0], &cLimitLens[0], - (*C.uint64_t)(&sizes[0])) + (*C.uint64_t)(&sizes[0]), + &cErrors[0]) return sizes } @@ -633,6 +635,7 @@ func (db *DB) GetApproximateSizesCF(cf *ColumnFamilyHandle, ranges []Range) []ui cLimits := make([]*C.char, len(ranges)) cStartLens := make([]C.size_t, len(ranges)) cLimitLens := make([]C.size_t, len(ranges)) + cErrors := make([]*C.char, len(ranges)) for i, r := range ranges { cStarts[i] = (*C.char)(C.CBytes(r.Start)) cStartLens[i] = C.size_t(len(r.Start)) @@ -655,7 +658,8 @@ func (db *DB) GetApproximateSizesCF(cf *ColumnFamilyHandle, ranges []Range) []ui &cStartLens[0], &cLimits[0], &cLimitLens[0], - (*C.uint64_t)(&sizes[0])) + (*C.uint64_t)(&sizes[0]), + &cErrors[0]) return sizes }
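For reference, a minimal sketch of driving the updated binding from user code (the key range is hypothetical and db is assumed open; Range and GetApproximateSizes are as defined above):

    ranges := []Range{
        {Start: []byte("a"), Limit: []byte("m")}, // hypothetical key range
    }
    sizes := db.GetApproximateSizes(ranges)
    fmt.Printf("approximate size of [a, m): %d bytes\n", sizes[0])

Note the design choice visible in the diff: the new cErrors slots only satisfy the extra out-parameter that newer RocksDB versions added to rocksdb_approximate_sizes; any per-range errors are still discarded, which keeps the exported Go signature unchanged.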