From 8fe79651c4b769c0773fd57dbad4c9c5302e3cd3 Mon Sep 17 00:00:00 2001
From: Jelte Fennema
Date: Thu, 11 May 2017 15:26:58 +0200
Subject: [PATCH 01/55] Add support for checkpoints

---
 checkpoint.go | 52 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 52 insertions(+)
 create mode 100644 checkpoint.go

diff --git a/checkpoint.go b/checkpoint.go
new file mode 100644
index 00000000..a91b0c85
--- /dev/null
+++ b/checkpoint.go
@@ -0,0 +1,52 @@
+package gorocksdb
+
+// #include "rocksdb/c.h"
+// #include <stdlib.h>
+import "C"
+import (
+	"errors"
+	"unsafe"
+)
+
+// Checkpoint provides the ability to create consistent snapshots of a database.
+type Checkpoint struct {
+	c *C.rocksdb_checkpoint_t
+}
+
+// NewCheckpoint creates a new Checkpoint object for the given database.
+func NewCheckpoint(db *DB) (*Checkpoint, error) {
+	var cErr *C.char
+	cCheckpoint := C.rocksdb_checkpoint_object_create(db.c, &cErr)
+
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return nil, errors.New(C.GoString(cErr))
+	}
+	return NewNativeCheckpoint(cCheckpoint), nil
+}
+
+// NewNativeCheckpoint creates a Checkpoint object.
+func NewNativeCheckpoint(c *C.rocksdb_checkpoint_t) *Checkpoint {
+	return &Checkpoint{c}
+}
+
+// CreateCheckpoint creates the actual checkpoint in the specified directory.
+// The logSizeForFlush argument is used as the maximum size (in bytes) of the WAL
+// files that will be copied. If the size of the WAL files is larger than the
+// specified size the memtables will be flushed to disk before making the
+// checkpoint. Otherwise the WAL files will simply be copied.
+func (c *Checkpoint) CreateCheckpoint(dir string, logSizeForFlush uint64) error {
+	var cErr *C.char
+	C.rocksdb_checkpoint_create(c.c, C.CString(dir), C.uint64_t(logSizeForFlush), &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return errors.New(C.GoString(cErr))
+	}
+	return nil
+}
+
+// Destroy deallocates the Checkpoint object.
+func (c *Checkpoint) Destroy() {
+	C.rocksdb_checkpoint_object_destroy(c.c)
+	c.c = nil
+}
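A minimal usage sketch of the checkpoint API added above (not part of the patch series; the import path, database directory, and the 64 MiB flush threshold are illustrative assumptions):

package main

import (
	"log"

	"github.com/tecbot/gorocksdb" // assumed upstream import path
)

func main() {
	opts := gorocksdb.NewDefaultOptions()
	opts.SetCreateIfMissing(true)
	db, err := gorocksdb.OpenDb(opts, "/tmp/rocksdb-example")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	cp, err := gorocksdb.NewCheckpoint(db)
	if err != nil {
		log.Fatal(err)
	}
	defer cp.Destroy()

	// Copies the WAL as-is unless it is larger than 64 MiB, in which case
	// the memtables are flushed first (see the CreateCheckpoint doc above).
	if err := cp.CreateCheckpoint("/tmp/rocksdb-checkpoint", 64<<20); err != nil {
		log.Fatal(err)
	}
}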
From e06553f82df9fcb2e9fc35cd0e5e8930168e4f28 Mon Sep 17 00:00:00 2001
From: Jelte Fennema
Date: Tue, 26 Sep 2017 11:51:46 +0200
Subject: [PATCH 02/55] Revert "Merge branch 'checkpoint'"

This reverts commit e0dec52798168d6c1a896223f8f4a8b21c771fc6, reversing
changes made to 0c8578d6d39ca2b703779607454b1a7cb6614e61.
---
 checkpoint.go | 52 ---------------------------------------------------
 1 file changed, 52 deletions(-)
 delete mode 100644 checkpoint.go

diff --git a/checkpoint.go b/checkpoint.go
deleted file mode 100644
index a91b0c85..00000000
--- a/checkpoint.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package gorocksdb
-
-// #include "rocksdb/c.h"
-// #include <stdlib.h>
-import "C"
-import (
-	"errors"
-	"unsafe"
-)
-
-// Checkpoint provides the ability to create consistent snapshots of a database.
-type Checkpoint struct {
-	c *C.rocksdb_checkpoint_t
-}
-
-// NewCheckpoint creates a new Checkpoint object for the given database.
-func NewCheckpoint(db *DB) (*Checkpoint, error) {
-	var cErr *C.char
-	cCheckpoint := C.rocksdb_checkpoint_object_create(db.c, &cErr)
-
-	if cErr != nil {
-		defer C.free(unsafe.Pointer(cErr))
-		return nil, errors.New(C.GoString(cErr))
-	}
-	return NewNativeCheckpoint(cCheckpoint), nil
-}
-
-// NewNativeCheckpoint creates a Checkpoint object.
-func NewNativeCheckpoint(c *C.rocksdb_checkpoint_t) *Checkpoint {
-	return &Checkpoint{c}
-}
-
-// CreateCheckpoint creates the actual checkpoint in the specified directory.
-// The logSizeForFlush argument is used as the maximum size (in bytes) of the WAL
-// files that will be copied. If the size of the WAL files is larger than the
-// specified size the memtables will be flushed to disk before making the
-// checkpoint. Otherwise the WAL files will simply be copied.
-func (c *Checkpoint) CreateCheckpoint(dir string, logSizeForFlush uint64) error {
-	var cErr *C.char
-	C.rocksdb_checkpoint_create(c.c, C.CString(dir), C.uint64_t(logSizeForFlush), &cErr)
-	if cErr != nil {
-		defer C.free(unsafe.Pointer(cErr))
-		return errors.New(C.GoString(cErr))
-	}
-	return nil
-}
-
-// Destroy deallocates the Checkpoint object.
-func (c *Checkpoint) Destroy() {
-	C.rocksdb_checkpoint_object_destroy(c.c)
-	c.c = nil
-}

From cbe0e2114f9de8fa6797d5e9c88886386ae8d6a0 Mon Sep 17 00:00:00 2001
From: Jelte Fennema
Date: Fri, 29 Sep 2017 15:07:21 +0200
Subject: [PATCH 03/55] Start with putv

---
 util.go        | 26 ++++++++++++++++++++++++++
 write_batch.go | 37 ++++++++++++++++++++++++++++++++++++-
 2 files changed, 62 insertions(+), 1 deletion(-)

diff --git a/util.go b/util.go
index 9c373306..aadde792 100644
--- a/util.go
+++ b/util.go
@@ -2,6 +2,7 @@ package gorocksdb

 import "C"
 import (
+	"fmt"
 	"reflect"
 	"unsafe"
 )
@@ -39,6 +40,31 @@ func byteToChar(b []byte) *C.char {
 	return c
 }

+func byteSliceToArray(vals [][]byte) (**C.char, *C.size_t) {
+	if len(vals) == 0 {
+		return nil, nil
+	}
+
+	chars := make([]*C.char, len(vals))
+	sizes := make([]C.size_t, len(vals))
+	for i, val := range vals {
+		chars[i] = byteToChar(val)
+		sizes[i] = C.size_t(len(val))
+	}
+
+	cCharBuf := C.malloc(C.size_t(unsafe.Sizeof(chars[0])) * C.size_t(len(chars)))
+	copied := copy(((*[1 << 32]*C.char)(cCharBuf))[:], chars)
+	fmt.Println("COPIED X BYTES:", copied)
+
+	cChars := (**C.char)(cCharBuf)
+
+	cSizes := (*C.size_t)(unsafe.Pointer(&sizes[0]))
+	fmt.Println("sizes", sizes)
+	fmt.Println("chars", chars)
+	return cChars, cSizes
+
+}
+
 // Go []byte to C string
 // The C string is allocated in the C heap using malloc.
 func cByteSlice(b []byte) *C.char {
diff --git a/write_batch.go b/write_batch.go
index a4a5b8ba..f33b12e9 100644
--- a/write_batch.go
+++ b/write_batch.go
@@ -2,7 +2,10 @@ package gorocksdb

 // #include "rocksdb/c.h"
 import "C"
-import "io"
+import (
+	"errors"
+	"io"
+)

 // WriteBatch is a batching of Puts, Merges and Deletes.
 type WriteBatch struct {
@@ -38,6 +41,38 @@ func (wb *WriteBatch) PutCF(cf *ColumnFamilyHandle, key, value []byte) {
 	C.rocksdb_writebatch_put_cf(wb.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)))
 }

+// Putv queues many key and value pairs
+func (wb *WriteBatch) Putv(keys, values [][]byte) error {
+	if len(keys) != len(values) {
+		return errors.New("Number of keys and values should be the same")
+	}
+	numKeys := C.int(len(keys))
+	cKeys, cKeySizes := byteSliceToArray(keys)
+	cValues, cValueSizes := byteSliceToArray(values)
+	C.rocksdb_writebatch_putv(
+		wb.c,
+		numKeys, cKeys, cKeySizes,
+		numKeys, cValues, cValueSizes,
+	)
+	return nil
+}
+
+// PutvCF queues many key and value pairs in a column family
+func (wb *WriteBatch) PutvCF(cf *ColumnFamilyHandle, keys, values [][]byte) error {
+	if len(keys) != len(values) {
+		return errors.New("Number of keys and values should be the same")
+	}
+	numKeys := C.int(len(keys))
+	cKeys, cKeySizes := byteSliceToArray(keys)
+	cValues, cValueSizes := byteSliceToArray(values)
+	C.rocksdb_writebatch_putv_cf(
+		wb.c, cf.c,
+		numKeys, cKeys, cKeySizes,
+		numKeys, cValues, cValueSizes,
+	)
+	return nil
+}
+
 // Merge queues a merge of "value" with the existing value of "key".
 func (wb *WriteBatch) Merge(key, value []byte) {
 	cKey := byteToChar(key)
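As a rough usage sketch of the Putv API above (hedged: PATCH 03 is explicitly a starting point, byteSliceToArray still carries its debug fmt.Println calls at this stage, and db is assumed to be an open *DB from the same package):

wb := NewWriteBatch()
defer wb.Destroy()

keys := [][]byte{[]byte("key1"), []byte("key2")}
values := [][]byte{[]byte("val1"), []byte("val2")}

// Queue all pairs with a single rocksdb_writebatch_putv call
// instead of one Put per pair.
if err := wb.Putv(keys, values); err != nil {
	log.Fatal(err)
}
if err := db.Write(NewDefaultWriteOptions(), wb); err != nil {
	log.Fatal(err)
}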
From 42cb556b2a2a350ad2e17c13786b5ac26cfd0809 Mon Sep 17 00:00:00 2001
From: Jelte Fennema
Date: Fri, 29 Sep 2017 16:47:12 +0200
Subject: [PATCH 04/55] Add ManyKeys functionality

---
 gorocksdb.c      | 39 +++++++++++++++++++++++++++++++++++++++
 gorocksdb.h      | 11 +++++++++++
 iterator.go      | 31 +++++++++++++++++++++++++++++++
 iterator_test.go | 35 +++++++++++++++++++++++++++++++++++
 4 files changed, 116 insertions(+)

diff --git a/gorocksdb.c b/gorocksdb.c
index c8258376..8d94df77 100644
--- a/gorocksdb.c
+++ b/gorocksdb.c
@@ -1,5 +1,6 @@
 #include "gorocksdb.h"
 #include "_cgo_export.h"
+#include <stdlib.h>

 /* Base */

@@ -64,3 +65,41 @@ rocksdb_slicetransform_t* gorocksdb_slicetransform_create(uintptr_t idx) {
         (unsigned char (*)(void*, const char*, size_t))(gorocksdb_slicetransform_in_range),
         (const char* (*)(void*))(gorocksdb_slicetransform_name));
 }
+
+gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys(rocksdb_iterator_t* iter, int size) {
+    int i = 0;
+    gorocksdb_many_keys_t* many_keys = (gorocksdb_many_keys_t*) malloc(sizeof(gorocksdb_many_keys_t));
+
+    char** keys;
+    size_t* key_sizes;
+    keys = (char**) malloc(size * sizeof(char*));
+    key_sizes = (size_t*) malloc(size * sizeof(size_t));
+
+    for (i = 0; i < size; i++) {
+        if (!rocksdb_iter_valid(iter)) {
+            break;
+        }
+
+        // Copy the current key
+        const char* key = rocksdb_iter_key(iter, &key_sizes[i]);
+        keys[i] = (char*) malloc(key_sizes[i] * sizeof(char));
+        memcpy(keys[i], key, key_sizes[i]);
+
+        rocksdb_iter_next(iter);
+    }
+
+    many_keys->keys = keys;
+    many_keys->key_sizes = key_sizes;
+    many_keys->found = i;
+    return many_keys;
+}
+
+void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys) {
+    for (int i = 0; i < many_keys->found; i++) {
+        free(many_keys->keys[i]);
+    }
+
+    free(many_keys->keys);
+    free(many_keys->key_sizes);
+    free(many_keys);
+}
diff --git a/gorocksdb.h b/gorocksdb.h
index 4a9968f0..5f49e7ce 100644
--- a/gorocksdb.h
+++ b/gorocksdb.h
@@ -1,6 +1,14 @@
 #include <stdint.h>
 #include "rocksdb/c.h"

+typedef struct {
+    char** keys;
+    size_t* key_sizes;
+    int found;
+
+} gorocksdb_many_keys_t;
+
+
 // This API provides convenient C wrapper functions for rocksdb client.

 /* Base */
@@ -28,3 +36,6 @@ extern void gorocksdb_mergeoperator_delete_value(void* state, const char* v, siz

 /* Slice Transform */
 extern rocksdb_slicetransform_t* gorocksdb_slicetransform_create(uintptr_t idx);
+
+extern gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys(rocksdb_iterator_t* iter, int size);
+void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys);
diff --git a/iterator.go b/iterator.go
index 4a280e2c..786332bf 100644
--- a/iterator.go
+++ b/iterator.go
@@ -2,6 +2,7 @@ package gorocksdb

 // #include <stdlib.h>
 // #include "rocksdb/c.h"
+// #include "gorocksdb.h"
 import "C"
 import (
 	"bytes"
@@ -79,6 +80,36 @@ func (iter *Iterator) Next() {
 	C.rocksdb_iter_next(iter.c)
 }

+type ManyKeys struct {
+	c *C.gorocksdb_many_keys_t
+}
+
+func (m *ManyKeys) Destroy() {
+	C.gorocksdb_destroy_many_keys(m.c)
+}
+
+func (m *ManyKeys) Found() int {
+	return int(m.c.found)
+}
+
+func (m *ManyKeys) Keys() [][]byte {
+	found := m.Found()
+	keys := make([][]byte, found)
+
+	for i := uintptr(0); i < uintptr(found); i++ {
+		chars := *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.keys)) + i*unsafe.Sizeof(m.c.keys)))
+		size := *(*C.size_t)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.key_sizes)) + i*unsafe.Sizeof(m.c.key_sizes)))
+		keys[i] = charToByte(chars, size)
+
+	}
+	return keys
+}
+
+//....
+func (iter *Iterator) NextManyKeys(size int) *ManyKeys {
+	return &ManyKeys{c: C.gorocksdb_iter_next_many_keys(iter.c, C.int(size))}
+}
+
 // Prev moves the iterator to the previous sequential key in the database.
 func (iter *Iterator) Prev() {
 	C.rocksdb_iter_prev(iter.c)
diff --git a/iterator_test.go b/iterator_test.go
index 358400ba..ddb5c9d8 100644
--- a/iterator_test.go
+++ b/iterator_test.go
@@ -1,6 +1,7 @@
 package gorocksdb

 import (
+	"fmt"
 	"testing"

 	"github.com/facebookgo/ensure"
@@ -29,3 +30,37 @@ func TestIterator(t *testing.T) {
 	ensure.Nil(t, iter.Err())
 	ensure.DeepEqual(t, actualKeys, givenKeys)
 }
+
+func TestIteratorMany(t *testing.T) {
+	db := newTestDB(t, "TestIterator", nil)
+	defer db.Close()
+
+	// insert keys
+	givenKeys := [][]byte{[]byte("key1"), []byte("key2"), []byte("key3")}
+	wo := NewDefaultWriteOptions()
+	for _, k := range givenKeys {
+		ensure.Nil(t, db.Put(wo, k, []byte("val")))
+	}
+
+	ro := NewDefaultReadOptions()
+	iter := db.NewIterator(ro)
+	defer iter.Close()
+	var actualKeys [][]byte
+	iter.SeekToFirst()
+
+	manyKeys := iter.NextManyKeys(2)
+	for manyKeys.Found() > 0 {
+		fmt.Println(manyKeys.Found())
+		for _, k := range manyKeys.Keys() {
+			fmt.Println(string(k))
+			newK := make([]byte, len(k))
+			copy(newK, k)
+			actualKeys = append(actualKeys, newK)
+		}
+		manyKeys.Destroy()
+		manyKeys = iter.NextManyKeys(2)
+	}
+	manyKeys.Destroy()
+	ensure.Nil(t, iter.Err())
+	ensure.DeepEqual(t, actualKeys, givenKeys)
+}
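One property of the ManyKeys API worth spelling out: the slices returned by Keys() alias C-allocated memory (charToByte wraps the C buffer without copying), so they are only valid until Destroy is called, which is why TestIteratorMany copies each key before appending it. A short sketch of that pattern, assuming an iterator already positioned with SeekToFirst:

manyKeys := iter.NextManyKeys(100)
kept := make([][]byte, 0, manyKeys.Found())
for _, k := range manyKeys.Keys() {
	// Deep-copy each key out of the C-owned buffer before it is freed.
	kept = append(kept, append([]byte(nil), k...))
}
manyKeys.Destroy() // frees the C buffers; kept stays valid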
From 7c8e018d62ea79831d29086a5d2ac43b0a6b75a0 Mon Sep 17 00:00:00 2001
From: Jelte Fennema
Date: Fri, 29 Sep 2017 17:19:01 +0200
Subject: [PATCH 05/55] ANSI C WOOOHOOO

---
 gorocksdb.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/gorocksdb.c b/gorocksdb.c
index 8d94df77..46784ea6 100644
--- a/gorocksdb.c
+++ b/gorocksdb.c
@@ -95,7 +95,8 @@ gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys(rocksdb_iterator_t* iter, i
 }

 void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys) {
-    for (int i = 0; i < many_keys->found; i++) {
+    int i;
+    for (i = 0; i < many_keys->found; i++) {
         free(many_keys->keys[i]);
     }

From 77769d69bde7c9ce8c8d79e31e949e4d149f75f7 Mon Sep 17 00:00:00 2001
From: miguel gomard
Date: Sat, 30 Sep 2017 08:57:01 +0200
Subject: [PATCH 06/55] wip --- gorocksdb.c | 4 ++++ gorocksdb.h | 6 +++++- iterator.go | 12 ++++++++++++ 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/gorocksdb.c b/gorocksdb.c index 46784ea6..2dde6799 100644 --- a/gorocksdb.c +++ b/gorocksdb.c @@ -94,6 +94,10 @@ gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys(rocksdb_iterator_t* iter, i return many_keys; } +gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, int size, const char* key_prefix, const char* key_limit) { + return gorocksdb_iter_next_many_keys(iter, size); +} + void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys) { int i; for (i = 0; i < many_keys->found; i++) { diff --git a/gorocksdb.h b/gorocksdb.h index 5f49e7ce..26b9b33a 100644 --- a/gorocksdb.h +++ b/gorocksdb.h @@ -37,5 +37,9 @@ extern void gorocksdb_mergeoperator_delete_value(void* state, const char* v, siz extern rocksdb_slicetransform_t* gorocksdb_slicetransform_create(uintptr_t idx); +/* Iterate many keys */ + extern gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys(rocksdb_iterator_t* iter, int size); -void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys); +extern gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, int size, const char* key_prefix, const char* key_limit); + +extern void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys); diff --git a/iterator.go b/iterator.go index 786332bf..b488939c 100644 --- a/iterator.go +++ b/iterator.go @@ -110,6 +110,18 @@ func (iter *Iterator) NextManyKeys(size int) *ManyKeys { return &ManyKeys{c: C.gorocksdb_iter_next_many_keys(iter.c, C.int(size))} } +//if seekAt.HasPrefix != nil && !bytes.HasPrefix(key, seekAt.HasPrefix) { +//return false +//} +//if seekAt.LimitKey != nil && bytes.Compare(key, seekAt.LimitKey) != -1 { +//return false +//} + +//.... +func (iter *Iterator) NextManyKeysF(size int, keyPrefix, keyEnd []byte) *ManyKeys { + return &ManyKeys{c: C.gorocksdb_iter_next_many_keys_f(iter.c, C.int(size), byteToChar(keyPrefix), byteToChar(keyEnd))} +} + // Prev moves the iterator to the previous sequential key in the database. 
func (iter *Iterator) Prev() { C.rocksdb_iter_prev(iter.c) From 38a896e2a86de5e58fa84fac451f83ba7df6b35a Mon Sep 17 00:00:00 2001 From: miguel gomard Date: Sat, 30 Sep 2017 12:25:14 +0200 Subject: [PATCH 07/55] add iter many keys with filtering --- gorocksdb.c | 50 +++++++++++++++++++++++- gorocksdb.h | 11 +++++- iterator.go | 22 +++++++---- iterator_test.go | 99 ++++++++++++++++++++++++++++++++++++++++++++++-- 4 files changed, 168 insertions(+), 14 deletions(-) diff --git a/gorocksdb.c b/gorocksdb.c index 2dde6799..51f0d997 100644 --- a/gorocksdb.c +++ b/gorocksdb.c @@ -94,8 +94,54 @@ gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys(rocksdb_iterator_t* iter, i return many_keys; } -gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, int size, const char* key_prefix, const char* key_limit) { - return gorocksdb_iter_next_many_keys(iter, size); +#include "stdio.h" + +gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, int size, const gorocksdb_many_keys_filter_t* key_filter) { + int i = 0; + gorocksdb_many_keys_t* many_keys = (gorocksdb_many_keys_t*) malloc(sizeof(gorocksdb_many_keys_t)); + + char** keys; + size_t* key_sizes; + size_t key_size, cmp_size; + keys = (char**) malloc(size * sizeof(char*)); + key_sizes = (size_t*) malloc(size * sizeof(size_t)); + + for (i = 0; i < size; i++) { + if (!rocksdb_iter_valid(iter)) { + break; + } + // Get key + const char* key = rocksdb_iter_key(iter, &key_size); + // Check filter + if (key_filter->key_prefix_s > 0) { + if (key_size < key_filter->key_prefix_s) { + break; + } + if (memcmp(key_filter->key_prefix, key, key_filter->key_prefix_s) != 0) { + break; + } + } + if (key_filter->key_end_s > 0) { + cmp_size = key_size > key_filter->key_end_s ? key_filter->key_end_s : key_size; + int c; + c = memcmp(key, key_filter->key_end, cmp_size); + if (c == 0 && key_filter->key_end_s == key_size) { + break; + } else if (c > 0) { + break; + } + } + key_sizes[i] = key_size; + keys[i] = (char*) malloc(key_sizes[i] * sizeof(char)); + memcpy(keys[i], key, key_sizes[i]); + + rocksdb_iter_next(iter); + } + + many_keys->keys = keys; + many_keys->key_sizes = key_sizes; + many_keys->found = i; + return many_keys; } void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys) { diff --git a/gorocksdb.h b/gorocksdb.h index 26b9b33a..33081f52 100644 --- a/gorocksdb.h +++ b/gorocksdb.h @@ -40,6 +40,15 @@ extern rocksdb_slicetransform_t* gorocksdb_slicetransform_create(uintptr_t idx); /* Iterate many keys */ extern gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys(rocksdb_iterator_t* iter, int size); -extern gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, int size, const char* key_prefix, const char* key_limit); + +typedef struct { + char* key_prefix; + size_t key_prefix_s; + char* key_end; + size_t key_end_s; + +} gorocksdb_many_keys_filter_t; + +extern gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, int size, const gorocksdb_many_keys_filter_t* key_filter); extern void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys); diff --git a/iterator.go b/iterator.go index b488939c..878f5d41 100644 --- a/iterator.go +++ b/iterator.go @@ -110,16 +110,22 @@ func (iter *Iterator) NextManyKeys(size int) *ManyKeys { return &ManyKeys{c: C.gorocksdb_iter_next_many_keys(iter.c, C.int(size))} } -//if seekAt.HasPrefix != nil && !bytes.HasPrefix(key, seekAt.HasPrefix) { -//return false -//} -//if seekAt.LimitKey != nil && bytes.Compare(key, 
seekAt.LimitKey) != -1 { -//return false -//} - //.... func (iter *Iterator) NextManyKeysF(size int, keyPrefix, keyEnd []byte) *ManyKeys { - return &ManyKeys{c: C.gorocksdb_iter_next_many_keys_f(iter.c, C.int(size), byteToChar(keyPrefix), byteToChar(keyEnd))} + cKeyFilter := C.gorocksdb_many_keys_filter_t{} + if len(keyPrefix) > 0 { + cKeyPrefix := C.CString(string(keyPrefix)) + defer C.free(unsafe.Pointer(cKeyPrefix)) + cKeyFilter.key_prefix = cKeyPrefix + cKeyFilter.key_prefix_s = C.size_t(len(keyPrefix)) + } + if len(keyEnd) > 0 { + cKeyEnd := C.CString(string(keyEnd)) + defer C.free(unsafe.Pointer(cKeyEnd)) + cKeyFilter.key_end = cKeyEnd + cKeyFilter.key_end_s = C.size_t(len(keyEnd)) + } + return &ManyKeys{c: C.gorocksdb_iter_next_many_keys_f(iter.c, C.int(size), &cKeyFilter)} } // Prev moves the iterator to the previous sequential key in the database. diff --git a/iterator_test.go b/iterator_test.go index ddb5c9d8..84fb9029 100644 --- a/iterator_test.go +++ b/iterator_test.go @@ -1,7 +1,6 @@ package gorocksdb import ( - "fmt" "testing" "github.com/facebookgo/ensure" @@ -50,9 +49,7 @@ func TestIteratorMany(t *testing.T) { manyKeys := iter.NextManyKeys(2) for manyKeys.Found() > 0 { - fmt.Println(manyKeys.Found()) for _, k := range manyKeys.Keys() { - fmt.Println(string(k)) newK := make([]byte, len(k)) copy(newK, k) actualKeys = append(actualKeys, newK) @@ -64,3 +61,99 @@ func TestIteratorMany(t *testing.T) { ensure.Nil(t, iter.Err()) ensure.DeepEqual(t, actualKeys, givenKeys) } + +func TestIteratorManyFOnKeyPrefix(t *testing.T) { + db := newTestDB(t, "TestIterator", nil) + defer db.Close() + + // insert keys + givenKeys := [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3"), []byte("keyB1")} + wo := NewDefaultWriteOptions() + for _, k := range givenKeys { + ensure.Nil(t, db.Put(wo, k, []byte("val"))) + } + + ro := NewDefaultReadOptions() + iter := db.NewIterator(ro) + defer iter.Close() + var actualKeys [][]byte + iter.SeekToFirst() + + manyKeys := iter.NextManyKeysF(2, []byte("keyA"), nil) + for manyKeys.Found() > 0 { + for _, k := range manyKeys.Keys() { + newK := make([]byte, len(k)) + copy(newK, k) + actualKeys = append(actualKeys, newK) + } + manyKeys.Destroy() + manyKeys = iter.NextManyKeysF(2, []byte("keyA"), nil) + } + manyKeys.Destroy() + ensure.Nil(t, iter.Err()) + ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3")}) +} + +func TestIteratorManyFOnKeyEnd(t *testing.T) { + db := newTestDB(t, "TestIterator", nil) + defer db.Close() + + // insert keys + givenKeys := [][]byte{[]byte("A"), []byte("B"), []byte("C"), []byte("C1"), []byte("D")} + wo := NewDefaultWriteOptions() + for _, k := range givenKeys { + ensure.Nil(t, db.Put(wo, k, []byte("val"))) + } + + ro := NewDefaultReadOptions() + iter := db.NewIterator(ro) + defer iter.Close() + var actualKeys [][]byte + iter.SeekToFirst() + + manyKeys := iter.NextManyKeysF(2, nil, []byte("C1")) + for manyKeys.Found() > 0 { + for _, k := range manyKeys.Keys() { + newK := make([]byte, len(k)) + copy(newK, k) + actualKeys = append(actualKeys, newK) + } + manyKeys.Destroy() + manyKeys = iter.NextManyKeysF(2, nil, []byte("C1")) + } + manyKeys.Destroy() + ensure.Nil(t, iter.Err()) + ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("A"), []byte("B"), []byte("C")}) +} + +func TestIteratorManyFOnKeyPrefixAndEnd(t *testing.T) { + db := newTestDB(t, "TestIterator", nil) + defer db.Close() + + // insert keys + givenKeys := [][]byte{[]byte("keyA"), []byte("keyB"), []byte("keyC"), []byte("keyC1")} + wo := 
NewDefaultWriteOptions() + for _, k := range givenKeys { + ensure.Nil(t, db.Put(wo, k, []byte("val"))) + } + + ro := NewDefaultReadOptions() + iter := db.NewIterator(ro) + defer iter.Close() + var actualKeys [][]byte + iter.SeekToFirst() + + manyKeys := iter.NextManyKeysF(2, []byte("key"), []byte("keyC1")) + for manyKeys.Found() > 0 { + for _, k := range manyKeys.Keys() { + newK := make([]byte, len(k)) + copy(newK, k) + actualKeys = append(actualKeys, newK) + } + manyKeys.Destroy() + manyKeys = iter.NextManyKeysF(2, []byte("key"), []byte("keyC1")) + } + manyKeys.Destroy() + ensure.Nil(t, iter.Err()) + ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("keyA"), []byte("keyB"), []byte("keyC")}) +} From 58af85d15bc98e8b1f3c7b7cf9041329868efe64 Mon Sep 17 00:00:00 2001 From: miguel gomard Date: Sat, 30 Sep 2017 16:20:18 +0200 Subject: [PATCH 08/55] cleanup --- gorocksdb.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/gorocksdb.c b/gorocksdb.c index 51f0d997..d5fafc92 100644 --- a/gorocksdb.c +++ b/gorocksdb.c @@ -94,15 +94,14 @@ gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys(rocksdb_iterator_t* iter, i return many_keys; } -#include "stdio.h" - gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, int size, const gorocksdb_many_keys_filter_t* key_filter) { int i = 0; - gorocksdb_many_keys_t* many_keys = (gorocksdb_many_keys_t*) malloc(sizeof(gorocksdb_many_keys_t)); - char** keys; size_t* key_sizes; size_t key_size, cmp_size; + + // todo: we malloc the prefetch size (improve it) + gorocksdb_many_keys_t* many_keys = (gorocksdb_many_keys_t*) malloc(sizeof(gorocksdb_many_keys_t)); keys = (char**) malloc(size * sizeof(char*)); key_sizes = (size_t*) malloc(size * sizeof(size_t)); @@ -131,10 +130,10 @@ gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, break; } } + keys[i] = (char*) malloc(key_size * sizeof(char)); + memcpy(keys[i], key, key_size); key_sizes[i] = key_size; - keys[i] = (char*) malloc(key_sizes[i] * sizeof(char)); - memcpy(keys[i], key, key_sizes[i]); - + // next rocksdb_iter_next(iter); } From ae6b37f78c7fcdec121a65342103e4cbb44ef01e Mon Sep 17 00:00:00 2001 From: miguel gomard Date: Sun, 1 Oct 2017 09:37:52 +0200 Subject: [PATCH 09/55] add NextManyKeys.Values() --- gorocksdb.c | 36 ++++++++++++++++++++++++++++++------ gorocksdb.h | 2 ++ iterator.go | 12 ++++++++++++ iterator_test.go | 27 ++++++++++++++++++++++++--- 4 files changed, 68 insertions(+), 9 deletions(-) diff --git a/gorocksdb.c b/gorocksdb.c index d5fafc92..ecbe18cb 100644 --- a/gorocksdb.c +++ b/gorocksdb.c @@ -90,22 +90,27 @@ gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys(rocksdb_iterator_t* iter, i many_keys->keys = keys; many_keys->key_sizes = key_sizes; + many_keys->values = NULL; + many_keys->value_sizes = 0; many_keys->found = i; return many_keys; } gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, int size, const gorocksdb_many_keys_filter_t* key_filter) { - int i = 0; - char** keys; - size_t* key_sizes; - size_t key_size, cmp_size; + int i; + char** keys, **values; + size_t* key_sizes, *value_sizes; + size_t key_size, value_size, cmp_size; // todo: we malloc the prefetch size (improve it) gorocksdb_many_keys_t* many_keys = (gorocksdb_many_keys_t*) malloc(sizeof(gorocksdb_many_keys_t)); keys = (char**) malloc(size * sizeof(char*)); key_sizes = (size_t*) malloc(size * sizeof(size_t)); + values = (char**) malloc(size * sizeof(char*)); + value_sizes = (size_t*) malloc(size * 
sizeof(size_t)); - for (i = 0; i < size; i++) { + i = 0; + while (i < size) { if (!rocksdb_iter_valid(iter)) { break; } @@ -130,15 +135,28 @@ gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, break; } } + // Store key keys[i] = (char*) malloc(key_size * sizeof(char)); memcpy(keys[i], key, key_size); key_sizes[i] = key_size; + // Get value and store it + const char* val = rocksdb_iter_value(iter, &value_size); + if (val != NULL) { + values[i] = (char*) malloc(value_size * sizeof(char)); + memcpy(values[i], val, value_size); + } else { + values[i] = NULL; + } + value_sizes[i] = value_size; // next rocksdb_iter_next(iter); + i++; } many_keys->keys = keys; many_keys->key_sizes = key_sizes; + many_keys->values = values; + many_keys->value_sizes = value_sizes; many_keys->found = i; return many_keys; } @@ -147,9 +165,15 @@ void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys) { int i; for (i = 0; i < many_keys->found; i++) { free(many_keys->keys[i]); + if (many_keys->values != NULL && many_keys->values[i] != NULL) { + free(many_keys->values[i]); + } } - free(many_keys->keys); free(many_keys->key_sizes); + if (many_keys->values != NULL) { + free(many_keys->values); + free(many_keys->value_sizes); + } free(many_keys); } diff --git a/gorocksdb.h b/gorocksdb.h index 33081f52..52ad8294 100644 --- a/gorocksdb.h +++ b/gorocksdb.h @@ -4,6 +4,8 @@ typedef struct { char** keys; size_t* key_sizes; + char** values; + size_t* value_sizes; int found; } gorocksdb_many_keys_t; diff --git a/iterator.go b/iterator.go index 878f5d41..3368578a 100644 --- a/iterator.go +++ b/iterator.go @@ -105,6 +105,18 @@ func (m *ManyKeys) Keys() [][]byte { return keys } +func (m *ManyKeys) Values() [][]byte { + found := m.Found() + values := make([][]byte, found) + + for i := uintptr(0); i < uintptr(found); i++ { + chars := *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.values)) + i*unsafe.Sizeof(m.c.values))) + size := *(*C.size_t)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.value_sizes)) + i*unsafe.Sizeof(m.c.value_sizes))) + values[i] = charToByte(chars, size) + } + return values +} + //.... 
func (iter *Iterator) NextManyKeys(size int) *ManyKeys { return &ManyKeys{c: C.gorocksdb_iter_next_many_keys(iter.c, C.int(size))} diff --git a/iterator_test.go b/iterator_test.go index 84fb9029..a7f2f752 100644 --- a/iterator_test.go +++ b/iterator_test.go @@ -70,13 +70,14 @@ func TestIteratorManyFOnKeyPrefix(t *testing.T) { givenKeys := [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3"), []byte("keyB1")} wo := NewDefaultWriteOptions() for _, k := range givenKeys { - ensure.Nil(t, db.Put(wo, k, []byte("val"))) + ensure.Nil(t, db.Put(wo, k, []byte("val_"+string(k)))) } ro := NewDefaultReadOptions() iter := db.NewIterator(ro) defer iter.Close() var actualKeys [][]byte + var actualValues [][]byte iter.SeekToFirst() manyKeys := iter.NextManyKeysF(2, []byte("keyA"), nil) @@ -86,12 +87,18 @@ func TestIteratorManyFOnKeyPrefix(t *testing.T) { copy(newK, k) actualKeys = append(actualKeys, newK) } + for _, v := range manyKeys.Values() { + newV := make([]byte, len(v)) + copy(newV, v) + actualValues = append(actualValues, newV) + } manyKeys.Destroy() manyKeys = iter.NextManyKeysF(2, []byte("keyA"), nil) } manyKeys.Destroy() ensure.Nil(t, iter.Err()) ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3")}) + ensure.DeepEqual(t, actualValues, [][]byte{[]byte("val_keyA1"), []byte("val_keyA2"), []byte("val_keyA3")}) } func TestIteratorManyFOnKeyEnd(t *testing.T) { @@ -102,13 +109,14 @@ func TestIteratorManyFOnKeyEnd(t *testing.T) { givenKeys := [][]byte{[]byte("A"), []byte("B"), []byte("C"), []byte("C1"), []byte("D")} wo := NewDefaultWriteOptions() for _, k := range givenKeys { - ensure.Nil(t, db.Put(wo, k, []byte("val"))) + ensure.Nil(t, db.Put(wo, k, []byte("val_"+string(k)))) } ro := NewDefaultReadOptions() iter := db.NewIterator(ro) defer iter.Close() var actualKeys [][]byte + var actualValues [][]byte iter.SeekToFirst() manyKeys := iter.NextManyKeysF(2, nil, []byte("C1")) @@ -118,12 +126,18 @@ func TestIteratorManyFOnKeyEnd(t *testing.T) { copy(newK, k) actualKeys = append(actualKeys, newK) } + for _, v := range manyKeys.Values() { + newV := make([]byte, len(v)) + copy(newV, v) + actualValues = append(actualValues, newV) + } manyKeys.Destroy() manyKeys = iter.NextManyKeysF(2, nil, []byte("C1")) } manyKeys.Destroy() ensure.Nil(t, iter.Err()) ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("A"), []byte("B"), []byte("C")}) + ensure.DeepEqual(t, actualValues, [][]byte{[]byte("val_A"), []byte("val_B"), []byte("val_C")}) } func TestIteratorManyFOnKeyPrefixAndEnd(t *testing.T) { @@ -134,13 +148,14 @@ func TestIteratorManyFOnKeyPrefixAndEnd(t *testing.T) { givenKeys := [][]byte{[]byte("keyA"), []byte("keyB"), []byte("keyC"), []byte("keyC1")} wo := NewDefaultWriteOptions() for _, k := range givenKeys { - ensure.Nil(t, db.Put(wo, k, []byte("val"))) + ensure.Nil(t, db.Put(wo, k, []byte("val_"+string(k)))) } ro := NewDefaultReadOptions() iter := db.NewIterator(ro) defer iter.Close() var actualKeys [][]byte + var actualValues [][]byte iter.SeekToFirst() manyKeys := iter.NextManyKeysF(2, []byte("key"), []byte("keyC1")) @@ -150,10 +165,16 @@ func TestIteratorManyFOnKeyPrefixAndEnd(t *testing.T) { copy(newK, k) actualKeys = append(actualKeys, newK) } + for _, v := range manyKeys.Values() { + newV := make([]byte, len(v)) + copy(newV, v) + actualValues = append(actualValues, newV) + } manyKeys.Destroy() manyKeys = iter.NextManyKeysF(2, []byte("key"), []byte("keyC1")) } manyKeys.Destroy() ensure.Nil(t, iter.Err()) ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("keyA"), 
[]byte("keyB"), []byte("keyC")}) + ensure.DeepEqual(t, actualValues, [][]byte{[]byte("val_keyA"), []byte("val_keyB"), []byte("val_keyC")}) } From 83f8a8e93b2aa910cec99c2ccedb0fb849f11b19 Mon Sep 17 00:00:00 2001 From: miguel gomard Date: Mon, 2 Oct 2017 16:59:41 +0200 Subject: [PATCH 10/55] add CGO ManySearchKeys --- gorocksdb.c | 24 ++++++++ gorocksdb.h | 15 +++++ iterator.go | 53 +++++++++++++++++ iterator_test.go | 144 +++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 236 insertions(+) diff --git a/gorocksdb.c b/gorocksdb.c index ecbe18cb..4f876988 100644 --- a/gorocksdb.c +++ b/gorocksdb.c @@ -177,3 +177,27 @@ void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys) { } free(many_keys); } + +gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* iter, const gorocksdb_keys_search_t* keys_searches, int size, int max_per_iter) { + int i; + gorocksdb_many_keys_filter_t key_filter; + gorocksdb_many_keys_t** result = (gorocksdb_many_keys_t**) malloc(size*sizeof(gorocksdb_many_keys_t*)); + for (i=0; i < size; i++) { + rocksdb_iter_seek(iter, keys_searches[i].key_from, keys_searches[i].key_from_s); + key_filter.key_prefix = keys_searches[i].key_prefix; + key_filter.key_prefix_s = keys_searches[i].key_prefix_s; + key_filter.key_end = keys_searches[i].key_end; + key_filter.key_end_s = keys_searches[i].key_end_s; + result[i] = gorocksdb_iter_next_many_keys_f(iter, max_per_iter, &key_filter); + } + return result; +} + +void gorocksdb_destroy_many_many_keys(gorocksdb_many_keys_t** many_many_keys, int size) { + int i; + for (i = 0; i < size; i++) { + gorocksdb_destroy_many_keys(many_many_keys[i]); + } + free(many_many_keys); +} + diff --git a/gorocksdb.h b/gorocksdb.h index 52ad8294..31d1473a 100644 --- a/gorocksdb.h +++ b/gorocksdb.h @@ -54,3 +54,18 @@ typedef struct { extern gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, int size, const gorocksdb_many_keys_filter_t* key_filter); extern void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys); + +typedef struct { + char* key_from; + size_t key_from_s; + char* key_prefix; + size_t key_prefix_s; + char* key_end; + size_t key_end_s; + +} gorocksdb_keys_search_t; + +gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* iter, const gorocksdb_keys_search_t* keys_searches, int size, int max_per_iter); + +void gorocksdb_destroy_many_many_keys(gorocksdb_many_keys_t** many_many_keys, int size); + diff --git a/iterator.go b/iterator.go index 3368578a..0f9e42d5 100644 --- a/iterator.go +++ b/iterator.go @@ -7,6 +7,7 @@ import "C" import ( "bytes" "errors" + "reflect" "unsafe" ) @@ -140,6 +141,58 @@ func (iter *Iterator) NextManyKeysF(size int, keyPrefix, keyEnd []byte) *ManyKey return &ManyKeys{c: C.gorocksdb_iter_next_many_keys_f(iter.c, C.int(size), &cKeyFilter)} } +type KeysSearch struct { + KeyFrom, KeyPrefix, KeyEnd []byte +} + +type ManyManyKeys struct { + c **C.gorocksdb_many_keys_t + size int +} + +func (iter *Iterator) ManySearchKeys(searches []KeysSearch, maxIterPerSearch int) *ManyManyKeys { + nbSearches := len(searches) + cManyKeysSearches := make([]C.gorocksdb_keys_search_t, nbSearches) + for i, search := range searches { + cKSearch := C.gorocksdb_keys_search_t{} + cKFrom := C.CString(string(search.KeyFrom)) + defer C.free(unsafe.Pointer(cKFrom)) + cKSearch.key_from = cKFrom + cKSearch.key_from_s = C.size_t(len(search.KeyFrom)) + if len(search.KeyPrefix) > 0 { + cKPrefix := C.CString(string(search.KeyPrefix)) + defer 
C.free(unsafe.Pointer(cKPrefix)) + cKSearch.key_prefix = cKPrefix + cKSearch.key_prefix_s = C.size_t(len(search.KeyPrefix)) + } + if len(search.KeyEnd) > 0 { + cKEnd := C.CString(string(search.KeyEnd)) + defer C.free(unsafe.Pointer(cKEnd)) + cKSearch.key_end = cKEnd + cKSearch.key_end_s = C.size_t(len(search.KeyEnd)) + } + cManyKeysSearches[i] = cKSearch + } + cManyManyKeys := C.gorocksdb_many_search_keys(iter.c, + (*C.gorocksdb_keys_search_t)(unsafe.Pointer((*reflect.SliceHeader)(unsafe.Pointer(&cManyKeysSearches)).Data)), + C.int(nbSearches), C.int(maxIterPerSearch)) + + return &ManyManyKeys{c: cManyManyKeys, size: nbSearches} +} + +func (m ManyManyKeys) Result() []*ManyKeys { + result := make([]*ManyKeys, m.size) + for i := uintptr(0); i < uintptr(m.size); i++ { + manyKeys := *(**C.gorocksdb_many_keys_t)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c)) + i*unsafe.Sizeof(m.c))) + result[i] = &ManyKeys{c: manyKeys} + } + return result +} + +func (m ManyManyKeys) Destroy() { + C.gorocksdb_destroy_many_many_keys(m.c, C.int(m.size)) +} + // Prev moves the iterator to the previous sequential key in the database. func (iter *Iterator) Prev() { C.rocksdb_iter_prev(iter.c) diff --git a/iterator_test.go b/iterator_test.go index a7f2f752..979ea47e 100644 --- a/iterator_test.go +++ b/iterator_test.go @@ -178,3 +178,147 @@ func TestIteratorManyFOnKeyPrefixAndEnd(t *testing.T) { ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("keyA"), []byte("keyB"), []byte("keyC")}) ensure.DeepEqual(t, actualValues, [][]byte{[]byte("val_keyA"), []byte("val_keyB"), []byte("val_keyC")}) } + +func TestIteratorManySearchKeys(t *testing.T) { + db := newTestDB(t, "TestIterator", nil) + defer db.Close() + + // insert keys + givenKeys := [][]byte{[]byte("A"), []byte("B"), []byte("C"), []byte("D"), []byte("E"), []byte("F")} + wo := NewDefaultWriteOptions() + for _, k := range givenKeys { + ensure.Nil(t, db.Put(wo, k, []byte("val_"+string(k)))) + } + + ro := NewDefaultReadOptions() + iter := db.NewIterator(ro) + defer iter.Close() + + searches := make([]KeysSearch, 3) + searches[0] = KeysSearch{KeyFrom: []byte("A")} + searches[1] = KeysSearch{KeyFrom: []byte("D")} + searches[2] = KeysSearch{KeyFrom: []byte("Z")} + + manyManyKeys := iter.ManySearchKeys(searches, 1000000) + defer manyManyKeys.Destroy() + result := manyManyKeys.Result() + if len(result) != len(searches) { + t.Fatalf("result len should be %d", len(searches)) + } + ensure.DeepEqual(t, result[0].Found(), 6) + ensure.DeepEqual(t, result[0].Keys(), [][]byte{[]byte("A"), []byte("B"), []byte("C"), []byte("D"), []byte("E"), []byte("F")}) + ensure.DeepEqual(t, result[0].Values(), [][]byte{[]byte("val_A"), []byte("val_B"), []byte("val_C"), []byte("val_D"), []byte("val_E"), []byte("val_F")}) + ensure.DeepEqual(t, result[1].Found(), 3) + ensure.DeepEqual(t, result[1].Keys(), [][]byte{[]byte("D"), []byte("E"), []byte("F")}) + ensure.DeepEqual(t, result[1].Values(), [][]byte{[]byte("val_D"), []byte("val_E"), []byte("val_F")}) + ensure.DeepEqual(t, result[2].Found(), 0) + ensure.DeepEqual(t, result[2].Keys(), [][]byte{}) + ensure.DeepEqual(t, result[2].Values(), [][]byte{}) +} + +func TestIteratorManySearchKeysWithKeyPrefix(t *testing.T) { + db := newTestDB(t, "TestIterator", nil) + defer db.Close() + + // insert keys + givenKeys := [][]byte{[]byte("A1"), []byte("A2"), []byte("B1"), []byte("C1"), []byte("D1"), []byte("D2")} + wo := NewDefaultWriteOptions() + for _, k := range givenKeys { + ensure.Nil(t, db.Put(wo, k, []byte("val_"+string(k)))) + } + + ro := NewDefaultReadOptions() 
+ iter := db.NewIterator(ro) + defer iter.Close() + + searches := make([]KeysSearch, 4) + searches[0] = KeysSearch{KeyFrom: []byte("A"), KeyPrefix: []byte("A")} + searches[1] = KeysSearch{KeyFrom: []byte("B"), KeyPrefix: []byte("B")} + searches[2] = KeysSearch{KeyFrom: []byte("D"), KeyPrefix: []byte("D")} + searches[3] = KeysSearch{KeyFrom: []byte("Z"), KeyPrefix: []byte("Z")} + + manyManyKeys := iter.ManySearchKeys(searches, 1000000) + defer manyManyKeys.Destroy() + result := manyManyKeys.Result() + if len(result) != len(searches) { + t.Fatalf("result len should be %d", len(searches)) + } + ensure.DeepEqual(t, result[0].Found(), 2) + ensure.DeepEqual(t, result[0].Keys(), [][]byte{[]byte("A1"), []byte("A2")}) + ensure.DeepEqual(t, result[0].Values(), [][]byte{[]byte("val_A1"), []byte("val_A2")}) + ensure.DeepEqual(t, result[1].Found(), 1) + ensure.DeepEqual(t, result[1].Keys(), [][]byte{[]byte("B1")}) + ensure.DeepEqual(t, result[1].Values(), [][]byte{[]byte("val_B1")}) + ensure.DeepEqual(t, result[2].Found(), 2) + ensure.DeepEqual(t, result[2].Keys(), [][]byte{[]byte("D1"), []byte("D2")}) + ensure.DeepEqual(t, result[2].Values(), [][]byte{[]byte("val_D1"), []byte("val_D2")}) + ensure.DeepEqual(t, result[3].Found(), 0) + ensure.DeepEqual(t, result[3].Keys(), [][]byte{}) + ensure.DeepEqual(t, result[3].Values(), [][]byte{}) +} + +func TestIteratorManySearchKeysWithKeyEnd(t *testing.T) { + db := newTestDB(t, "TestIterator", nil) + defer db.Close() + + // insert keys + givenKeys := [][]byte{[]byte("A1"), []byte("A2"), []byte("A3"), []byte("B1"), []byte("B2"), []byte("B3")} + wo := NewDefaultWriteOptions() + for _, k := range givenKeys { + ensure.Nil(t, db.Put(wo, k, []byte("val_"+string(k)))) + } + + ro := NewDefaultReadOptions() + iter := db.NewIterator(ro) + defer iter.Close() + + searches := make([]KeysSearch, 2) + searches[0] = KeysSearch{KeyFrom: []byte("A"), KeyEnd: []byte("A3")} + searches[1] = KeysSearch{KeyFrom: []byte("B"), KeyEnd: []byte("B2")} + + manyManyKeys := iter.ManySearchKeys(searches, 1000000) + defer manyManyKeys.Destroy() + result := manyManyKeys.Result() + if len(result) != len(searches) { + t.Fatalf("result len should be %d", len(searches)) + } + ensure.DeepEqual(t, result[0].Found(), 2) + ensure.DeepEqual(t, result[0].Keys(), [][]byte{[]byte("A1"), []byte("A2")}) + ensure.DeepEqual(t, result[0].Values(), [][]byte{[]byte("val_A1"), []byte("val_A2")}) + ensure.DeepEqual(t, result[1].Found(), 1) + ensure.DeepEqual(t, result[1].Keys(), [][]byte{[]byte("B1")}) + ensure.DeepEqual(t, result[1].Values(), [][]byte{[]byte("val_B1")}) +} + +func TestIteratorManySearchKeysWithKeyPrefixAndEnd(t *testing.T) { + db := newTestDB(t, "TestIterator", nil) + defer db.Close() + + // insert keys + givenKeys := [][]byte{[]byte("keyC"), []byte("keyC0"), []byte("keyC1")} + wo := NewDefaultWriteOptions() + for _, k := range givenKeys { + ensure.Nil(t, db.Put(wo, k, []byte("val_"+string(k)))) + } + + ro := NewDefaultReadOptions() + iter := db.NewIterator(ro) + defer iter.Close() + + searches := make([]KeysSearch, 2) + searches[0] = KeysSearch{KeyFrom: []byte("keyC0"), KeyPrefix: []byte("keyC"), KeyEnd: []byte("keyC1")} + searches[1] = KeysSearch{KeyFrom: []byte("k"), KeyPrefix: []byte("keyC"), KeyEnd: []byte("keyC1")} + + manyManyKeys := iter.ManySearchKeys(searches, 1000000) + defer manyManyKeys.Destroy() + result := manyManyKeys.Result() + if len(result) != len(searches) { + t.Fatalf("result len should be %d", len(searches)) + } + ensure.DeepEqual(t, result[0].Found(), 1) + 
ensure.DeepEqual(t, result[0].Keys(), [][]byte{[]byte("keyC0")}) + ensure.DeepEqual(t, result[0].Values(), [][]byte{[]byte("val_keyC0")}) + ensure.DeepEqual(t, result[1].Found(), 2) + ensure.DeepEqual(t, result[1].Keys(), [][]byte{[]byte("keyC"), []byte("keyC0")}) + ensure.DeepEqual(t, result[1].Values(), [][]byte{[]byte("val_keyC"), []byte("val_keyC0")}) +} From fef8892f0eca94bcde9e3eb78f4a4f824fbdc5fe Mon Sep 17 00:00:00 2001 From: miguel gomard Date: Mon, 2 Oct 2017 17:19:29 +0200 Subject: [PATCH 11/55] add limit config to search --- gorocksdb.c | 4 ++-- gorocksdb.h | 3 ++- iterator.go | 7 ++++--- iterator_test.go | 30 +++++++++++++++--------------- 4 files changed, 23 insertions(+), 21 deletions(-) diff --git a/gorocksdb.c b/gorocksdb.c index 4f876988..cfd8462c 100644 --- a/gorocksdb.c +++ b/gorocksdb.c @@ -178,7 +178,7 @@ void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys) { free(many_keys); } -gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* iter, const gorocksdb_keys_search_t* keys_searches, int size, int max_per_iter) { +gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* iter, const gorocksdb_keys_search_t* keys_searches, int size) { int i; gorocksdb_many_keys_filter_t key_filter; gorocksdb_many_keys_t** result = (gorocksdb_many_keys_t**) malloc(size*sizeof(gorocksdb_many_keys_t*)); @@ -188,7 +188,7 @@ gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* iter, con key_filter.key_prefix_s = keys_searches[i].key_prefix_s; key_filter.key_end = keys_searches[i].key_end; key_filter.key_end_s = keys_searches[i].key_end_s; - result[i] = gorocksdb_iter_next_many_keys_f(iter, max_per_iter, &key_filter); + result[i] = gorocksdb_iter_next_many_keys_f(iter, keys_searches[i].limit, &key_filter); } return result; } diff --git a/gorocksdb.h b/gorocksdb.h index 31d1473a..b83a1949 100644 --- a/gorocksdb.h +++ b/gorocksdb.h @@ -62,10 +62,11 @@ typedef struct { size_t key_prefix_s; char* key_end; size_t key_end_s; + int limit; } gorocksdb_keys_search_t; -gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* iter, const gorocksdb_keys_search_t* keys_searches, int size, int max_per_iter); +gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* iter, const gorocksdb_keys_search_t* keys_searches, int size); void gorocksdb_destroy_many_many_keys(gorocksdb_many_keys_t** many_many_keys, int size); diff --git a/iterator.go b/iterator.go index 0f9e42d5..27154a23 100644 --- a/iterator.go +++ b/iterator.go @@ -143,6 +143,7 @@ func (iter *Iterator) NextManyKeysF(size int, keyPrefix, keyEnd []byte) *ManyKey type KeysSearch struct { KeyFrom, KeyPrefix, KeyEnd []byte + Limit int } type ManyManyKeys struct { @@ -150,11 +151,11 @@ type ManyManyKeys struct { size int } -func (iter *Iterator) ManySearchKeys(searches []KeysSearch, maxIterPerSearch int) *ManyManyKeys { +func (iter *Iterator) ManySearchKeys(searches []KeysSearch) *ManyManyKeys { nbSearches := len(searches) cManyKeysSearches := make([]C.gorocksdb_keys_search_t, nbSearches) for i, search := range searches { - cKSearch := C.gorocksdb_keys_search_t{} + cKSearch := C.gorocksdb_keys_search_t{limit:C.int(search.Limit)} cKFrom := C.CString(string(search.KeyFrom)) defer C.free(unsafe.Pointer(cKFrom)) cKSearch.key_from = cKFrom @@ -175,7 +176,7 @@ func (iter *Iterator) ManySearchKeys(searches []KeysSearch, maxIterPerSearch int } cManyManyKeys := C.gorocksdb_many_search_keys(iter.c, 
(*C.gorocksdb_keys_search_t)(unsafe.Pointer((*reflect.SliceHeader)(unsafe.Pointer(&cManyKeysSearches)).Data)), - C.int(nbSearches), C.int(maxIterPerSearch)) + C.int(nbSearches)) return &ManyManyKeys{c: cManyManyKeys, size: nbSearches} } diff --git a/iterator_test.go b/iterator_test.go index 979ea47e..1116f860 100644 --- a/iterator_test.go +++ b/iterator_test.go @@ -195,11 +195,11 @@ func TestIteratorManySearchKeys(t *testing.T) { defer iter.Close() searches := make([]KeysSearch, 3) - searches[0] = KeysSearch{KeyFrom: []byte("A")} - searches[1] = KeysSearch{KeyFrom: []byte("D")} - searches[2] = KeysSearch{KeyFrom: []byte("Z")} + searches[0] = KeysSearch{KeyFrom: []byte("A"), Limit:1000} + searches[1] = KeysSearch{KeyFrom: []byte("D"), Limit:1000} + searches[2] = KeysSearch{KeyFrom: []byte("Z"), Limit:1000} - manyManyKeys := iter.ManySearchKeys(searches, 1000000) + manyManyKeys := iter.ManySearchKeys(searches) defer manyManyKeys.Destroy() result := manyManyKeys.Result() if len(result) != len(searches) { @@ -232,12 +232,12 @@ func TestIteratorManySearchKeysWithKeyPrefix(t *testing.T) { defer iter.Close() searches := make([]KeysSearch, 4) - searches[0] = KeysSearch{KeyFrom: []byte("A"), KeyPrefix: []byte("A")} - searches[1] = KeysSearch{KeyFrom: []byte("B"), KeyPrefix: []byte("B")} - searches[2] = KeysSearch{KeyFrom: []byte("D"), KeyPrefix: []byte("D")} - searches[3] = KeysSearch{KeyFrom: []byte("Z"), KeyPrefix: []byte("Z")} + searches[0] = KeysSearch{KeyFrom: []byte("A"), KeyPrefix: []byte("A"), Limit:1000} + searches[1] = KeysSearch{KeyFrom: []byte("B"), KeyPrefix: []byte("B"), Limit:1000} + searches[2] = KeysSearch{KeyFrom: []byte("D"), KeyPrefix: []byte("D"), Limit:1000} + searches[3] = KeysSearch{KeyFrom: []byte("Z"), KeyPrefix: []byte("Z"), Limit:1000} - manyManyKeys := iter.ManySearchKeys(searches, 1000000) + manyManyKeys := iter.ManySearchKeys(searches) defer manyManyKeys.Destroy() result := manyManyKeys.Result() if len(result) != len(searches) { @@ -273,10 +273,10 @@ func TestIteratorManySearchKeysWithKeyEnd(t *testing.T) { defer iter.Close() searches := make([]KeysSearch, 2) - searches[0] = KeysSearch{KeyFrom: []byte("A"), KeyEnd: []byte("A3")} - searches[1] = KeysSearch{KeyFrom: []byte("B"), KeyEnd: []byte("B2")} + searches[0] = KeysSearch{KeyFrom: []byte("A"), KeyEnd: []byte("A3"), Limit:1000} + searches[1] = KeysSearch{KeyFrom: []byte("B"), KeyEnd: []byte("B2"), Limit:1000} - manyManyKeys := iter.ManySearchKeys(searches, 1000000) + manyManyKeys := iter.ManySearchKeys(searches) defer manyManyKeys.Destroy() result := manyManyKeys.Result() if len(result) != len(searches) { @@ -306,10 +306,10 @@ func TestIteratorManySearchKeysWithKeyPrefixAndEnd(t *testing.T) { defer iter.Close() searches := make([]KeysSearch, 2) - searches[0] = KeysSearch{KeyFrom: []byte("keyC0"), KeyPrefix: []byte("keyC"), KeyEnd: []byte("keyC1")} - searches[1] = KeysSearch{KeyFrom: []byte("k"), KeyPrefix: []byte("keyC"), KeyEnd: []byte("keyC1")} + searches[0] = KeysSearch{KeyFrom: []byte("keyC0"), KeyPrefix: []byte("keyC"), KeyEnd: []byte("keyC1"), Limit:1000} + searches[1] = KeysSearch{KeyFrom: []byte("k"), KeyPrefix: []byte("keyC"), KeyEnd: []byte("keyC1"), Limit:1000} - manyManyKeys := iter.ManySearchKeys(searches, 1000000) + manyManyKeys := iter.ManySearchKeys(searches) defer manyManyKeys.Destroy() result := manyManyKeys.Result() if len(result) != len(searches) { From 3d951f2ce96a1d54835d65c3b185272bfda1f86c Mon Sep 17 00:00:00 2001 From: miguel gomard Date: Tue, 3 Oct 2017 08:53:14 +0200 Subject: [PATCH 12/55] 
dynamic allocation + handle infinite limit --- gorocksdb.c | 29 ++++++++++++++++------ gorocksdb.h | 2 +- iterator.go | 4 ++-- iterator_test.go | 62 +++++++++++++++++++++++++++++++++++++++--------- 4 files changed, 76 insertions(+), 21 deletions(-) diff --git a/gorocksdb.c b/gorocksdb.c index cfd8462c..ae4b843e 100644 --- a/gorocksdb.c +++ b/gorocksdb.c @@ -96,7 +96,7 @@ gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys(rocksdb_iterator_t* iter, i return many_keys; } -gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, int size, const gorocksdb_many_keys_filter_t* key_filter) { +gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, int limit, const gorocksdb_many_keys_filter_t* key_filter) { int i; char** keys, **values; size_t* key_sizes, *value_sizes; @@ -104,16 +104,18 @@ gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, // todo: we malloc the prefetch size (improve it) gorocksdb_many_keys_t* many_keys = (gorocksdb_many_keys_t*) malloc(sizeof(gorocksdb_many_keys_t)); + + int size = 512; + if (limit > 0 && limit < size) { + size = limit; + } keys = (char**) malloc(size * sizeof(char*)); key_sizes = (size_t*) malloc(size * sizeof(size_t)); values = (char**) malloc(size * sizeof(char*)); value_sizes = (size_t*) malloc(size * sizeof(size_t)); i = 0; - while (i < size) { - if (!rocksdb_iter_valid(iter)) { - break; - } + while (rocksdb_iter_valid(iter)) { // Get key const char* key = rocksdb_iter_key(iter, &key_size); // Check filter @@ -136,6 +138,14 @@ gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, } } // Store key + if (i == size) { + // realloc 2x existing size + size = size*2; + keys = (char**) realloc(keys, size * sizeof(char*)); + key_sizes = (size_t*) realloc(key_sizes, size * sizeof(size_t)); + values = (char**) realloc(values, size * sizeof(char*)); + value_sizes = (size_t*) realloc(value_sizes, size * sizeof(size_t)); + } keys[i] = (char*) malloc(key_size * sizeof(char)); memcpy(keys[i], key, key_size); key_sizes[i] = key_size; @@ -148,9 +158,14 @@ gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, values[i] = NULL; } value_sizes[i] = value_size; - // next - rocksdb_iter_next(iter); i++; + // seek next + rocksdb_iter_next(iter); + + // check limit + if (limit > 0 && i == limit) { + break; + } } many_keys->keys = keys; diff --git a/gorocksdb.h b/gorocksdb.h index b83a1949..6e8ef724 100644 --- a/gorocksdb.h +++ b/gorocksdb.h @@ -51,7 +51,7 @@ typedef struct { } gorocksdb_many_keys_filter_t; -extern gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, int size, const gorocksdb_many_keys_filter_t* key_filter); +extern gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, int limit, const gorocksdb_many_keys_filter_t* key_filter); extern void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys); diff --git a/iterator.go b/iterator.go index 27154a23..08b75f0d 100644 --- a/iterator.go +++ b/iterator.go @@ -124,7 +124,7 @@ func (iter *Iterator) NextManyKeys(size int) *ManyKeys { } //.... 
-func (iter *Iterator) NextManyKeysF(size int, keyPrefix, keyEnd []byte) *ManyKeys { +func (iter *Iterator) NextManyKeysF(limit int, keyPrefix, keyEnd []byte) *ManyKeys { cKeyFilter := C.gorocksdb_many_keys_filter_t{} if len(keyPrefix) > 0 { cKeyPrefix := C.CString(string(keyPrefix)) @@ -138,7 +138,7 @@ func (iter *Iterator) NextManyKeysF(size int, keyPrefix, keyEnd []byte) *ManyKey cKeyFilter.key_end = cKeyEnd cKeyFilter.key_end_s = C.size_t(len(keyEnd)) } - return &ManyKeys{c: C.gorocksdb_iter_next_many_keys_f(iter.c, C.int(size), &cKeyFilter)} + return &ManyKeys{c: C.gorocksdb_iter_next_many_keys_f(iter.c, C.int(limit), &cKeyFilter)} } type KeysSearch struct { diff --git a/iterator_test.go b/iterator_test.go index 1116f860..028ac388 100644 --- a/iterator_test.go +++ b/iterator_test.go @@ -101,6 +101,46 @@ func TestIteratorManyFOnKeyPrefix(t *testing.T) { ensure.DeepEqual(t, actualValues, [][]byte{[]byte("val_keyA1"), []byte("val_keyA2"), []byte("val_keyA3")}) } +func TestIteratorManyFWithLimit(t *testing.T) { + db := newTestDB(t, "TestIterator", nil) + defer db.Close() + + // insert keys + givenKeys := [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3"), []byte("keyA4")} + wo := NewDefaultWriteOptions() + for _, k := range givenKeys { + ensure.Nil(t, db.Put(wo, k, []byte("val_"+string(k)))) + } + + ro := NewDefaultReadOptions() + iter := db.NewIterator(ro) + defer iter.Close() + + iter.SeekToFirst() + manyKeys := iter.NextManyKeysF(-1, []byte("keyA"), nil) + ensure.DeepEqual(t, manyKeys.Keys(), [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3"), []byte("keyA4")}) + ensure.DeepEqual(t, manyKeys.Values(), [][]byte{[]byte("val_keyA1"), []byte("val_keyA2"), []byte("val_keyA3"), []byte("val_keyA4")}) + manyKeys.Destroy() + + iter.SeekToFirst() + manyKeys = iter.NextManyKeysF(0, []byte("keyA"), nil) + ensure.DeepEqual(t, manyKeys.Keys(), [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3"), []byte("keyA4")}) + ensure.DeepEqual(t, manyKeys.Values(), [][]byte{[]byte("val_keyA1"), []byte("val_keyA2"), []byte("val_keyA3"), []byte("val_keyA4")}) + manyKeys.Destroy() + + iter.SeekToFirst() + manyKeys = iter.NextManyKeysF(2, []byte("keyA"), nil) + ensure.DeepEqual(t, manyKeys.Keys(), [][]byte{[]byte("keyA1"), []byte("keyA2")}) + ensure.DeepEqual(t, manyKeys.Values(), [][]byte{[]byte("val_keyA1"), []byte("val_keyA2")}) + manyKeys.Destroy() + + iter.SeekToFirst() + manyKeys = iter.NextManyKeysF(20, []byte("keyA"), nil) + ensure.DeepEqual(t, manyKeys.Keys(), [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3"), []byte("keyA4")}) + ensure.DeepEqual(t, manyKeys.Values(), [][]byte{[]byte("val_keyA1"), []byte("val_keyA2"), []byte("val_keyA3"), []byte("val_keyA4")}) + manyKeys.Destroy() +} + func TestIteratorManyFOnKeyEnd(t *testing.T) { db := newTestDB(t, "TestIterator", nil) defer db.Close() @@ -195,9 +235,9 @@ func TestIteratorManySearchKeys(t *testing.T) { defer iter.Close() searches := make([]KeysSearch, 3) - searches[0] = KeysSearch{KeyFrom: []byte("A"), Limit:1000} - searches[1] = KeysSearch{KeyFrom: []byte("D"), Limit:1000} - searches[2] = KeysSearch{KeyFrom: []byte("Z"), Limit:1000} + searches[0] = KeysSearch{KeyFrom: []byte("A"), Limit: 1000} + searches[1] = KeysSearch{KeyFrom: []byte("D"), Limit: 1000} + searches[2] = KeysSearch{KeyFrom: []byte("Z"), Limit: 1000} manyManyKeys := iter.ManySearchKeys(searches) defer manyManyKeys.Destroy() @@ -232,10 +272,10 @@ func TestIteratorManySearchKeysWithKeyPrefix(t *testing.T) { defer iter.Close() searches := make([]KeysSearch, 4) 
- searches[0] = KeysSearch{KeyFrom: []byte("A"), KeyPrefix: []byte("A"), Limit:1000} - searches[1] = KeysSearch{KeyFrom: []byte("B"), KeyPrefix: []byte("B"), Limit:1000} - searches[2] = KeysSearch{KeyFrom: []byte("D"), KeyPrefix: []byte("D"), Limit:1000} - searches[3] = KeysSearch{KeyFrom: []byte("Z"), KeyPrefix: []byte("Z"), Limit:1000} + searches[0] = KeysSearch{KeyFrom: []byte("A"), KeyPrefix: []byte("A"), Limit: 1000} + searches[1] = KeysSearch{KeyFrom: []byte("B"), KeyPrefix: []byte("B"), Limit: 1000} + searches[2] = KeysSearch{KeyFrom: []byte("D"), KeyPrefix: []byte("D"), Limit: 1000} + searches[3] = KeysSearch{KeyFrom: []byte("Z"), KeyPrefix: []byte("Z"), Limit: 1000} manyManyKeys := iter.ManySearchKeys(searches) defer manyManyKeys.Destroy() @@ -273,8 +313,8 @@ func TestIteratorManySearchKeysWithKeyEnd(t *testing.T) { defer iter.Close() searches := make([]KeysSearch, 2) - searches[0] = KeysSearch{KeyFrom: []byte("A"), KeyEnd: []byte("A3"), Limit:1000} - searches[1] = KeysSearch{KeyFrom: []byte("B"), KeyEnd: []byte("B2"), Limit:1000} + searches[0] = KeysSearch{KeyFrom: []byte("A"), KeyEnd: []byte("A3"), Limit: 1000} + searches[1] = KeysSearch{KeyFrom: []byte("B"), KeyEnd: []byte("B2"), Limit: 1000} manyManyKeys := iter.ManySearchKeys(searches) defer manyManyKeys.Destroy() @@ -306,8 +346,8 @@ func TestIteratorManySearchKeysWithKeyPrefixAndEnd(t *testing.T) { defer iter.Close() searches := make([]KeysSearch, 2) - searches[0] = KeysSearch{KeyFrom: []byte("keyC0"), KeyPrefix: []byte("keyC"), KeyEnd: []byte("keyC1"), Limit:1000} - searches[1] = KeysSearch{KeyFrom: []byte("k"), KeyPrefix: []byte("keyC"), KeyEnd: []byte("keyC1"), Limit:1000} + searches[0] = KeysSearch{KeyFrom: []byte("keyC0"), KeyPrefix: []byte("keyC"), KeyEnd: []byte("keyC1"), Limit: 1000} + searches[1] = KeysSearch{KeyFrom: []byte("k"), KeyPrefix: []byte("keyC"), KeyEnd: []byte("keyC1"), Limit: 1000} manyManyKeys := iter.ManySearchKeys(searches) defer manyManyKeys.Destroy() From 82118bffd6df267a2a28d2128a7b74669d3fdd25 Mon Sep 17 00:00:00 2001 From: miguel gomard Date: Tue, 3 Oct 2017 16:05:06 +0200 Subject: [PATCH 13/55] add ManyKeysPageAllocSize --- gorocksdb.c | 11 +++++++---- gorocksdb.h | 4 ++-- iterator.go | 47 +++++++++++++++++++++++++++-------------------- 3 files changed, 36 insertions(+), 26 deletions(-) diff --git a/gorocksdb.c b/gorocksdb.c index ae4b843e..b5082639 100644 --- a/gorocksdb.c +++ b/gorocksdb.c @@ -96,7 +96,7 @@ gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys(rocksdb_iterator_t* iter, i return many_keys; } -gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, int limit, const gorocksdb_many_keys_filter_t* key_filter) { +gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, int limit, const gorocksdb_many_keys_filter_t* key_filter, int page_alloc_size) { int i; char** keys, **values; size_t* key_sizes, *value_sizes; @@ -105,7 +105,10 @@ gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, // todo: we malloc the prefetch size (improve it) gorocksdb_many_keys_t* many_keys = (gorocksdb_many_keys_t*) malloc(sizeof(gorocksdb_many_keys_t)); - int size = 512; + int size = page_alloc_size; + if (size <= 0) { + size = 512; + } if (limit > 0 && limit < size) { size = limit; } @@ -193,7 +196,7 @@ void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys) { free(many_keys); } -gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* iter, const gorocksdb_keys_search_t* keys_searches, int size) { 
+gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* iter, const gorocksdb_keys_search_t* keys_searches, int size, int page_alloc_size) { int i; gorocksdb_many_keys_filter_t key_filter; gorocksdb_many_keys_t** result = (gorocksdb_many_keys_t**) malloc(size*sizeof(gorocksdb_many_keys_t*)); @@ -203,7 +206,7 @@ gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* iter, con key_filter.key_prefix_s = keys_searches[i].key_prefix_s; key_filter.key_end = keys_searches[i].key_end; key_filter.key_end_s = keys_searches[i].key_end_s; - result[i] = gorocksdb_iter_next_many_keys_f(iter, keys_searches[i].limit, &key_filter); + result[i] = gorocksdb_iter_next_many_keys_f(iter, keys_searches[i].limit, &key_filter, page_alloc_size); } return result; } diff --git a/gorocksdb.h b/gorocksdb.h index 6e8ef724..346c146f 100644 --- a/gorocksdb.h +++ b/gorocksdb.h @@ -51,7 +51,7 @@ typedef struct { } gorocksdb_many_keys_filter_t; -extern gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, int limit, const gorocksdb_many_keys_filter_t* key_filter); +extern gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, int limit, const gorocksdb_many_keys_filter_t* key_filter, int page_alloc_size); extern void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys); @@ -66,7 +66,7 @@ typedef struct { } gorocksdb_keys_search_t; -gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* iter, const gorocksdb_keys_search_t* keys_searches, int size); +gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* iter, const gorocksdb_keys_search_t* keys_searches, int size, int page_alloc_size); void gorocksdb_destroy_many_many_keys(gorocksdb_many_keys_t** many_many_keys, int size); diff --git a/iterator.go b/iterator.go index 08b75f0d..e86a031d 100644 --- a/iterator.go +++ b/iterator.go @@ -81,6 +81,8 @@ func (iter *Iterator) Next() { C.rocksdb_iter_next(iter.c) } +var ManyKeysPageAllocSize int = 512 + type ManyKeys struct { c *C.gorocksdb_many_keys_t } @@ -138,12 +140,12 @@ func (iter *Iterator) NextManyKeysF(limit int, keyPrefix, keyEnd []byte) *ManyKe cKeyFilter.key_end = cKeyEnd cKeyFilter.key_end_s = C.size_t(len(keyEnd)) } - return &ManyKeys{c: C.gorocksdb_iter_next_many_keys_f(iter.c, C.int(limit), &cKeyFilter)} + return &ManyKeys{c: C.gorocksdb_iter_next_many_keys_f(iter.c, C.int(limit), &cKeyFilter, C.int(ManyKeysPageAllocSize))} } type KeysSearch struct { KeyFrom, KeyPrefix, KeyEnd []byte - Limit int + Limit int } type ManyManyKeys struct { @@ -154,30 +156,35 @@ type ManyManyKeys struct { func (iter *Iterator) ManySearchKeys(searches []KeysSearch) *ManyManyKeys { nbSearches := len(searches) cManyKeysSearches := make([]C.gorocksdb_keys_search_t, nbSearches) - for i, search := range searches { - cKSearch := C.gorocksdb_keys_search_t{limit:C.int(search.Limit)} - cKFrom := C.CString(string(search.KeyFrom)) - defer C.free(unsafe.Pointer(cKFrom)) - cKSearch.key_from = cKFrom - cKSearch.key_from_s = C.size_t(len(search.KeyFrom)) - if len(search.KeyPrefix) > 0 { - cKPrefix := C.CString(string(search.KeyPrefix)) - defer C.free(unsafe.Pointer(cKPrefix)) - cKSearch.key_prefix = cKPrefix - cKSearch.key_prefix_s = C.size_t(len(search.KeyPrefix)) + for i := range searches { + cKSearch := C.gorocksdb_keys_search_t{limit: C.int(searches[i].Limit)} + cKSearch.key_from = C.CString(string(searches[i].KeyFrom)) + cKSearch.key_from_s = C.size_t(len(searches[i].KeyFrom)) + if len(searches[i].KeyPrefix) > 0 { + 
cKSearch.key_prefix = C.CString(string(searches[i].KeyPrefix)) + cKSearch.key_prefix_s = C.size_t(len(searches[i].KeyPrefix)) } - if len(search.KeyEnd) > 0 { - cKEnd := C.CString(string(search.KeyEnd)) - defer C.free(unsafe.Pointer(cKEnd)) - cKSearch.key_end = cKEnd - cKSearch.key_end_s = C.size_t(len(search.KeyEnd)) + if len(searches[i].KeyEnd) > 0 { + cKSearch.key_end = C.CString(string(searches[i].KeyEnd)) + cKSearch.key_end_s = C.size_t(len(searches[i].KeyEnd)) } cManyKeysSearches[i] = cKSearch } cManyManyKeys := C.gorocksdb_many_search_keys(iter.c, (*C.gorocksdb_keys_search_t)(unsafe.Pointer((*reflect.SliceHeader)(unsafe.Pointer(&cManyKeysSearches)).Data)), - C.int(nbSearches)) - + C.int(nbSearches), + C.int(ManyKeysPageAllocSize), + ) + // free + for i := range searches { + C.free(unsafe.Pointer(cManyKeysSearches[i].key_from)) + if len(searches[i].KeyPrefix) > 0 { + C.free(unsafe.Pointer(cManyKeysSearches[i].key_prefix)) + } + if len(searches[i].KeyEnd) > 0 { + C.free(unsafe.Pointer(cManyKeysSearches[i].key_end)) + } + } return &ManyManyKeys{c: cManyManyKeys, size: nbSearches} } From e901e9b4baa4fb00e52e1426d7e1531e4fafe267 Mon Sep 17 00:00:00 2001 From: miguel gomard Date: Tue, 3 Oct 2017 16:19:17 +0200 Subject: [PATCH 14/55] add ManyKeys.Each function --- iterator.go | 15 +++++++++++++++ iterator_test.go | 30 ++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/iterator.go b/iterator.go index e86a031d..dfa4fd8d 100644 --- a/iterator.go +++ b/iterator.go @@ -120,6 +120,21 @@ func (m *ManyKeys) Values() [][]byte { return values } +func (m *ManyKeys) Each(each func(key []byte, value []byte)) { + found := m.Found() + for i := uintptr(0); i < uintptr(found); i++ { + chars := *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.keys)) + i*unsafe.Sizeof(m.c.keys))) + size := *(*C.size_t)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.key_sizes)) + i*unsafe.Sizeof(m.c.key_sizes))) + key := charToByte(chars, size) + + chars = *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.values)) + i*unsafe.Sizeof(m.c.values))) + size = *(*C.size_t)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.value_sizes)) + i*unsafe.Sizeof(m.c.value_sizes))) + value := charToByte(chars, size) + + each(key, value) + } +} + //.... 
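// NextManyKeys fetches up to size keys from the current iterator position
// with no filtering applied (descriptive note: this plain variant does not
// collect values; the C helper leaves the values array NULL).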
func (iter *Iterator) NextManyKeys(size int) *ManyKeys { return &ManyKeys{c: C.gorocksdb_iter_next_many_keys(iter.c, C.int(size))} diff --git a/iterator_test.go b/iterator_test.go index 028ac388..6c6fb941 100644 --- a/iterator_test.go +++ b/iterator_test.go @@ -362,3 +362,33 @@ func TestIteratorManySearchKeysWithKeyPrefixAndEnd(t *testing.T) { ensure.DeepEqual(t, result[1].Keys(), [][]byte{[]byte("keyC"), []byte("keyC0")}) ensure.DeepEqual(t, result[1].Values(), [][]byte{[]byte("val_keyC"), []byte("val_keyC0")}) } + +func TestIteratorManyKeysEach(t *testing.T) { + db := newTestDB(t, "TestIterator", nil) + defer db.Close() + + // insert keys + givenKeys := [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3"), []byte("keyA4")} + wo := NewDefaultWriteOptions() + for _, k := range givenKeys { + ensure.Nil(t, db.Put(wo, k, []byte("val_"+string(k)))) + } + + ro := NewDefaultReadOptions() + iter := db.NewIterator(ro) + defer iter.Close() + + iter.SeekToFirst() + manyKeys := iter.NextManyKeysF(-1, []byte("keyA"), nil) + + actualKeys := [][]byte{} + actualValues := [][]byte{} + manyKeys.Each(func(key []byte, value []byte) { + actualKeys = append(actualKeys, key) + actualValues = append(actualValues, value) + }) + + ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3"), []byte("keyA4")}) + ensure.DeepEqual(t, actualValues, [][]byte{[]byte("val_keyA1"), []byte("val_keyA2"), []byte("val_keyA3"), []byte("val_keyA4")}) + manyKeys.Destroy() +} \ No newline at end of file From f3ae3b962318bd2e3419e68d127a81a1231ea4b4 Mon Sep 17 00:00:00 2001 From: miguel gomard Date: Tue, 3 Oct 2017 16:24:31 +0200 Subject: [PATCH 15/55] add Each index --- iterator.go | 4 ++-- iterator_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/iterator.go b/iterator.go index dfa4fd8d..4ed382f8 100644 --- a/iterator.go +++ b/iterator.go @@ -120,7 +120,7 @@ func (m *ManyKeys) Values() [][]byte { return values } -func (m *ManyKeys) Each(each func(key []byte, value []byte)) { +func (m *ManyKeys) Each(each func(i int, key []byte, value []byte)) { found := m.Found() for i := uintptr(0); i < uintptr(found); i++ { chars := *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.keys)) + i*unsafe.Sizeof(m.c.keys))) @@ -131,7 +131,7 @@ func (m *ManyKeys) Each(each func(key []byte, value []byte)) { size = *(*C.size_t)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.value_sizes)) + i*unsafe.Sizeof(m.c.value_sizes))) value := charToByte(chars, size) - each(key, value) + each(int(i), key, value) } } diff --git a/iterator_test.go b/iterator_test.go index 6c6fb941..19272df2 100644 --- a/iterator_test.go +++ b/iterator_test.go @@ -383,7 +383,7 @@ func TestIteratorManyKeysEach(t *testing.T) { actualKeys := [][]byte{} actualValues := [][]byte{} - manyKeys.Each(func(key []byte, value []byte) { + manyKeys.Each(func(i int, key []byte, value []byte) { actualKeys = append(actualKeys, key) actualValues = append(actualValues, value) }) From 8d9db7ce9641202c430c66dca53cbc59c8bb7255 Mon Sep 17 00:00:00 2001 From: miguel gomard Date: Tue, 3 Oct 2017 16:41:51 +0200 Subject: [PATCH 16/55] add should continue --- iterator.go | 6 ++++-- iterator_test.go | 16 ++++++++++++++-- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/iterator.go b/iterator.go index 4ed382f8..d92adbe6 100644 --- a/iterator.go +++ b/iterator.go @@ -120,7 +120,7 @@ func (m *ManyKeys) Values() [][]byte { return values } -func (m *ManyKeys) Each(each func(i int, key []byte, value []byte)) { +func (m *ManyKeys) Each(each 
func(i int, key []byte, value []byte) bool) {
 	found := m.Found()
 	for i := uintptr(0); i < uintptr(found); i++ {
 		chars := *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.keys)) + i*unsafe.Sizeof(m.c.keys)))
@@ -131,7 +131,9 @@ func (m *ManyKeys) Each(each func(i int, key []byte, value []byte)) {
 		size = *(*C.size_t)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.value_sizes)) + i*unsafe.Sizeof(m.c.value_sizes)))
 		value := charToByte(chars, size)
 
-		each(int(i), key, value)
+		if !each(int(i), key, value) {
+			break
+		}
 	}
 }
 
diff --git a/iterator_test.go b/iterator_test.go
index 19272df2..81e8f0d2 100644
--- a/iterator_test.go
+++ b/iterator_test.go
@@ -383,12 +383,24 @@ func TestIteratorManyKeysEach(t *testing.T) {
 
 	actualKeys := [][]byte{}
 	actualValues := [][]byte{}
-	manyKeys.Each(func(i int, key []byte, value []byte) {
+	manyKeys.Each(func(i int, key []byte, value []byte) bool {
 		actualKeys = append(actualKeys, key)
 		actualValues = append(actualValues, value)
+		return true
 	})
-
 	ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3"), []byte("keyA4")})
 	ensure.DeepEqual(t, actualValues, [][]byte{[]byte("val_keyA1"), []byte("val_keyA2"), []byte("val_keyA3"), []byte("val_keyA4")})
+
+	actualKeys = nil
+	actualValues = nil
+	limit := 2
+	manyKeys.Each(func(i int, key []byte, value []byte) bool {
+		actualKeys = append(actualKeys, key)
+		actualValues = append(actualValues, value)
+		return len(actualKeys) != limit
+	})
+	ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("keyA1"), []byte("keyA2")})
+	ensure.DeepEqual(t, actualValues, [][]byte{[]byte("val_keyA1"), []byte("val_keyA2")})
 
 	manyKeys.Destroy()
 }
\ No newline at end of file

From 76b2f4a58cb5d4a157a6af2c08f728a6abfef3ec Mon Sep 17 00:00:00 2001
From: miguel gomard
Date: Tue, 3 Oct 2017 16:48:36 +0200
Subject: [PATCH 17/55] Each returns whether all entries have been processed

---
 iterator.go      | 5 +++--
 iterator_test.go | 6 ++++--
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/iterator.go b/iterator.go
index d92adbe6..a0ec3dfb 100644
--- a/iterator.go
+++ b/iterator.go
@@ -120,7 +120,7 @@ func (m *ManyKeys) Values() [][]byte {
 	return values
 }
 
-func (m *ManyKeys) Each(each func(i int, key []byte, value []byte) bool) {
+func (m *ManyKeys) Each(each func(i int, key []byte, value []byte) bool) bool {
 	found := m.Found()
 	for i := uintptr(0); i < uintptr(found); i++ {
 		chars := *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.keys)) + i*unsafe.Sizeof(m.c.keys)))
@@ -132,9 +132,10 @@ func (m *ManyKeys) Each(each func(i int, key []byte, value []byte) bool) {
 		value := charToByte(chars, size)
 
 		if !each(int(i), key, value) {
-			break
+			return false
 		}
 	}
+	return true
 }
 
 //....
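A minimal sketch of how the bool-returning Each is meant to be driven from
client code; the firstN helper below is illustrative and not part of this
patch. Note that the key and value slices handed to the callback alias C
memory owned by the ManyKeys, so they are only valid until Destroy is called.

	// firstN collects at most n pairs; complete reports whether the
	// callback consumed every fetched pair without asking to stop.
	func firstN(mk *ManyKeys, n int) (keys, vals [][]byte, complete bool) {
		complete = mk.Each(func(i int, key, value []byte) bool {
			keys = append(keys, key)
			vals = append(vals, value)
			return len(keys) < n // returning false aborts the loop early
		})
		return keys, vals, complete
	}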
diff --git a/iterator_test.go b/iterator_test.go index 81e8f0d2..79dd34f1 100644 --- a/iterator_test.go +++ b/iterator_test.go @@ -383,22 +383,24 @@ func TestIteratorManyKeysEach(t *testing.T) { actualKeys := [][]byte{} actualValues := [][]byte{} - manyKeys.Each(func(i int, key []byte, value []byte) bool { + all := manyKeys.Each(func(i int, key []byte, value []byte) bool { actualKeys = append(actualKeys, key) actualValues = append(actualValues, value) return true }) + ensure.DeepEqual(t, all, true) ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3"), []byte("keyA4")}) ensure.DeepEqual(t, actualValues, [][]byte{[]byte("val_keyA1"), []byte("val_keyA2"), []byte("val_keyA3"), []byte("val_keyA4")}) actualKeys = nil actualValues = nil limit := 2 - manyKeys.Each(func(i int, key []byte, value []byte) bool { + all = manyKeys.Each(func(i int, key []byte, value []byte) bool { actualKeys = append(actualKeys, key) actualValues = append(actualValues, value) return len(actualKeys) != limit }) + ensure.DeepEqual(t, all, false) ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("keyA1"), []byte("keyA2")}) ensure.DeepEqual(t, actualValues, [][]byte{[]byte("val_keyA1"), []byte("val_keyA2")}) From 860863706ebd9471f4a56ed0a80681ab28873070 Mon Sep 17 00:00:00 2001 From: miguel gomard Date: Wed, 4 Oct 2017 14:44:10 +0200 Subject: [PATCH 18/55] cgo calls saver --- gorocksdb.c | 31 ++++++++++++++++++++++++++++--- gorocksdb.h | 27 +++++++++++++++++++++++---- 2 files changed, 51 insertions(+), 7 deletions(-) diff --git a/gorocksdb.c b/gorocksdb.c index b5082639..a9e97eab 100644 --- a/gorocksdb.c +++ b/gorocksdb.c @@ -179,7 +179,7 @@ gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, return many_keys; } -void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys) { +extern void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys) { int i; for (i = 0; i < many_keys->found; i++) { free(many_keys->keys[i]); @@ -196,7 +196,7 @@ void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys) { free(many_keys); } -gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* iter, const gorocksdb_keys_search_t* keys_searches, int size, int page_alloc_size) { +extern gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* iter, const gorocksdb_keys_search_t* keys_searches, int size, int page_alloc_size) { int i; gorocksdb_many_keys_filter_t key_filter; gorocksdb_many_keys_t** result = (gorocksdb_many_keys_t**) malloc(size*sizeof(gorocksdb_many_keys_t*)); @@ -211,7 +211,7 @@ gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* iter, con return result; } -void gorocksdb_destroy_many_many_keys(gorocksdb_many_keys_t** many_many_keys, int size) { +extern void gorocksdb_destroy_many_many_keys(gorocksdb_many_keys_t** many_many_keys, int size) { int i; for (i = 0; i < size; i++) { gorocksdb_destroy_many_keys(many_many_keys[i]); @@ -219,3 +219,28 @@ void gorocksdb_destroy_many_many_keys(gorocksdb_many_keys_t** many_many_keys, in free(many_many_keys); } +extern gorocksdb_many_keys_t** gorocksdb_many_search_keys_raw( + rocksdb_iterator_t* iter, + char** key_froms, + size_t* key_from_s, + char** key_prefixes, + size_t* key_prefix_s, + char** key_ends, + size_t* key_end_s, + int* limits, + int size, + int page_alloc_size +) { + int i; + gorocksdb_many_keys_filter_t key_filter; + gorocksdb_many_keys_t** result = (gorocksdb_many_keys_t**) malloc(size*sizeof(gorocksdb_many_keys_t*)); + for (i=0; i < size; i++) { + 
rocksdb_iter_seek(iter, key_froms[i], key_from_s[i]); + key_filter.key_prefix = key_prefixes[i]; + key_filter.key_prefix_s = key_prefix_s[i]; + key_filter.key_end = key_ends[i]; + key_filter.key_end_s = key_end_s[i]; + result[i] = gorocksdb_iter_next_many_keys_f(iter, limits[i], &key_filter, page_alloc_size); + } + return result; +} diff --git a/gorocksdb.h b/gorocksdb.h index 346c146f..1967ed0a 100644 --- a/gorocksdb.h +++ b/gorocksdb.h @@ -55,6 +55,8 @@ extern gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t extern void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys); +/* Batch searches */ + typedef struct { char* key_from; size_t key_from_s; @@ -66,7 +68,24 @@ typedef struct { } gorocksdb_keys_search_t; -gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* iter, const gorocksdb_keys_search_t* keys_searches, int size, int page_alloc_size); - -void gorocksdb_destroy_many_many_keys(gorocksdb_many_keys_t** many_many_keys, int size); - +extern gorocksdb_many_keys_t** gorocksdb_many_search_keys( + rocksdb_iterator_t* iter, + const gorocksdb_keys_search_t* keys_searches, + int size, + int page_alloc_size +); + +gorocksdb_many_keys_t** gorocksdb_many_search_keys_raw( + rocksdb_iterator_t* iter, + char** key_froms, + size_t* key_from_s, + char** key_prefixes, + size_t* key_prefix_s, + char** key_ends, + size_t* key_end_s, + int* limits, + int size, + int page_alloc_size +); + +extern void gorocksdb_destroy_many_many_keys(gorocksdb_many_keys_t** many_many_keys, int size); From 2055f46df4056c8f11e0ba1848c90319e128fdd9 Mon Sep 17 00:00:00 2001 From: miguel gomard Date: Wed, 4 Oct 2017 14:44:25 +0200 Subject: [PATCH 19/55] cgo calls saver --- iterator.go | 41 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/iterator.go b/iterator.go index a0ec3dfb..fd12729e 100644 --- a/iterator.go +++ b/iterator.go @@ -171,7 +171,7 @@ type ManyManyKeys struct { size int } -func (iter *Iterator) ManySearchKeys(searches []KeysSearch) *ManyManyKeys { +func (iter *Iterator) ManySearchKeysV1(searches []KeysSearch) *ManyManyKeys { nbSearches := len(searches) cManyKeysSearches := make([]C.gorocksdb_keys_search_t, nbSearches) for i := range searches { @@ -206,6 +206,45 @@ func (iter *Iterator) ManySearchKeys(searches []KeysSearch) *ManyManyKeys { return &ManyManyKeys{c: cManyManyKeys, size: nbSearches} } +func (iter *Iterator) ManySearchKeys(searches []KeysSearch) *ManyManyKeys { + nbSearches := len(searches) + + cKeyFroms := C.malloc(C.size_t(nbSearches) * C.size_t(unsafe.Sizeof(uintptr(0)))) + defer C.free(cKeyFroms) + cKeyPrefixes := C.malloc(C.size_t(nbSearches) * C.size_t(unsafe.Sizeof(uintptr(0)))) + defer C.free(cKeyPrefixes) + cKeyEnds := C.malloc(C.size_t(nbSearches) * C.size_t(unsafe.Sizeof(uintptr(0)))) + defer C.free(cKeyEnds) + cKeyFromSizes := make([]C.size_t, nbSearches) + cKeyPrefixSizes := make([]C.size_t, nbSearches) + cKeyEndSizes := make([]C.size_t, nbSearches) + cLimits := make([]C.int, nbSearches) + + for i := uintptr(0); i < uintptr(nbSearches); i++ { + search := searches[i] + *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(cKeyFroms)) + i*unsafe.Sizeof(cKeyFroms))) = byteToChar(search.KeyFrom) + *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(cKeyPrefixes)) + i*unsafe.Sizeof(cKeyPrefixes))) = byteToChar(search.KeyPrefix) + *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(cKeyEnds)) + i*unsafe.Sizeof(cKeyEnds))) = byteToChar(search.KeyEnd) + cKeyFromSizes[i] = 
C.size_t(len(searches[i].KeyFrom)) + cKeyPrefixSizes[i] = C.size_t(len(searches[i].KeyPrefix)) + cKeyEndSizes[i] = C.size_t(len(searches[i].KeyEnd)) + cLimits[i] = C.int(searches[i].Limit) + } + cManyManyKeys := C.gorocksdb_many_search_keys_raw( + iter.c, + (**C.char)(unsafe.Pointer(cKeyFroms)), + (*C.size_t)(unsafe.Pointer(&cKeyFromSizes[0])), + (**C.char)(unsafe.Pointer(cKeyPrefixes)), + (*C.size_t)(unsafe.Pointer(&cKeyPrefixSizes[0])), + (**C.char)(unsafe.Pointer(cKeyEnds)), + (*C.size_t)(unsafe.Pointer(&cKeyEndSizes[0])), + (*C.int)(unsafe.Pointer(&cLimits[0])), + C.int(nbSearches), + C.int(ManyKeysPageAllocSize), + ) + return &ManyManyKeys{c: cManyManyKeys, size: nbSearches} +} + func (m ManyManyKeys) Result() []*ManyKeys { result := make([]*ManyKeys, m.size) for i := uintptr(0); i < uintptr(m.size); i++ { From 21b73ec1ecfdab0a4a8bee2225379598170afd5e Mon Sep 17 00:00:00 2001 From: miguel gomard Date: Wed, 4 Oct 2017 15:15:22 +0200 Subject: [PATCH 20/55] ManySearchKeysExp --- iterator.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/iterator.go b/iterator.go index fd12729e..44b0fb2a 100644 --- a/iterator.go +++ b/iterator.go @@ -171,7 +171,7 @@ type ManyManyKeys struct { size int } -func (iter *Iterator) ManySearchKeysV1(searches []KeysSearch) *ManyManyKeys { +func (iter *Iterator) ManySearchKeys(searches []KeysSearch) *ManyManyKeys { nbSearches := len(searches) cManyKeysSearches := make([]C.gorocksdb_keys_search_t, nbSearches) for i := range searches { @@ -206,7 +206,7 @@ func (iter *Iterator) ManySearchKeysV1(searches []KeysSearch) *ManyManyKeys { return &ManyManyKeys{c: cManyManyKeys, size: nbSearches} } -func (iter *Iterator) ManySearchKeys(searches []KeysSearch) *ManyManyKeys { +func (iter *Iterator) ManySearchKeysExp(searches []KeysSearch) *ManyManyKeys { nbSearches := len(searches) cKeyFroms := C.malloc(C.size_t(nbSearches) * C.size_t(unsafe.Sizeof(uintptr(0)))) From ebee24116a34401d10df6f7cc28c2a40568ab467 Mon Sep 17 00:00:00 2001 From: Jelte Fennema Date: Thu, 5 Oct 2017 12:36:46 -0600 Subject: [PATCH 21/55] Add PutMany C API --- gorocksdb.c | 29 +++++++++++++++++++++++++++++ gorocksdb.h | 19 +++++++++++++++++++ util.go | 6 +----- write_batch.go | 38 ++++++++++++++++++++++++-------------- write_batch_test.go | 37 +++++++++++++++++++++++++++++++++++++ 5 files changed, 110 insertions(+), 19 deletions(-) diff --git a/gorocksdb.c b/gorocksdb.c index a9e97eab..b1dc67e4 100644 --- a/gorocksdb.c +++ b/gorocksdb.c @@ -244,3 +244,32 @@ extern gorocksdb_many_keys_t** gorocksdb_many_search_keys_raw( } return result; } + +void gorocksdb_writebatch_put_many( + rocksdb_writebatch_t* batch, + size_t num_pairs, + char** keys, + size_t* key_sizes, + char** values, + size_t* value_sizes +) { + int i; + for (i=0; i < num_pairs; i++) { + rocksdb_writebatch_put(batch, keys[i], key_sizes[i], values[i], value_sizes[i]); + } +} + +void gorocksdb_writebatch_put_many_cf( + rocksdb_writebatch_t* batch, + rocksdb_column_family_handle_t* cf, + size_t num_pairs, + char** keys, + size_t* key_sizes, + char** values, + size_t* value_sizes +) { + int i; + for (i=0; i < num_pairs; i++) { + rocksdb_writebatch_put_cf(batch, cf, keys[i], key_sizes[i], values[i], value_sizes[i]); + } +} diff --git a/gorocksdb.h b/gorocksdb.h index 1967ed0a..c5d40fd6 100644 --- a/gorocksdb.h +++ b/gorocksdb.h @@ -89,3 +89,22 @@ gorocksdb_many_keys_t** gorocksdb_many_search_keys_raw( ); extern void gorocksdb_destroy_many_many_keys(gorocksdb_many_keys_t** many_many_keys, int size); + +void 
gorocksdb_writebatch_put_many( + rocksdb_writebatch_t* batch, + size_t num_pairs, + char** keys, + size_t* key_sizes, + char** values, + size_t* value_sizes +); + +void gorocksdb_writebatch_put_many_cf( + rocksdb_writebatch_t* batch, + rocksdb_column_family_handle_t* cf, + size_t num_pairs, + char** keys, + size_t* key_sizes, + char** values, + size_t* value_sizes +); diff --git a/util.go b/util.go index aadde792..73fd4c43 100644 --- a/util.go +++ b/util.go @@ -2,7 +2,6 @@ package gorocksdb import "C" import ( - "fmt" "reflect" "unsafe" ) @@ -53,14 +52,11 @@ func byteSliceToArray(vals [][]byte) (**C.char, *C.size_t) { } cCharBuf := C.malloc(C.size_t(unsafe.Sizeof(chars[0])) * C.size_t(len(chars))) - copied := copy(((*[1 << 32]*C.char)(cCharBuf))[:], chars) - fmt.Println("COPIED X BYTES:", copied) + copy(((*[1 << 32]*C.char)(cCharBuf))[:], chars) cChars := (**C.char)(cCharBuf) cSizes := (*C.size_t)(unsafe.Pointer(&sizes[0])) - fmt.Println("sizes", sizes) - fmt.Println("chars", chars) return cChars, cSizes } diff --git a/write_batch.go b/write_batch.go index f33b12e9..d22dcc96 100644 --- a/write_batch.go +++ b/write_batch.go @@ -1,15 +1,18 @@ package gorocksdb // #include "rocksdb/c.h" +// #include "gorocksdb.h" import "C" import ( "errors" "io" + "unsafe" ) // WriteBatch is a batching of Puts, Merges and Deletes. type WriteBatch struct { - c *C.rocksdb_writebatch_t + c *C.rocksdb_writebatch_t + charArrays []**C.char } // NewWriteBatch create a WriteBatch object. @@ -19,7 +22,7 @@ func NewWriteBatch() *WriteBatch { // NewNativeWriteBatch create a WriteBatch object. func NewNativeWriteBatch(c *C.rocksdb_writebatch_t) *WriteBatch { - return &WriteBatch{c} + return &WriteBatch{c: c} } // WriteBatchFrom creates a write batch from a serialized WriteBatch. 
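For orientation, a minimal sketch of the intended call pattern for the
renamed batch API; putManyExample and its sample keys are illustrative, not
code from this patch:

	func putManyExample(db *DB) error {
		wo := NewDefaultWriteOptions()
		defer wo.Destroy()
		wb := NewWriteBatch()
		defer wb.Destroy() // also frees the C pointer arrays PutMany allocated
		keys := [][]byte{[]byte("k1"), []byte("k2")}
		vals := [][]byte{[]byte("v1"), []byte("v2")}
		// Both pairs are queued through a single cgo crossing.
		if err := wb.PutMany(keys, vals); err != nil {
			return err // key and value counts differed
		}
		return db.Write(wo, wb)
	}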
@@ -41,34 +44,38 @@ func (wb *WriteBatch) PutCF(cf *ColumnFamilyHandle, key, value []byte) { C.rocksdb_writebatch_put_cf(wb.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value))) } -// Putv queues many key and value pairs -func (wb *WriteBatch) Putv(keys, values [][]byte) error { +// PutMany queues many key and value pairs +func (wb *WriteBatch) PutMany(keys, values [][]byte) error { if len(keys) != len(values) { return errors.New("Number of keys and values should be the same") } - numKeys := C.int(len(keys)) + numPairs := C.size_t(len(keys)) cKeys, cKeySizes := byteSliceToArray(keys) cValues, cValueSizes := byteSliceToArray(values) - C.rocksdb_writebatch_putv( + wb.charArrays = append(wb.charArrays, cKeys, cValues) + C.gorocksdb_writebatch_put_many( wb.c, - numKeys, cKeys, cKeySizes, - numKeys, cValues, cValueSizes, + numPairs, + cKeys, cKeySizes, + cValues, cValueSizes, ) return nil } -// PutvCF queues many key and value pairs in a column family -func (wb *WriteBatch) PutvCF(cf *ColumnFamilyHandle, keys, values [][]byte) error { +// PutManyCF queues many key and value pairs in a column family +func (wb *WriteBatch) PutManyCF(cf *ColumnFamilyHandle, keys, values [][]byte) error { if len(keys) != len(values) { return errors.New("Number of keys and values should be the same") } - numKeys := C.int(len(keys)) + numPairs := C.size_t(len(keys)) cKeys, cKeySizes := byteSliceToArray(keys) cValues, cValueSizes := byteSliceToArray(values) - C.rocksdb_writebatch_putv_cf( + wb.charArrays = append(wb.charArrays, cKeys, cValues) + C.gorocksdb_writebatch_put_many_cf( wb.c, cf.c, - numKeys, cKeys, cKeySizes, - numKeys, cValues, cValueSizes, + numPairs, + cKeys, cKeySizes, + cValues, cValueSizes, ) return nil } @@ -129,6 +136,9 @@ func (wb *WriteBatch) Clear() { // Destroy deallocates the WriteBatch object. 
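// It also frees the C pointer arrays that PutMany and PutManyCF
// allocated for their key and value vectors.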
func (wb *WriteBatch) Destroy() { C.rocksdb_writebatch_destroy(wb.c) + for _, arr := range wb.charArrays { + C.free(unsafe.Pointer(arr)) + } wb.c = nil } diff --git a/write_batch_test.go b/write_batch_test.go index 8913d3cc..99328d5a 100644 --- a/write_batch_test.go +++ b/write_batch_test.go @@ -41,6 +41,43 @@ func TestWriteBatch(t *testing.T) { ensure.True(t, v2.Data() == nil) } +func TestWriteBatchPutMany(t *testing.T) { + db := newTestDB(t, "TestWriteBatchPutMany", nil) + defer db.Close() + + var ( + key1 = []byte("key1") + val1 = []byte("val1") + key2 = []byte("key22") + val2 = []byte("val22") + ) + wo := NewDefaultWriteOptions() + defer wo.Destroy() + + // create and fill the write batch + keys := [][]byte{key1, key2} + values := [][]byte{val1, val2} + wb := NewWriteBatch() + defer wb.Destroy() + wb.PutMany(keys, values) + // ensure.DeepEqual(t, wb.Count(), 2) + + // perform the batch + ensure.Nil(t, db.Write(wo, wb)) + + // check changes + ro := NewDefaultReadOptions() + v1, err := db.Get(ro, key1) + defer v1.Free() + ensure.Nil(t, err) + ensure.DeepEqual(t, v1.Data(), val1) + + v2, err := db.Get(ro, key2) + defer v2.Free() + ensure.Nil(t, err) + ensure.DeepEqual(t, v2.Data(), val2) +} + func TestWriteBatchIterator(t *testing.T) { db := newTestDB(t, "TestWriteBatchIterator", nil) defer db.Close() From 2c71dcf2924946a072a226787ac19e5f85bfffdc Mon Sep 17 00:00:00 2001 From: miguel gomard Date: Tue, 10 Oct 2017 08:57:53 +0200 Subject: [PATCH 22/55] cleanup, fix key_end+next, add prev --- gorocksdb.c | 81 +++++++++--------- gorocksdb.h | 13 ++- iterator.go | 204 +++++++++++++++++++++++-------------------- iterator_test.go | 219 +++++++++++++++++++++++++++++++++++++---------- 4 files changed, 332 insertions(+), 185 deletions(-) diff --git a/gorocksdb.c b/gorocksdb.c index b1dc67e4..343070d4 100644 --- a/gorocksdb.c +++ b/gorocksdb.c @@ -66,37 +66,9 @@ rocksdb_slicetransform_t* gorocksdb_slicetransform_create(uintptr_t idx) { (const char* (*)(void*))(gorocksdb_slicetransform_name)); } -gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys(rocksdb_iterator_t* iter, int size) { - int i = 0; - gorocksdb_many_keys_t* many_keys = (gorocksdb_many_keys_t*) malloc(sizeof(gorocksdb_many_keys_t)); - - char** keys; - size_t* key_sizes; - keys = (char**) malloc(size * sizeof(char*)); - key_sizes = (size_t*) malloc(size * sizeof(size_t)); +#define DEFAULT_PAGE_ALLOC_SIZE 512 - for (i = 0; i < size; i++) { - if (!rocksdb_iter_valid(iter)) { - break; - } - - // Stuff - const char* key = rocksdb_iter_key(iter, &key_sizes[i]); - keys[i] = (char*) malloc(key_sizes[i] * sizeof(char)); - memcpy(keys[i], key, key_sizes[i]); - - rocksdb_iter_next(iter); - } - - many_keys->keys = keys; - many_keys->key_sizes = key_sizes; - many_keys->values = NULL; - many_keys->value_sizes = 0; - many_keys->found = i; - return many_keys; -} - -gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, int limit, const gorocksdb_many_keys_filter_t* key_filter, int page_alloc_size) { +extern gorocksdb_many_keys_t* gorocksdb_iter_many_keys(rocksdb_iterator_t* iter, int limit, const gorocksdb_many_keys_filter_t* key_filter, int page_alloc_size) { int i; char** keys, **values; size_t* key_sizes, *value_sizes; @@ -107,7 +79,7 @@ gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, int size = page_alloc_size; if (size <= 0) { - size = 512; + size = DEFAULT_PAGE_ALLOC_SIZE; } if (limit > 0 && limit < size) { size = limit; @@ -119,8 +91,9 @@ gorocksdb_many_keys_t* 
gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, i = 0; while (rocksdb_iter_valid(iter)) { - // Get key + // Get current key const char* key = rocksdb_iter_key(iter, &key_size); + // Check filter if (key_filter->key_prefix_s > 0) { if (key_size < key_filter->key_prefix_s) { @@ -132,14 +105,30 @@ gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, } if (key_filter->key_end_s > 0) { cmp_size = key_size > key_filter->key_end_s ? key_filter->key_end_s : key_size; - int c; - c = memcmp(key, key_filter->key_end, cmp_size); + int c = memcmp(key, key_filter->key_end, cmp_size); if (c == 0 && key_filter->key_end_s == key_size) { - break; - } else if (c > 0) { + // keys are equals, we break break; } + if (key_filter->reverse) { + if (c == 0 && key_filter->key_end_s > key_size) { + // key_end is bigger than key, we must stop + break; + } else if (c < 0) { + // key is smaller than key_end, we break + break; + } + } else { + if (c == 0 && key_size > key_filter->key_end_s) { + // key_end is smaller than key, we must stop + break; + } else if (c > 0) { + // key is greater than key_end, we break + break; + } + } } + // Store key if (i == size) { // realloc 2x existing size @@ -152,6 +141,7 @@ gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, keys[i] = (char*) malloc(key_size * sizeof(char)); memcpy(keys[i], key, key_size); key_sizes[i] = key_size; + // Get value and store it const char* val = rocksdb_iter_value(iter, &value_size); if (val != NULL) { @@ -162,10 +152,17 @@ gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, } value_sizes[i] = value_size; i++; - // seek next - rocksdb_iter_next(iter); - // check limit + // Next key + if (key_filter->reverse) { + // Move prev + rocksdb_iter_prev(iter); + } else { + // Move next + rocksdb_iter_next(iter); + } + + // Check limit if (limit > 0 && i == limit) { break; } @@ -206,7 +203,8 @@ extern gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* it key_filter.key_prefix_s = keys_searches[i].key_prefix_s; key_filter.key_end = keys_searches[i].key_end; key_filter.key_end_s = keys_searches[i].key_end_s; - result[i] = gorocksdb_iter_next_many_keys_f(iter, keys_searches[i].limit, &key_filter, page_alloc_size); + key_filter.reverse = FALSE; + result[i] = gorocksdb_iter_many_keys(iter, keys_searches[i].limit, &key_filter, page_alloc_size); } return result; } @@ -240,7 +238,8 @@ extern gorocksdb_many_keys_t** gorocksdb_many_search_keys_raw( key_filter.key_prefix_s = key_prefix_s[i]; key_filter.key_end = key_ends[i]; key_filter.key_end_s = key_end_s[i]; - result[i] = gorocksdb_iter_next_many_keys_f(iter, limits[i], &key_filter, page_alloc_size); + key_filter.reverse = FALSE; + result[i] = gorocksdb_iter_many_keys(iter, limits[i], &key_filter, page_alloc_size); } return result; } diff --git a/gorocksdb.h b/gorocksdb.h index c5d40fd6..f145937d 100644 --- a/gorocksdb.h +++ b/gorocksdb.h @@ -10,6 +10,10 @@ typedef struct { } gorocksdb_many_keys_t; +typedef int bool; + +#define FALSE 0 +#define TRUE !FALSE // This API provides convenient C wrapper functions for rocksdb client. 
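A worked illustration of the key_end bound implemented in gorocksdb.c above;
the sample keys are drawn from the tests later in this patch:

    /* With key_end = "C1":
     *   forward scan:  "A", "B", "C" pass; "C10" stops (prefix-equal but
     *                  longer than key_end); "D" stops (memcmp > 0).
     *   reverse scan:  "D", "C12", "C11" pass; "C" stops (prefix-equal but
     *                  shorter than key_end); "B" stops (memcmp < 0).
     */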
@@ -41,17 +45,16 @@ extern rocksdb_slicetransform_t* gorocksdb_slicetransform_create(uintptr_t idx); /* Iterate many keys */ -extern gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys(rocksdb_iterator_t* iter, int size); - typedef struct { char* key_prefix; size_t key_prefix_s; char* key_end; size_t key_end_s; + bool reverse; } gorocksdb_many_keys_filter_t; -extern gorocksdb_many_keys_t* gorocksdb_iter_next_many_keys_f(rocksdb_iterator_t* iter, int limit, const gorocksdb_many_keys_filter_t* key_filter, int page_alloc_size); +extern gorocksdb_many_keys_t* gorocksdb_iter_many_keys(rocksdb_iterator_t* iter, int limit, const gorocksdb_many_keys_filter_t* key_filter, int page_alloc_size); extern void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys); @@ -75,7 +78,7 @@ extern gorocksdb_many_keys_t** gorocksdb_many_search_keys( int page_alloc_size ); -gorocksdb_many_keys_t** gorocksdb_many_search_keys_raw( +extern gorocksdb_many_keys_t** gorocksdb_many_search_keys_raw( rocksdb_iterator_t* iter, char** key_froms, size_t* key_from_s, @@ -90,6 +93,8 @@ gorocksdb_many_keys_t** gorocksdb_many_search_keys_raw( extern void gorocksdb_destroy_many_many_keys(gorocksdb_many_keys_t** many_many_keys, int size); +/* Batch PutMany */ + void gorocksdb_writebatch_put_many( rocksdb_writebatch_t* batch, size_t num_pairs, diff --git a/iterator.go b/iterator.go index 44b0fb2a..ba72d0cb 100644 --- a/iterator.go +++ b/iterator.go @@ -81,70 +81,55 @@ func (iter *Iterator) Next() { C.rocksdb_iter_next(iter.c) } -var ManyKeysPageAllocSize int = 512 - -type ManyKeys struct { - c *C.gorocksdb_many_keys_t +// Prev moves the iterator to the previous sequential key in the database. +func (iter *Iterator) Prev() { + C.rocksdb_iter_prev(iter.c) } -func (m *ManyKeys) Destroy() { - C.gorocksdb_destroy_many_keys(m.c) +// SeekToFirst moves the iterator to the first key in the database. +func (iter *Iterator) SeekToFirst() { + C.rocksdb_iter_seek_to_first(iter.c) } -func (m *ManyKeys) Found() int { - return int(m.c.found) +// SeekToLast moves the iterator to the last key in the database. +func (iter *Iterator) SeekToLast() { + C.rocksdb_iter_seek_to_last(iter.c) } -func (m *ManyKeys) Keys() [][]byte { - found := m.Found() - keys := make([][]byte, found) - - for i := uintptr(0); i < uintptr(found); i++ { - chars := *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.keys)) + i*unsafe.Sizeof(m.c.keys))) - size := *(*C.size_t)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.key_sizes)) + i*unsafe.Sizeof(m.c.key_sizes))) - keys[i] = charToByte(chars, size) - - } - return keys +// Seek moves the iterator to the position greater than or equal to the key. +func (iter *Iterator) Seek(key []byte) { + cKey := byteToChar(key) + C.rocksdb_iter_seek(iter.c, cKey, C.size_t(len(key))) } -func (m *ManyKeys) Values() [][]byte { - found := m.Found() - values := make([][]byte, found) - - for i := uintptr(0); i < uintptr(found); i++ { - chars := *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.values)) + i*unsafe.Sizeof(m.c.values))) - size := *(*C.size_t)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.value_sizes)) + i*unsafe.Sizeof(m.c.value_sizes))) - values[i] = charToByte(chars, size) - } - return values +// SeekForPrev moves the iterator to the last key that less than or equal +// to the target key, in contrast with Seek. 
+func (iter *Iterator) SeekForPrev(key []byte) { + cKey := byteToChar(key) + C.rocksdb_iter_seek_for_prev(iter.c, cKey, C.size_t(len(key))) } -func (m *ManyKeys) Each(each func(i int, key []byte, value []byte) bool) bool { - found := m.Found() - for i := uintptr(0); i < uintptr(found); i++ { - chars := *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.keys)) + i*unsafe.Sizeof(m.c.keys))) - size := *(*C.size_t)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.key_sizes)) + i*unsafe.Sizeof(m.c.key_sizes))) - key := charToByte(chars, size) - - chars = *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.values)) + i*unsafe.Sizeof(m.c.values))) - size = *(*C.size_t)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.value_sizes)) + i*unsafe.Sizeof(m.c.value_sizes))) - value := charToByte(chars, size) - - if !each(int(i), key, value) { - return false - } +// Err returns nil if no errors happened during iteration, or the actual +// error otherwise. +func (iter *Iterator) Err() error { + var cErr *C.char + C.rocksdb_iter_get_error(iter.c, &cErr) + if cErr != nil { + defer C.free(unsafe.Pointer(cErr)) + return errors.New(C.GoString(cErr)) } - return true + return nil } -//.... -func (iter *Iterator) NextManyKeys(size int) *ManyKeys { - return &ManyKeys{c: C.gorocksdb_iter_next_many_keys(iter.c, C.int(size))} +// Close closes the iterator. +func (iter *Iterator) Close() { + C.rocksdb_iter_destroy(iter.c) + iter.c = nil } -//.... -func (iter *Iterator) NextManyKeysF(limit int, keyPrefix, keyEnd []byte) *ManyKeys { +var ManyKeysPageAllocSize int = 512 + +func (iter *Iterator) fetchNextManyKeys(reverse bool, limit int, keyPrefix, keyEnd []byte) *ManyKeys { cKeyFilter := C.gorocksdb_many_keys_filter_t{} if len(keyPrefix) > 0 { cKeyPrefix := C.CString(string(keyPrefix)) @@ -158,7 +143,27 @@ func (iter *Iterator) NextManyKeysF(limit int, keyPrefix, keyEnd []byte) *ManyKe cKeyFilter.key_end = cKeyEnd cKeyFilter.key_end_s = C.size_t(len(keyEnd)) } - return &ManyKeys{c: C.gorocksdb_iter_next_many_keys_f(iter.c, C.int(limit), &cKeyFilter, C.int(ManyKeysPageAllocSize))} + if reverse { + cKeyFilter.reverse = 1 + } else { + cKeyFilter.reverse = 0 + } + return &ManyKeys{c: C.gorocksdb_iter_many_keys(iter.c, C.int(limit), &cKeyFilter, C.int(ManyKeysPageAllocSize))} +} + +// NextManyKeys... +func (iter *Iterator) NextManyKeys(limit int, keyPrefix, keyEnd []byte) *ManyKeys { + return iter.fetchNextManyKeys(false, limit, keyPrefix, keyEnd) +} + +// NextManyKeysF... (compat) +func (iter *Iterator) NextManyKeysF(limit int, keyPrefix, keyEnd []byte) *ManyKeys { + return iter.NextManyKeys(limit, keyPrefix, keyEnd) +} + +// PrevManyKeys... 
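+// It walks the iterator backwards, collecting up to limit entries that
+// still match keyPrefix and stopping once keyEnd is crossed; limit <= 0
+// means no limit.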
+func (iter *Iterator) PrevManyKeys(limit int, keyPrefix, keyEnd []byte) *ManyKeys { + return iter.fetchNextManyKeys(true, limit, keyPrefix, keyEnd) } type KeysSearch struct { @@ -166,11 +171,6 @@ type KeysSearch struct { Limit int } -type ManyManyKeys struct { - c **C.gorocksdb_many_keys_t - size int -} - func (iter *Iterator) ManySearchKeys(searches []KeysSearch) *ManyManyKeys { nbSearches := len(searches) cManyKeysSearches := make([]C.gorocksdb_keys_search_t, nbSearches) @@ -245,61 +245,75 @@ func (iter *Iterator) ManySearchKeysExp(searches []KeysSearch) *ManyManyKeys { return &ManyManyKeys{c: cManyManyKeys, size: nbSearches} } -func (m ManyManyKeys) Result() []*ManyKeys { - result := make([]*ManyKeys, m.size) - for i := uintptr(0); i < uintptr(m.size); i++ { - manyKeys := *(**C.gorocksdb_many_keys_t)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c)) + i*unsafe.Sizeof(m.c))) - result[i] = &ManyKeys{c: manyKeys} - } - return result +type ManyKeys struct { + c *C.gorocksdb_many_keys_t } -func (m ManyManyKeys) Destroy() { - C.gorocksdb_destroy_many_many_keys(m.c, C.int(m.size)) +func (m *ManyKeys) Destroy() { + C.gorocksdb_destroy_many_keys(m.c) } -// Prev moves the iterator to the previous sequential key in the database. -func (iter *Iterator) Prev() { - C.rocksdb_iter_prev(iter.c) +func (m *ManyKeys) Found() int { + return int(m.c.found) } -// SeekToFirst moves the iterator to the first key in the database. -func (iter *Iterator) SeekToFirst() { - C.rocksdb_iter_seek_to_first(iter.c) +func (m *ManyKeys) Keys() [][]byte { + found := m.Found() + keys := make([][]byte, found) + + for i := uintptr(0); i < uintptr(found); i++ { + chars := *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.keys)) + i*unsafe.Sizeof(m.c.keys))) + size := *(*C.size_t)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.key_sizes)) + i*unsafe.Sizeof(m.c.key_sizes))) + keys[i] = charToByte(chars, size) + + } + return keys } -// SeekToLast moves the iterator to the last key in the database. -func (iter *Iterator) SeekToLast() { - C.rocksdb_iter_seek_to_last(iter.c) +func (m *ManyKeys) Values() [][]byte { + found := m.Found() + values := make([][]byte, found) + + for i := uintptr(0); i < uintptr(found); i++ { + chars := *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.values)) + i*unsafe.Sizeof(m.c.values))) + size := *(*C.size_t)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.value_sizes)) + i*unsafe.Sizeof(m.c.value_sizes))) + values[i] = charToByte(chars, size) + } + return values } -// Seek moves the iterator to the position greater than or equal to the key. -func (iter *Iterator) Seek(key []byte) { - cKey := byteToChar(key) - C.rocksdb_iter_seek(iter.c, cKey, C.size_t(len(key))) +func (m *ManyKeys) Each(each func(i int, key []byte, value []byte) bool) bool { + found := m.Found() + for i := uintptr(0); i < uintptr(found); i++ { + chars := *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.keys)) + i*unsafe.Sizeof(m.c.keys))) + size := *(*C.size_t)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.key_sizes)) + i*unsafe.Sizeof(m.c.key_sizes))) + key := charToByte(chars, size) + + chars = *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.values)) + i*unsafe.Sizeof(m.c.values))) + size = *(*C.size_t)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c.value_sizes)) + i*unsafe.Sizeof(m.c.value_sizes))) + value := charToByte(chars, size) + + if !each(int(i), key, value) { + return false + } + } + return true } -// SeekForPrev moves the iterator to the last key that less than or equal -// to the target key, in contrast with Seek. 
-func (iter *Iterator) SeekForPrev(key []byte) { - cKey := byteToChar(key) - C.rocksdb_iter_seek_for_prev(iter.c, cKey, C.size_t(len(key))) +type ManyManyKeys struct { + c **C.gorocksdb_many_keys_t + size int } -// Err returns nil if no errors happened during iteration, or the actual -// error otherwise. -func (iter *Iterator) Err() error { - var cErr *C.char - C.rocksdb_iter_get_error(iter.c, &cErr) - if cErr != nil { - defer C.free(unsafe.Pointer(cErr)) - return errors.New(C.GoString(cErr)) +func (m ManyManyKeys) Result() []*ManyKeys { + result := make([]*ManyKeys, m.size) + for i := uintptr(0); i < uintptr(m.size); i++ { + manyKeys := *(**C.gorocksdb_many_keys_t)(unsafe.Pointer(uintptr(unsafe.Pointer(m.c)) + i*unsafe.Sizeof(m.c))) + result[i] = &ManyKeys{c: manyKeys} } - return nil + return result } -// Close closes the iterator. -func (iter *Iterator) Close() { - C.rocksdb_iter_destroy(iter.c) - iter.c = nil +func (m ManyManyKeys) Destroy() { + C.gorocksdb_destroy_many_many_keys(m.c, C.int(m.size)) } diff --git a/iterator_test.go b/iterator_test.go index 79dd34f1..75046e13 100644 --- a/iterator_test.go +++ b/iterator_test.go @@ -30,44 +30,91 @@ func TestIterator(t *testing.T) { ensure.DeepEqual(t, actualKeys, givenKeys) } -func TestIteratorMany(t *testing.T) { +func TestIteratorNextManyWithKeyPrefix(t *testing.T) { db := newTestDB(t, "TestIterator", nil) defer db.Close() // insert keys - givenKeys := [][]byte{[]byte("key1"), []byte("key2"), []byte("key3")} + givenKeys := [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3"), []byte("keyB1")} wo := NewDefaultWriteOptions() for _, k := range givenKeys { - ensure.Nil(t, db.Put(wo, k, []byte("val"))) + ensure.Nil(t, db.Put(wo, k, []byte("val_"+string(k)))) } ro := NewDefaultReadOptions() iter := db.NewIterator(ro) defer iter.Close() var actualKeys [][]byte + var actualValues [][]byte iter.SeekToFirst() - manyKeys := iter.NextManyKeys(2) + manyKeys := iter.NextManyKeys(2, []byte("keyA"), nil) for manyKeys.Found() > 0 { for _, k := range manyKeys.Keys() { newK := make([]byte, len(k)) copy(newK, k) actualKeys = append(actualKeys, newK) } + for _, v := range manyKeys.Values() { + newV := make([]byte, len(v)) + copy(newV, v) + actualValues = append(actualValues, newV) + } manyKeys.Destroy() - manyKeys = iter.NextManyKeys(2) + manyKeys = iter.NextManyKeys(2, []byte("keyA"), nil) } manyKeys.Destroy() ensure.Nil(t, iter.Err()) - ensure.DeepEqual(t, actualKeys, givenKeys) + ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3")}) + ensure.DeepEqual(t, actualValues, [][]byte{[]byte("val_keyA1"), []byte("val_keyA2"), []byte("val_keyA3")}) } -func TestIteratorManyFOnKeyPrefix(t *testing.T) { +func TestIteratorNextManyWithLimit(t *testing.T) { db := newTestDB(t, "TestIterator", nil) defer db.Close() // insert keys - givenKeys := [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3"), []byte("keyB1")} + givenKeys := [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3"), []byte("keyA4")} + wo := NewDefaultWriteOptions() + for _, k := range givenKeys { + ensure.Nil(t, db.Put(wo, k, []byte("val_"+string(k)))) + } + + ro := NewDefaultReadOptions() + iter := db.NewIterator(ro) + defer iter.Close() + + iter.SeekToFirst() + manyKeys := iter.NextManyKeys(-1, []byte("keyA"), nil) + ensure.DeepEqual(t, manyKeys.Keys(), [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3"), []byte("keyA4")}) + ensure.DeepEqual(t, manyKeys.Values(), [][]byte{[]byte("val_keyA1"), []byte("val_keyA2"), []byte("val_keyA3"), 
[]byte("val_keyA4")}) + manyKeys.Destroy() + + iter.SeekToFirst() + manyKeys = iter.NextManyKeys(0, []byte("keyA"), nil) + ensure.DeepEqual(t, manyKeys.Keys(), [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3"), []byte("keyA4")}) + ensure.DeepEqual(t, manyKeys.Values(), [][]byte{[]byte("val_keyA1"), []byte("val_keyA2"), []byte("val_keyA3"), []byte("val_keyA4")}) + manyKeys.Destroy() + + iter.SeekToFirst() + manyKeys = iter.NextManyKeys(2, []byte("keyA"), nil) + ensure.DeepEqual(t, manyKeys.Keys(), [][]byte{[]byte("keyA1"), []byte("keyA2")}) + ensure.DeepEqual(t, manyKeys.Values(), [][]byte{[]byte("val_keyA1"), []byte("val_keyA2")}) + manyKeys.Destroy() + + iter.SeekToFirst() + manyKeys = iter.NextManyKeys(20, []byte("keyA"), nil) + ensure.DeepEqual(t, manyKeys.Keys(), [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3"), []byte("keyA4")}) + ensure.DeepEqual(t, manyKeys.Values(), [][]byte{[]byte("val_keyA1"), []byte("val_keyA2"), []byte("val_keyA3"), []byte("val_keyA4")}) + manyKeys.Destroy() +} + +func TestIteratorNextManyWithKeyEnd(t *testing.T) { + db := newTestDB(t, "TestIterator", nil) + defer db.Close() + + // insert keys + givenKeys := [][]byte{[]byte("A"), []byte("B"), []byte("C"), []byte("C10"), []byte("C11"), []byte("D")} wo := NewDefaultWriteOptions() for _, k := range givenKeys { ensure.Nil(t, db.Put(wo, k, []byte("val_"+string(k)))) @@ -80,7 +127,7 @@ func TestIteratorManyFOnKeyPrefix(t *testing.T) { var actualValues [][]byte iter.SeekToFirst() - manyKeys := iter.NextManyKeysF(2, []byte("keyA"), nil) + manyKeys := iter.NextManyKeys(2, nil, []byte("C1")) for manyKeys.Found() > 0 { for _, k := range manyKeys.Keys() { newK := make([]byte, len(k)) @@ -93,15 +140,97 @@ func TestIteratorManyFOnKeyPrefix(t *testing.T) { actualValues = append(actualValues, newV) } manyKeys.Destroy() - manyKeys = iter.NextManyKeysF(2, []byte("keyA"), nil) + manyKeys = iter.NextManyKeys(2, nil, []byte("C1")) } manyKeys.Destroy() ensure.Nil(t, iter.Err()) - ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3")}) - ensure.DeepEqual(t, actualValues, [][]byte{[]byte("val_keyA1"), []byte("val_keyA2"), []byte("val_keyA3")}) + ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("A"), []byte("B"), []byte("C")}) + ensure.DeepEqual(t, actualValues, [][]byte{[]byte("val_A"), []byte("val_B"), []byte("val_C")}) +} + +func TestIteratorNextManyWithKeyPrefixAndEnd(t *testing.T) { + db := newTestDB(t, "TestIterator", nil) + defer db.Close() + + // insert keys + givenKeys := [][]byte{[]byte("keyA"), []byte("keyB"), []byte("keyC"), []byte("keyC1")} + wo := NewDefaultWriteOptions() + for _, k := range givenKeys { + ensure.Nil(t, db.Put(wo, k, []byte("val_"+string(k)))) + } + + ro := NewDefaultReadOptions() + iter := db.NewIterator(ro) + defer iter.Close() + var actualKeys [][]byte + var actualValues [][]byte + iter.SeekToFirst() + + manyKeys := iter.NextManyKeys(2, []byte("key"), []byte("keyC1")) + for manyKeys.Found() > 0 { + for _, k := range manyKeys.Keys() { + newK := make([]byte, len(k)) + copy(newK, k) + actualKeys = append(actualKeys, newK) + } + for _, v := range manyKeys.Values() { + newV := make([]byte, len(v)) + copy(newV, v) + actualValues = append(actualValues, newV) + } + manyKeys.Destroy() + manyKeys = iter.NextManyKeys(2, []byte("key"), []byte("keyC1")) + } + manyKeys.Destroy() + ensure.Nil(t, iter.Err()) + ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("keyA"), []byte("keyB"), []byte("keyC")}) + ensure.DeepEqual(t, actualValues, [][]byte{[]byte("val_keyA"), 
[]byte("val_keyB"), []byte("val_keyC")}) } -func TestIteratorManyFWithLimit(t *testing.T) { +func TestIteratorPrevManyWithKeyPrefix(t *testing.T) { + db := newTestDB(t, "TestIterator", nil) + defer db.Close() + + // insert keys + givenKeys := [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3"), []byte("keyB1")} + wo := NewDefaultWriteOptions() + for _, k := range givenKeys { + ensure.Nil(t, db.Put(wo, k, []byte("val_"+string(k)))) + } + + ro := NewDefaultReadOptions() + iter := db.NewIterator(ro) + defer iter.Close() + var actualKeys [][]byte + var actualValues [][]byte + + iter.SeekToLast() + manyKeys := iter.PrevManyKeys(2, []byte("keyA"), nil) + ensure.DeepEqual(t, manyKeys.Found(), 0) + + iter.Seek([]byte("keyA3")) + manyKeys = iter.PrevManyKeys(2, []byte("keyA"), nil) + for manyKeys.Found() > 0 { + for _, k := range manyKeys.Keys() { + newK := make([]byte, len(k)) + copy(newK, k) + actualKeys = append(actualKeys, newK) + } + for _, v := range manyKeys.Values() { + newV := make([]byte, len(v)) + copy(newV, v) + actualValues = append(actualValues, newV) + } + manyKeys.Destroy() + manyKeys = iter.PrevManyKeys(2, []byte("keyA"), nil) + } + manyKeys.Destroy() + ensure.Nil(t, iter.Err()) + ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("keyA3"), []byte("keyA2"), []byte("keyA1")}) + ensure.DeepEqual(t, actualValues, [][]byte{[]byte("val_keyA3"), []byte("val_keyA2"), []byte("val_keyA1")}) +} + +func TestIteratorPrevManyWithLimit(t *testing.T) { db := newTestDB(t, "TestIterator", nil) defer db.Close() @@ -116,37 +245,37 @@ func TestIteratorManyFWithLimit(t *testing.T) { iter := db.NewIterator(ro) defer iter.Close() - iter.SeekToFirst() - manyKeys := iter.NextManyKeysF(-1, []byte("keyA"), nil) - ensure.DeepEqual(t, manyKeys.Keys(), [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3"), []byte("keyA4")}) - ensure.DeepEqual(t, manyKeys.Values(), [][]byte{[]byte("val_keyA1"), []byte("val_keyA2"), []byte("val_keyA3"), []byte("val_keyA4")}) + iter.SeekToLast() + manyKeys := iter.PrevManyKeys(-1, []byte("keyA"), nil) + ensure.DeepEqual(t, manyKeys.Keys(), [][]byte{[]byte("keyA4"), []byte("keyA3"), []byte("keyA2"), []byte("keyA1")}) + ensure.DeepEqual(t, manyKeys.Values(), [][]byte{[]byte("val_keyA4"), []byte("val_keyA3"), []byte("val_keyA2"), []byte("val_keyA1")}) manyKeys.Destroy() - iter.SeekToFirst() - manyKeys = iter.NextManyKeysF(0, []byte("keyA"), nil) - ensure.DeepEqual(t, manyKeys.Keys(), [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3"), []byte("keyA4")}) - ensure.DeepEqual(t, manyKeys.Values(), [][]byte{[]byte("val_keyA1"), []byte("val_keyA2"), []byte("val_keyA3"), []byte("val_keyA4")}) + iter.SeekToLast() + manyKeys = iter.PrevManyKeys(0, []byte("keyA"), nil) + ensure.DeepEqual(t, manyKeys.Keys(), [][]byte{[]byte("keyA4"), []byte("keyA3"), []byte("keyA2"), []byte("keyA1")}) + ensure.DeepEqual(t, manyKeys.Values(), [][]byte{[]byte("val_keyA4"), []byte("val_keyA3"), []byte("val_keyA2"), []byte("val_keyA1")}) manyKeys.Destroy() - iter.SeekToFirst() - manyKeys = iter.NextManyKeysF(2, []byte("keyA"), nil) - ensure.DeepEqual(t, manyKeys.Keys(), [][]byte{[]byte("keyA1"), []byte("keyA2")}) - ensure.DeepEqual(t, manyKeys.Values(), [][]byte{[]byte("val_keyA1"), []byte("val_keyA2")}) + iter.SeekToLast() + manyKeys = iter.PrevManyKeys(2, []byte("keyA"), nil) + ensure.DeepEqual(t, manyKeys.Keys(), [][]byte{[]byte("keyA4"), []byte("keyA3")}) + ensure.DeepEqual(t, manyKeys.Values(), [][]byte{[]byte("val_keyA4"), []byte("val_keyA3")}) manyKeys.Destroy() - iter.SeekToFirst() - 
manyKeys = iter.NextManyKeysF(20, []byte("keyA"), nil) - ensure.DeepEqual(t, manyKeys.Keys(), [][]byte{[]byte("keyA1"), []byte("keyA2"), []byte("keyA3"), []byte("keyA4")}) - ensure.DeepEqual(t, manyKeys.Values(), [][]byte{[]byte("val_keyA1"), []byte("val_keyA2"), []byte("val_keyA3"), []byte("val_keyA4")}) + iter.SeekToLast() + manyKeys = iter.PrevManyKeys(20, []byte("keyA"), nil) + ensure.DeepEqual(t, manyKeys.Keys(), [][]byte{[]byte("keyA4"), []byte("keyA3"), []byte("keyA2"), []byte("keyA1")}) + ensure.DeepEqual(t, manyKeys.Values(), [][]byte{[]byte("val_keyA4"), []byte("val_keyA3"), []byte("val_keyA2"), []byte("val_keyA1")}) manyKeys.Destroy() } -func TestIteratorManyFOnKeyEnd(t *testing.T) { +func TestIteratorPrevManyWithKeyEnd(t *testing.T) { db := newTestDB(t, "TestIterator", nil) defer db.Close() // insert keys - givenKeys := [][]byte{[]byte("A"), []byte("B"), []byte("C"), []byte("C1"), []byte("D")} + givenKeys := [][]byte{[]byte("A"), []byte("B"), []byte("C"), []byte("C11"), []byte("C12"), []byte("D")} wo := NewDefaultWriteOptions() for _, k := range givenKeys { ensure.Nil(t, db.Put(wo, k, []byte("val_"+string(k)))) @@ -157,9 +286,9 @@ func TestIteratorManyFOnKeyEnd(t *testing.T) { defer iter.Close() var actualKeys [][]byte var actualValues [][]byte - iter.SeekToFirst() + iter.SeekToLast() - manyKeys := iter.NextManyKeysF(2, nil, []byte("C1")) + manyKeys := iter.PrevManyKeys(2, nil, []byte("C1")) for manyKeys.Found() > 0 { for _, k := range manyKeys.Keys() { newK := make([]byte, len(k)) @@ -172,15 +301,15 @@ func TestIteratorManyFOnKeyEnd(t *testing.T) { actualValues = append(actualValues, newV) } manyKeys.Destroy() - manyKeys = iter.NextManyKeysF(2, nil, []byte("C1")) + manyKeys = iter.PrevManyKeys(2, nil, []byte("C1")) } manyKeys.Destroy() ensure.Nil(t, iter.Err()) - ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("A"), []byte("B"), []byte("C")}) - ensure.DeepEqual(t, actualValues, [][]byte{[]byte("val_A"), []byte("val_B"), []byte("val_C")}) + ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("D"), []byte("C12"), []byte("C11")}) + ensure.DeepEqual(t, actualValues, [][]byte{[]byte("val_D"), []byte("val_C12"), []byte("val_C11")}) } -func TestIteratorManyFOnKeyPrefixAndEnd(t *testing.T) { +func TestIteratorPrevManyWithKeyPrefixAndEnd(t *testing.T) { db := newTestDB(t, "TestIterator", nil) defer db.Close() @@ -196,9 +325,9 @@ func TestIteratorManyFOnKeyPrefixAndEnd(t *testing.T) { defer iter.Close() var actualKeys [][]byte var actualValues [][]byte - iter.SeekToFirst() + iter.SeekToLast() - manyKeys := iter.NextManyKeysF(2, []byte("key"), []byte("keyC1")) + manyKeys := iter.PrevManyKeys(2, []byte("key"), []byte("keyA")) for manyKeys.Found() > 0 { for _, k := range manyKeys.Keys() { newK := make([]byte, len(k)) @@ -211,12 +340,12 @@ func TestIteratorManyFOnKeyPrefixAndEnd(t *testing.T) { actualValues = append(actualValues, newV) } manyKeys.Destroy() - manyKeys = iter.NextManyKeysF(2, []byte("key"), []byte("keyC1")) + manyKeys = iter.PrevManyKeys(2, []byte("key"), []byte("keyA")) } manyKeys.Destroy() ensure.Nil(t, iter.Err()) - ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("keyA"), []byte("keyB"), []byte("keyC")}) - ensure.DeepEqual(t, actualValues, [][]byte{[]byte("val_keyA"), []byte("val_keyB"), []byte("val_keyC")}) + ensure.DeepEqual(t, actualKeys, [][]byte{[]byte("keyC1"), []byte("keyC"), []byte("keyB")}) + ensure.DeepEqual(t, actualValues, [][]byte{[]byte("val_keyC1"), []byte("val_keyC"), []byte("val_keyB")}) } func TestIteratorManySearchKeys(t *testing.T) { @@ -363,7 +492,7 @@ 
func TestIteratorManySearchKeysWithKeyPrefixAndEnd(t *testing.T) { ensure.DeepEqual(t, result[1].Values(), [][]byte{[]byte("val_keyC"), []byte("val_keyC0")}) } -func TestIteratorManyKeysEach(t *testing.T) { +func TestIteratorNextManyKeysEach(t *testing.T) { db := newTestDB(t, "TestIterator", nil) defer db.Close() @@ -379,7 +508,7 @@ func TestIteratorManyKeysEach(t *testing.T) { defer iter.Close() iter.SeekToFirst() - manyKeys := iter.NextManyKeysF(-1, []byte("keyA"), nil) + manyKeys := iter.NextManyKeys(-1, []byte("keyA"), nil) actualKeys := [][]byte{} actualValues := [][]byte{} @@ -405,4 +534,4 @@ func TestIteratorManyKeysEach(t *testing.T) { ensure.DeepEqual(t, actualValues, [][]byte{[]byte("val_keyA1"), []byte("val_keyA2")}) manyKeys.Destroy() -} \ No newline at end of file +} From 8612af44bf94c73628c7c738dd25f1b248d7818c Mon Sep 17 00:00:00 2001 From: miguel gomard Date: Tue, 10 Oct 2017 09:12:09 +0200 Subject: [PATCH 23/55] cleanup + add reverse to ManySearchKeys --- gorocksdb.c | 13 ++++---- gorocksdb.h | 5 +-- iterator.go | 92 ++++++++++++++++++++++++++--------------------------- 3 files changed, 54 insertions(+), 56 deletions(-) diff --git a/gorocksdb.c b/gorocksdb.c index 343070d4..a231374c 100644 --- a/gorocksdb.c +++ b/gorocksdb.c @@ -68,7 +68,7 @@ rocksdb_slicetransform_t* gorocksdb_slicetransform_create(uintptr_t idx) { #define DEFAULT_PAGE_ALLOC_SIZE 512 -extern gorocksdb_many_keys_t* gorocksdb_iter_many_keys(rocksdb_iterator_t* iter, int limit, const gorocksdb_many_keys_filter_t* key_filter, int page_alloc_size) { +extern gorocksdb_many_keys_t* gorocksdb_iter_many_keys(rocksdb_iterator_t* iter, int limit, bool reverse, const gorocksdb_many_keys_filter_t* key_filter, int page_alloc_size) { int i; char** keys, **values; size_t* key_sizes, *value_sizes; @@ -110,7 +110,7 @@ extern gorocksdb_many_keys_t* gorocksdb_iter_many_keys(rocksdb_iterator_t* iter, // keys are equals, we break break; } - if (key_filter->reverse) { + if (reverse) { if (c == 0 && key_filter->key_end_s > key_size) { // key_end is bigger than key, we must stop break; @@ -154,7 +154,7 @@ extern gorocksdb_many_keys_t* gorocksdb_iter_many_keys(rocksdb_iterator_t* iter, i++; // Next key - if (key_filter->reverse) { + if (reverse) { // Move prev rocksdb_iter_prev(iter); } else { @@ -203,8 +203,7 @@ extern gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* it key_filter.key_prefix_s = keys_searches[i].key_prefix_s; key_filter.key_end = keys_searches[i].key_end; key_filter.key_end_s = keys_searches[i].key_end_s; - key_filter.reverse = FALSE; - result[i] = gorocksdb_iter_many_keys(iter, keys_searches[i].limit, &key_filter, page_alloc_size); + result[i] = gorocksdb_iter_many_keys(iter, keys_searches[i].limit, keys_searches[i].reverse, &key_filter, page_alloc_size); } return result; } @@ -226,6 +225,7 @@ extern gorocksdb_many_keys_t** gorocksdb_many_search_keys_raw( char** key_ends, size_t* key_end_s, int* limits, + bool* reverse, int size, int page_alloc_size ) { @@ -238,8 +238,7 @@ extern gorocksdb_many_keys_t** gorocksdb_many_search_keys_raw( key_filter.key_prefix_s = key_prefix_s[i]; key_filter.key_end = key_ends[i]; key_filter.key_end_s = key_end_s[i]; - key_filter.reverse = FALSE; - result[i] = gorocksdb_iter_many_keys(iter, limits[i], &key_filter, page_alloc_size); + result[i] = gorocksdb_iter_many_keys(iter, limits[i], reverse[i], &key_filter, page_alloc_size); } return result; } diff --git a/gorocksdb.h b/gorocksdb.h index f145937d..829da0d2 100644 --- a/gorocksdb.h +++ b/gorocksdb.h @@ 
-50,11 +50,10 @@ typedef struct { size_t key_prefix_s; char* key_end; size_t key_end_s; - bool reverse; } gorocksdb_many_keys_filter_t; -extern gorocksdb_many_keys_t* gorocksdb_iter_many_keys(rocksdb_iterator_t* iter, int limit, const gorocksdb_many_keys_filter_t* key_filter, int page_alloc_size); +extern gorocksdb_many_keys_t* gorocksdb_iter_many_keys(rocksdb_iterator_t* iter, int limit, bool reverse, const gorocksdb_many_keys_filter_t* key_filter, int page_alloc_size); extern void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys); @@ -68,6 +67,7 @@ typedef struct { char* key_end; size_t key_end_s; int limit; + bool reverse; } gorocksdb_keys_search_t; @@ -87,6 +87,7 @@ extern gorocksdb_many_keys_t** gorocksdb_many_search_keys_raw( char** key_ends, size_t* key_end_s, int* limits, + bool* reverse, int size, int page_alloc_size ); diff --git a/iterator.go b/iterator.go index ba72d0cb..5ccbe4d7 100644 --- a/iterator.go +++ b/iterator.go @@ -143,12 +143,7 @@ func (iter *Iterator) fetchNextManyKeys(reverse bool, limit int, keyPrefix, keyE cKeyFilter.key_end = cKeyEnd cKeyFilter.key_end_s = C.size_t(len(keyEnd)) } - if reverse { - cKeyFilter.reverse = 1 - } else { - cKeyFilter.reverse = 0 - } - return &ManyKeys{c: C.gorocksdb_iter_many_keys(iter.c, C.int(limit), &cKeyFilter, C.int(ManyKeysPageAllocSize))} + return &ManyKeys{c: C.gorocksdb_iter_many_keys(iter.c, C.int(limit), C.bool(btoi(reverse)), &cKeyFilter, C.int(ManyKeysPageAllocSize))} } // NextManyKeys... @@ -167,15 +162,18 @@ func (iter *Iterator) PrevManyKeys(limit int, keyPrefix, keyEnd []byte) *ManyKey } type KeysSearch struct { - KeyFrom, KeyPrefix, KeyEnd []byte - Limit int + KeyFrom, + KeyPrefix, + KeyEnd []byte + Limit int + Reverse bool } func (iter *Iterator) ManySearchKeys(searches []KeysSearch) *ManyManyKeys { nbSearches := len(searches) cManyKeysSearches := make([]C.gorocksdb_keys_search_t, nbSearches) for i := range searches { - cKSearch := C.gorocksdb_keys_search_t{limit: C.int(searches[i].Limit)} + cKSearch := C.gorocksdb_keys_search_t{limit: C.int(searches[i].Limit), reverse: C.bool(btoi(searches[i].Reverse))} cKSearch.key_from = C.CString(string(searches[i].KeyFrom)) cKSearch.key_from_s = C.size_t(len(searches[i].KeyFrom)) if len(searches[i].KeyPrefix) > 0 { @@ -206,44 +204,44 @@ func (iter *Iterator) ManySearchKeys(searches []KeysSearch) *ManyManyKeys { return &ManyManyKeys{c: cManyManyKeys, size: nbSearches} } -func (iter *Iterator) ManySearchKeysExp(searches []KeysSearch) *ManyManyKeys { - nbSearches := len(searches) - - cKeyFroms := C.malloc(C.size_t(nbSearches) * C.size_t(unsafe.Sizeof(uintptr(0)))) - defer C.free(cKeyFroms) - cKeyPrefixes := C.malloc(C.size_t(nbSearches) * C.size_t(unsafe.Sizeof(uintptr(0)))) - defer C.free(cKeyPrefixes) - cKeyEnds := C.malloc(C.size_t(nbSearches) * C.size_t(unsafe.Sizeof(uintptr(0)))) - defer C.free(cKeyEnds) - cKeyFromSizes := make([]C.size_t, nbSearches) - cKeyPrefixSizes := make([]C.size_t, nbSearches) - cKeyEndSizes := make([]C.size_t, nbSearches) - cLimits := make([]C.int, nbSearches) - - for i := uintptr(0); i < uintptr(nbSearches); i++ { - search := searches[i] - *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(cKeyFroms)) + i*unsafe.Sizeof(cKeyFroms))) = byteToChar(search.KeyFrom) - *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(cKeyPrefixes)) + i*unsafe.Sizeof(cKeyPrefixes))) = byteToChar(search.KeyPrefix) - *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(cKeyEnds)) + i*unsafe.Sizeof(cKeyEnds))) = byteToChar(search.KeyEnd) - cKeyFromSizes[i] = 
C.size_t(len(searches[i].KeyFrom)) - cKeyPrefixSizes[i] = C.size_t(len(searches[i].KeyPrefix)) - cKeyEndSizes[i] = C.size_t(len(searches[i].KeyEnd)) - cLimits[i] = C.int(searches[i].Limit) - } - cManyManyKeys := C.gorocksdb_many_search_keys_raw( - iter.c, - (**C.char)(unsafe.Pointer(cKeyFroms)), - (*C.size_t)(unsafe.Pointer(&cKeyFromSizes[0])), - (**C.char)(unsafe.Pointer(cKeyPrefixes)), - (*C.size_t)(unsafe.Pointer(&cKeyPrefixSizes[0])), - (**C.char)(unsafe.Pointer(cKeyEnds)), - (*C.size_t)(unsafe.Pointer(&cKeyEndSizes[0])), - (*C.int)(unsafe.Pointer(&cLimits[0])), - C.int(nbSearches), - C.int(ManyKeysPageAllocSize), - ) - return &ManyManyKeys{c: cManyManyKeys, size: nbSearches} -} +//func (iter *Iterator) ManySearchKeysExp(searches []KeysSearch) *ManyManyKeys { +// nbSearches := len(searches) +// +// cKeyFroms := C.malloc(C.size_t(nbSearches) * C.size_t(unsafe.Sizeof(uintptr(0)))) +// defer C.free(cKeyFroms) +// cKeyPrefixes := C.malloc(C.size_t(nbSearches) * C.size_t(unsafe.Sizeof(uintptr(0)))) +// defer C.free(cKeyPrefixes) +// cKeyEnds := C.malloc(C.size_t(nbSearches) * C.size_t(unsafe.Sizeof(uintptr(0)))) +// defer C.free(cKeyEnds) +// cKeyFromSizes := make([]C.size_t, nbSearches) +// cKeyPrefixSizes := make([]C.size_t, nbSearches) +// cKeyEndSizes := make([]C.size_t, nbSearches) +// cLimits := make([]C.int, nbSearches) +// +// for i := uintptr(0); i < uintptr(nbSearches); i++ { +// search := searches[i] +// *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(cKeyFroms)) + i*unsafe.Sizeof(cKeyFroms))) = byteToChar(search.KeyFrom) +// *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(cKeyPrefixes)) + i*unsafe.Sizeof(cKeyPrefixes))) = byteToChar(search.KeyPrefix) +// *(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(cKeyEnds)) + i*unsafe.Sizeof(cKeyEnds))) = byteToChar(search.KeyEnd) +// cKeyFromSizes[i] = C.size_t(len(searches[i].KeyFrom)) +// cKeyPrefixSizes[i] = C.size_t(len(searches[i].KeyPrefix)) +// cKeyEndSizes[i] = C.size_t(len(searches[i].KeyEnd)) +// cLimits[i] = C.int(searches[i].Limit) +// } +// cManyManyKeys := C.gorocksdb_many_search_keys_raw( +// iter.c, +// (**C.char)(unsafe.Pointer(cKeyFroms)), +// (*C.size_t)(unsafe.Pointer(&cKeyFromSizes[0])), +// (**C.char)(unsafe.Pointer(cKeyPrefixes)), +// (*C.size_t)(unsafe.Pointer(&cKeyPrefixSizes[0])), +// (**C.char)(unsafe.Pointer(cKeyEnds)), +// (*C.size_t)(unsafe.Pointer(&cKeyEndSizes[0])), +// (*C.int)(unsafe.Pointer(&cLimits[0])), +// C.int(nbSearches), +// C.int(ManyKeysPageAllocSize), +// ) +// return &ManyManyKeys{c: cManyManyKeys, size: nbSearches} +//} type ManyKeys struct { c *C.gorocksdb_many_keys_t From 633ea42facacd2cf1bd3327d1ad16d534db917cb Mon Sep 17 00:00:00 2001 From: miguel gomard Date: Tue, 10 Oct 2017 13:25:39 +0200 Subject: [PATCH 24/55] add ExcludeKeyFrom option for ManySearchKeys --- gorocksdb.c | 11 +++++++++++ gorocksdb.h | 1 + iterator.go | 11 ++++++++--- iterator_test.go | 44 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 64 insertions(+), 3 deletions(-) diff --git a/gorocksdb.c b/gorocksdb.c index a231374c..ecfdf302 100644 --- a/gorocksdb.c +++ b/gorocksdb.c @@ -199,6 +199,17 @@ extern gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* it gorocksdb_many_keys_t** result = (gorocksdb_many_keys_t**) malloc(size*sizeof(gorocksdb_many_keys_t*)); for (i=0; i < size; i++) { rocksdb_iter_seek(iter, keys_searches[i].key_from, keys_searches[i].key_from_s); + if (keys_searches[i].exclude_key_from && rocksdb_iter_valid(iter)) { + size_t key_size; + const char* key = 
rocksdb_iter_key(iter, &key_size); + if (keys_searches[i].key_from_s == key_size && memcmp(key, keys_searches[i].key_from, key_size) == 0) { + if (keys_searches[i].reverse) { + rocksdb_iter_prev(iter); + } else { + rocksdb_iter_next(iter); + } + } + } key_filter.key_prefix = keys_searches[i].key_prefix; key_filter.key_prefix_s = keys_searches[i].key_prefix_s; key_filter.key_end = keys_searches[i].key_end; diff --git a/gorocksdb.h b/gorocksdb.h index 829da0d2..0a068a53 100644 --- a/gorocksdb.h +++ b/gorocksdb.h @@ -68,6 +68,7 @@ typedef struct { size_t key_end_s; int limit; bool reverse; + bool exclude_key_from; } gorocksdb_keys_search_t; diff --git a/iterator.go b/iterator.go index 5ccbe4d7..ae11f426 100644 --- a/iterator.go +++ b/iterator.go @@ -165,15 +165,20 @@ type KeysSearch struct { KeyFrom, KeyPrefix, KeyEnd []byte - Limit int - Reverse bool + Limit int + Reverse bool + ExcludeKeyFrom bool } func (iter *Iterator) ManySearchKeys(searches []KeysSearch) *ManyManyKeys { nbSearches := len(searches) cManyKeysSearches := make([]C.gorocksdb_keys_search_t, nbSearches) for i := range searches { - cKSearch := C.gorocksdb_keys_search_t{limit: C.int(searches[i].Limit), reverse: C.bool(btoi(searches[i].Reverse))} + cKSearch := C.gorocksdb_keys_search_t{ + limit: C.int(searches[i].Limit), + reverse: C.bool(btoi(searches[i].Reverse)), + exclude_key_from: C.bool(btoi(searches[i].ExcludeKeyFrom)), + } cKSearch.key_from = C.CString(string(searches[i].KeyFrom)) cKSearch.key_from_s = C.size_t(len(searches[i].KeyFrom)) if len(searches[i].KeyPrefix) > 0 { diff --git a/iterator_test.go b/iterator_test.go index 75046e13..d28a40ca 100644 --- a/iterator_test.go +++ b/iterator_test.go @@ -385,6 +385,50 @@ func TestIteratorManySearchKeys(t *testing.T) { ensure.DeepEqual(t, result[2].Values(), [][]byte{}) } +func TestIteratorManySearchKeysExcludeKeyFrom(t *testing.T) { + db := newTestDB(t, "TestIterator", nil) + defer db.Close() + + // insert keys + givenKeys := [][]byte{[]byte("A"), []byte("B"), []byte("C"), []byte("D"), []byte("E"), []byte("F")} + wo := NewDefaultWriteOptions() + for _, k := range givenKeys { + ensure.Nil(t, db.Put(wo, k, []byte("val_"+string(k)))) + } + + ro := NewDefaultReadOptions() + iter := db.NewIterator(ro) + defer iter.Close() + + searches := make([]KeysSearch, 5) + searches[0] = KeysSearch{KeyFrom: []byte("A"), Limit: 1000, Reverse: false, ExcludeKeyFrom: true} + searches[1] = KeysSearch{KeyFrom: []byte("D"), Limit: 1000, Reverse: false, ExcludeKeyFrom: false} + searches[2] = KeysSearch{KeyFrom: []byte("A"), Limit: 1000, Reverse: true, ExcludeKeyFrom: true} + searches[3] = KeysSearch{KeyFrom: []byte("A"), Limit: 1000, Reverse: true, ExcludeKeyFrom: false} + searches[4] = KeysSearch{KeyFrom: []byte("D"), Limit: 1000, Reverse: true, ExcludeKeyFrom: false} + manyManyKeys := iter.ManySearchKeys(searches) + defer manyManyKeys.Destroy() + result := manyManyKeys.Result() + if len(result) != len(searches) { + t.Fatalf("result len should be %d", len(searches)) + } + ensure.DeepEqual(t, result[0].Found(), 5) + ensure.DeepEqual(t, result[0].Keys(), [][]byte{[]byte("B"), []byte("C"), []byte("D"), []byte("E"), []byte("F")}) + ensure.DeepEqual(t, result[0].Values(), [][]byte{[]byte("val_B"), []byte("val_C"), []byte("val_D"), []byte("val_E"), []byte("val_F")}) + ensure.DeepEqual(t, result[1].Found(), 3) + ensure.DeepEqual(t, result[1].Keys(), [][]byte{[]byte("D"), []byte("E"), []byte("F")}) + ensure.DeepEqual(t, result[1].Values(), [][]byte{[]byte("val_D"), []byte("val_E"), []byte("val_F")}) + 
ensure.DeepEqual(t, result[2].Found(), 0) + ensure.DeepEqual(t, result[2].Keys(), [][]byte{}) + ensure.DeepEqual(t, result[2].Values(), [][]byte{}) + ensure.DeepEqual(t, result[3].Found(), 1) + ensure.DeepEqual(t, result[3].Keys(), [][]byte{[]byte("A")}) + ensure.DeepEqual(t, result[3].Values(), [][]byte{[]byte("val_A")}) + ensure.DeepEqual(t, result[4].Found(), 4) + ensure.DeepEqual(t, result[4].Keys(), [][]byte{[]byte("D"), []byte("C"), []byte("B"), []byte("A")}) + ensure.DeepEqual(t, result[4].Values(), [][]byte{[]byte("val_D"), []byte("val_C"), []byte("val_B"), []byte("val_A")}) +} + func TestIteratorManySearchKeysWithKeyPrefix(t *testing.T) { db := newTestDB(t, "TestIterator", nil) defer db.Close() From bf68d76d17ae30bf84275318a6b87b2286e11e8a Mon Sep 17 00:00:00 2001 From: miguel gomard Date: Tue, 10 Oct 2017 18:15:00 +0200 Subject: [PATCH 25/55] fix / call rocksdb_iter_seek_for_prev when prev is used --- gorocksdb.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/gorocksdb.c b/gorocksdb.c index ecfdf302..f5b0d96f 100644 --- a/gorocksdb.c +++ b/gorocksdb.c @@ -198,7 +198,11 @@ extern gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* it gorocksdb_many_keys_filter_t key_filter; gorocksdb_many_keys_t** result = (gorocksdb_many_keys_t**) malloc(size*sizeof(gorocksdb_many_keys_t*)); for (i=0; i < size; i++) { - rocksdb_iter_seek(iter, keys_searches[i].key_from, keys_searches[i].key_from_s); + if (keys_searches[i].reverse) { + rocksdb_iter_seek_for_prev(iter, keys_searches[i].key_from, keys_searches[i].key_from_s); + } else { + rocksdb_iter_seek(iter, keys_searches[i].key_from, keys_searches[i].key_from_s); + } if (keys_searches[i].exclude_key_from && rocksdb_iter_valid(iter)) { size_t key_size; const char* key = rocksdb_iter_key(iter, &key_size); From bee9853948af222c0bad4cb19f9d0e82ec4fb909 Mon Sep 17 00:00:00 2001 From: miguel gomard Date: Tue, 10 Oct 2017 20:37:19 +0200 Subject: [PATCH 26/55] elaborate tests --- iterator_test.go | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/iterator_test.go b/iterator_test.go index d28a40ca..26b6860e 100644 --- a/iterator_test.go +++ b/iterator_test.go @@ -385,6 +385,39 @@ func TestIteratorManySearchKeys(t *testing.T) { ensure.DeepEqual(t, result[2].Values(), [][]byte{}) } +func TestIteratorManySearchKeysReverse(t *testing.T) { + db := newTestDB(t, "TestIterator", nil) + defer db.Close() + + // insert keys + givenKeys := [][]byte{[]byte("A1"), []byte("A2"), []byte("C1"), []byte("C2"), []byte("D"), []byte("E"), []byte("F")} + wo := NewDefaultWriteOptions() + for _, k := range givenKeys { + ensure.Nil(t, db.Put(wo, k, []byte("val_"+string(k)))) + } + + ro := NewDefaultReadOptions() + iter := db.NewIterator(ro) + defer iter.Close() + + searches := make([]KeysSearch, 2) + searches[0] = KeysSearch{KeyFrom: []byte("C3"), Limit: 1000, Reverse: true} + searches[1] = KeysSearch{KeyFrom: []byte("C2"), Limit: 1000, Reverse: true} + + manyManyKeys := iter.ManySearchKeys(searches) + defer manyManyKeys.Destroy() + result := manyManyKeys.Result() + if len(result) != len(searches) { + t.Fatalf("result len should be %d", len(searches)) + } + ensure.DeepEqual(t, result[0].Found(), 4) + ensure.DeepEqual(t, result[0].Keys(), [][]byte{[]byte("C2"), []byte("C1"), []byte("A2"), []byte("A1")}) + ensure.DeepEqual(t, result[0].Values(), [][]byte{[]byte("val_C2"), []byte("val_C1"), []byte("val_A2"), []byte("val_A1")}) + ensure.DeepEqual(t, result[1].Found(), 4) + ensure.DeepEqual(t, 
result[1].Keys(), [][]byte{[]byte("C2"), []byte("C1"), []byte("A2"), []byte("A1")})
+	ensure.DeepEqual(t, result[1].Values(), [][]byte{[]byte("val_C2"), []byte("val_C1"), []byte("val_A2"), []byte("val_A1")})
+}
+
 func TestIteratorManySearchKeysExcludeKeyFrom(t *testing.T) {
 	db := newTestDB(t, "TestIterator", nil)
 	defer db.Close()

From 0257acde81a9d71f6996a51e122ce8245aba149a Mon Sep 17 00:00:00 2001
From: miguel gomard
Date: Wed, 11 Oct 2017 08:30:52 +0200
Subject: [PATCH 27/55] ManySearch accepting empty KeyFrom

---
 gorocksdb.c      | 46 ++++++++++++++++++++++++++++++----------------
 iterator_test.go | 39 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 69 insertions(+), 16 deletions(-)

diff --git a/gorocksdb.c b/gorocksdb.c
index f5b0d96f..8e242da0 100644
--- a/gorocksdb.c
+++ b/gorocksdb.c
@@ -193,27 +193,41 @@ extern void gorocksdb_destroy_many_keys(gorocksdb_many_keys_t* many_keys) {
     free(many_keys);
 }
 
+void _seek(rocksdb_iterator_t* iter, char* to_key, size_t to_key_s, bool reverse, bool exclude_to_key) {
+    // seek
+    if (reverse) {
+        if (to_key_s > 0) {
+            rocksdb_iter_seek_for_prev(iter, to_key, to_key_s);
+        } else {
+            rocksdb_iter_seek_to_last(iter);
+        }
+    } else {
+        if (to_key_s > 0) {
+            rocksdb_iter_seek(iter, to_key, to_key_s);
+        } else {
+            rocksdb_iter_seek_to_first(iter);
+        }
+    }
+    // skip the current key if it equals to_key and it must be excluded
+    if (exclude_to_key && rocksdb_iter_valid(iter)) {
+        size_t key_size;
+        const char* key = rocksdb_iter_key(iter, &key_size);
+        if (to_key_s == key_size && memcmp(key, to_key, key_size) == 0) {
+            if (reverse) {
+                rocksdb_iter_prev(iter);
+            } else {
+                rocksdb_iter_next(iter);
+            }
+        }
+    }
+}
+
 extern gorocksdb_many_keys_t** gorocksdb_many_search_keys(rocksdb_iterator_t* iter, const gorocksdb_keys_search_t* keys_searches, int size, int page_alloc_size) {
     int i;
     gorocksdb_many_keys_filter_t key_filter;
     gorocksdb_many_keys_t** result = (gorocksdb_many_keys_t**) malloc(size*sizeof(gorocksdb_many_keys_t*));
     for (i=0; i < size; i++) {
-        if (keys_searches[i].reverse) {
-            rocksdb_iter_seek_for_prev(iter, keys_searches[i].key_from, keys_searches[i].key_from_s);
-        } else {
-            rocksdb_iter_seek(iter, keys_searches[i].key_from, keys_searches[i].key_from_s);
-        }
-        if (keys_searches[i].exclude_key_from && rocksdb_iter_valid(iter)) {
-            size_t key_size;
-            const char* key = rocksdb_iter_key(iter, &key_size);
-            if (keys_searches[i].key_from_s == key_size && memcmp(key, keys_searches[i].key_from, key_size) == 0) {
-                if (keys_searches[i].reverse) {
-                    rocksdb_iter_prev(iter);
-                } else {
-                    rocksdb_iter_next(iter);
-                }
-            }
-        }
+        _seek(iter, keys_searches[i].key_from, keys_searches[i].key_from_s, keys_searches[i].reverse, keys_searches[i].exclude_key_from);
         key_filter.key_prefix = keys_searches[i].key_prefix;
         key_filter.key_prefix_s = keys_searches[i].key_prefix_s;
         key_filter.key_end = keys_searches[i].key_end;
diff --git a/iterator_test.go b/iterator_test.go
index 26b6860e..5b20f61a 100644
--- a/iterator_test.go
+++ b/iterator_test.go
@@ -385,6 +385,45 @@ func TestIteratorManySearchKeys(t *testing.T) {
 	ensure.DeepEqual(t, result[2].Values(), [][]byte{})
 }
 
+func TestIteratorManySearchKeysEmptyKeyFrom(t *testing.T) {
+	db := newTestDB(t, "TestIterator", nil)
+	defer db.Close()
+
+	// insert keys
+	givenKeys := [][]byte{[]byte("A"), []byte("B"), []byte("C"), []byte("D"), []byte("E"), []byte("F")}
+	wo := NewDefaultWriteOptions()
+	for _, k := range givenKeys {
+		ensure.Nil(t, db.Put(wo, k, []byte("val_"+string(k))))
+	}
+
+	ro := NewDefaultReadOptions()
+	iter := db.NewIterator(ro)
+	defer iter.Close()
+
+	
searches := make([]KeysSearch, 4) + searches[0] = KeysSearch{Limit: 3} + searches[1] = KeysSearch{Limit: 3, ExcludeKeyFrom: true} + searches[2] = KeysSearch{Limit: 3, Reverse: true} + searches[3] = KeysSearch{Limit: 3, ExcludeKeyFrom: true, Reverse: true} + + manyManyKeys := iter.ManySearchKeys(searches) + defer manyManyKeys.Destroy() + result := manyManyKeys.Result() + if len(result) != len(searches) { + t.Fatalf("result len should be %d", len(searches)) + } + ensure.DeepEqual(t, result[0].Found(), 3) + ensure.DeepEqual(t, result[0].Keys(), [][]byte{[]byte("A"), []byte("B"), []byte("C")}) + ensure.DeepEqual(t, result[0].Values(), [][]byte{[]byte("val_A"), []byte("val_B"), []byte("val_C")}) + ensure.DeepEqual(t, result[0].Keys(), result[1].Keys()) + ensure.DeepEqual(t, result[0].Values(), result[1].Values()) + ensure.DeepEqual(t, result[2].Found(), 3) + ensure.DeepEqual(t, result[2].Keys(), [][]byte{[]byte("F"), []byte("E"), []byte("D")}) + ensure.DeepEqual(t, result[2].Values(), [][]byte{[]byte("val_F"), []byte("val_E"), []byte("val_D")}) + ensure.DeepEqual(t, result[2].Keys(), result[3].Keys()) + ensure.DeepEqual(t, result[2].Values(), result[3].Values()) +} + func TestIteratorManySearchKeysReverse(t *testing.T) { db := newTestDB(t, "TestIterator", nil) defer db.Close() From 15ea28d3df4969b90e90d2fbc9a4d142b58ca9f5 Mon Sep 17 00:00:00 2001 From: Jelte Fennema Date: Tue, 9 Jan 2018 11:26:35 +0100 Subject: [PATCH 28/55] Free memory earlier for PutMany operation --- util.go | 12 +++++++++++- write_batch.go | 21 +++++++++------------ 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/util.go b/util.go index 73fd4c43..737706d6 100644 --- a/util.go +++ b/util.go @@ -1,5 +1,6 @@ package gorocksdb +// #include "stdlib.h" import "C" import ( "reflect" @@ -39,7 +40,11 @@ func byteToChar(b []byte) *C.char { return c } -func byteSliceToArray(vals [][]byte) (**C.char, *C.size_t) { +// bytesSliceToArray converts a slice of byte slices to two C arrays. One +// containing pointers to the byte slices and one containing their sizes. +// IMPORTANT: The **C.char array is malloced and should be freed using +// freeCharsArray after it is used. +func bytesSliceToArray(vals [][]byte) (**C.char, *C.size_t) { if len(vals) == 0 { return nil, nil } @@ -61,6 +66,11 @@ func byteSliceToArray(vals [][]byte) (**C.char, *C.size_t) { } +// freeCharsArray frees a **C.char that is malloced by this library itself. +func freeCharsArray(charsArray **C.char) { + C.free(unsafe.Pointer(charsArray)) +} + // Go []byte to C string // The C string is allocated in the C heap using malloc. func cByteSlice(b []byte) *C.char { diff --git a/write_batch.go b/write_batch.go index d22dcc96..3d7bb457 100644 --- a/write_batch.go +++ b/write_batch.go @@ -6,13 +6,11 @@ import "C" import ( "errors" "io" - "unsafe" ) // WriteBatch is a batching of Puts, Merges and Deletes. type WriteBatch struct { - c *C.rocksdb_writebatch_t - charArrays []**C.char + c *C.rocksdb_writebatch_t } // NewWriteBatch create a WriteBatch object. 
@@ -50,9 +48,10 @@ func (wb *WriteBatch) PutMany(keys, values [][]byte) error { return errors.New("Number of keys and values should be the same") } numPairs := C.size_t(len(keys)) - cKeys, cKeySizes := byteSliceToArray(keys) - cValues, cValueSizes := byteSliceToArray(values) - wb.charArrays = append(wb.charArrays, cKeys, cValues) + cKeys, cKeySizes := bytesSliceToArray(keys) + defer freeCharsArray(cKeys) + cValues, cValueSizes := bytesSliceToArray(values) + defer freeCharsArray(cValues) C.gorocksdb_writebatch_put_many( wb.c, numPairs, @@ -68,9 +67,10 @@ func (wb *WriteBatch) PutManyCF(cf *ColumnFamilyHandle, keys, values [][]byte) e return errors.New("Number of keys and values should be the same") } numPairs := C.size_t(len(keys)) - cKeys, cKeySizes := byteSliceToArray(keys) - cValues, cValueSizes := byteSliceToArray(values) - wb.charArrays = append(wb.charArrays, cKeys, cValues) + cKeys, cKeySizes := bytesSliceToArray(keys) + defer freeCharsArray(cKeys) + cValues, cValueSizes := bytesSliceToArray(values) + defer freeCharsArray(cValues) C.gorocksdb_writebatch_put_many_cf( wb.c, cf.c, numPairs, @@ -136,9 +136,6 @@ func (wb *WriteBatch) Clear() { // Destroy deallocates the WriteBatch object. func (wb *WriteBatch) Destroy() { C.rocksdb_writebatch_destroy(wb.c) - for _, arr := range wb.charArrays { - C.free(unsafe.Pointer(arr)) - } wb.c = nil } From 45daa3821dd0c86ecb22c8a03042bbb903d2fa86 Mon Sep 17 00:00:00 2001 From: Jelte Fennema Date: Wed, 31 Jan 2018 11:21:14 +0100 Subject: [PATCH 29/55] Add an Exists method to Slice to make it possible to make code clearer --- db_test.go | 28 ++++++++++++++++++++++++---- slice.go | 8 +++++++- 2 files changed, 31 insertions(+), 5 deletions(-) diff --git a/db_test.go b/db_test.go index 8963f7b7..c1376948 100644 --- a/db_test.go +++ b/db_test.go @@ -18,12 +18,20 @@ func TestDBCRUD(t *testing.T) { var ( givenKey = []byte("hello") - givenVal1 = []byte("world1") - givenVal2 = []byte("world2") + givenVal1 = []byte("") + givenVal2 = []byte("world1") + givenVal3 = []byte("world2") wo = NewDefaultWriteOptions() ro = NewDefaultReadOptions() ) + // retrieve before create + noexist, err := db.Get(ro, givenKey) + defer noexist.Free() + ensure.Nil(t, err) + ensure.False(t, noexist.Exists()) + ensure.DeepEqual(t, noexist.Data(), []byte(nil)) + // create ensure.Nil(t, db.Put(wo, givenKey, givenVal1)) @@ -31,6 +39,7 @@ func TestDBCRUD(t *testing.T) { v1, err := db.Get(ro, givenKey) defer v1.Free() ensure.Nil(t, err) + ensure.True(t, v1.Exists()) ensure.DeepEqual(t, v1.Data(), givenVal1) // update @@ -38,13 +47,24 @@ func TestDBCRUD(t *testing.T) { v2, err := db.Get(ro, givenKey) defer v2.Free() ensure.Nil(t, err) + ensure.True(t, v2.Exists()) ensure.DeepEqual(t, v2.Data(), givenVal2) + // update + ensure.Nil(t, db.Put(wo, givenKey, givenVal3)) + v3, err := db.Get(ro, givenKey) + defer v3.Free() + ensure.Nil(t, err) + ensure.True(t, v3.Exists()) + ensure.DeepEqual(t, v3.Data(), givenVal3) + // delete ensure.Nil(t, db.Delete(wo, givenKey)) - v3, err := db.Get(ro, givenKey) + v4, err := db.Get(ro, givenKey) + defer v4.Free() ensure.Nil(t, err) - ensure.True(t, v3.Data() == nil) + ensure.False(t, v4.Exists()) + ensure.DeepEqual(t, v4.Data(), []byte(nil)) } func newTestDB(t *testing.T, name string, applyOpts func(opts *Options)) *DB { diff --git a/slice.go b/slice.go index d8b7a2e9..74cf96f5 100644 --- a/slice.go +++ b/slice.go @@ -23,7 +23,8 @@ func StringToSlice(data string) *Slice { return NewSlice(C.CString(data), C.size_t(len(data))) } -// Data returns the data of the 
slice.
+// Data returns the data of the slice. If the key doesn't exist this will be a
+// nil slice.
 func (s *Slice) Data() []byte {
 	return charToByte(s.data, s.size)
 }
@@ -33,6 +34,11 @@ func (s *Slice) Size() int {
 	return int(s.size)
 }
 
+// Exists reports whether the key exists.
+func (s *Slice) Exists() bool {
+	return s.data != nil
+}
+
 // Free frees the slice data.
 func (s *Slice) Free() {
 	if !s.freed {

From dd409efa2f3f210f2d6d7594256148a3f0f4e11e Mon Sep 17 00:00:00 2001
From: Jelte Fennema
Date: Wed, 31 Jan 2018 11:23:41 +0100
Subject: [PATCH 30/55] Gitignore: Ignore .rocksdb-repo

---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 .gitignore

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..4d8938be
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+/.rocksdb-repo

From e5116c98972e1791548f521a65820c42e6e805fc Mon Sep 17 00:00:00 2001
From: Jelte Fennema
Date: Wed, 31 Jan 2018 14:32:03 +0100
Subject: [PATCH 31/55] Make bytesSliceToArray acknowledge CGO rules of not passing Go pointers in C memory

---
 util.go        | 36 ++++++++++++++++++++++++------------
 write_batch.go |  8 ++++----
 2 files changed, 28 insertions(+), 16 deletions(-)

diff --git a/util.go b/util.go
index 737706d6..50b3cac5 100644
--- a/util.go
+++ b/util.go
@@ -49,26 +49,24 @@ func bytesSliceToArray(vals [][]byte) (**C.char, *C.size_t) {
 		return nil, nil
 	}
 
-	chars := make([]*C.char, len(vals))
-	sizes := make([]C.size_t, len(vals))
+	chars, cChars := emptyCharSlice(len(vals))
+	sizes, cSizes := emptySizetSlice(len(vals))
 	for i, val := range vals {
-		chars[i] = byteToChar(val)
+		chars[i] = (*C.char)(C.CBytes(val))
 		sizes[i] = C.size_t(len(val))
 	}
 
-	cCharBuf := C.malloc(C.size_t(unsafe.Sizeof(chars[0])) * C.size_t(len(chars)))
-	copy(((*[1 << 32]*C.char)(cCharBuf))[:], chars)
-
-	cChars := (**C.char)(cCharBuf)
-
-	cSizes := (*C.size_t)(unsafe.Pointer(&sizes[0]))
 	return cChars, cSizes
-
 }
 
 // freeCharsArray frees a **C.char that is malloced by this library itself.
-func freeCharsArray(charsArray **C.char) {
-	C.free(unsafe.Pointer(charsArray))
+func freeCharsArray(charsArray **C.char, length int) {
+	var charsSlice []*C.char
+	sH := (*reflect.SliceHeader)(unsafe.Pointer(&charsSlice))
+	sH.Cap, sH.Len, sH.Data = length, length, uintptr(unsafe.Pointer(charsArray))
+	for _, chars := range charsSlice {
+		C.free(unsafe.Pointer(chars))
+	}
 }
 
 // Go []byte to C string
@@ -89,6 +87,20 @@ func stringToChar(s string) *C.char {
 	return (*C.char)(unsafe.Pointer(ptrStr.Data))
 }
 
+func emptyCharSlice(length int) (slice []*C.char, cSlice **C.char) {
+	slice = make([]*C.char, length)
+	sH := (*reflect.SliceHeader)(unsafe.Pointer(&slice))
+	cSlice = (**C.char)(unsafe.Pointer(sH.Data))
+	return slice, cSlice
+}
+
+func emptySizetSlice(length int) (slice []C.size_t, cSlice *C.size_t) {
+	slice = make([]C.size_t, length)
+	sH := (*reflect.SliceHeader)(unsafe.Pointer(&slice))
+	cSlice = (*C.size_t)(unsafe.Pointer(sH.Data))
+	return slice, cSlice
+}
+
 // charSlice converts a C array of *char to a []*C.char.
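//
// Note that the returned slice aliases the C array rather than copying it,
// so the C memory must remain valid for as long as the slice is in use.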
func charSlice(data **C.char, len C.int) []*C.char { var value []*C.char diff --git a/write_batch.go b/write_batch.go index 3d7bb457..1052edfd 100644 --- a/write_batch.go +++ b/write_batch.go @@ -49,9 +49,9 @@ func (wb *WriteBatch) PutMany(keys, values [][]byte) error { } numPairs := C.size_t(len(keys)) cKeys, cKeySizes := bytesSliceToArray(keys) - defer freeCharsArray(cKeys) + defer freeCharsArray(cKeys, len(keys)) cValues, cValueSizes := bytesSliceToArray(values) - defer freeCharsArray(cValues) + defer freeCharsArray(cValues, len(values)) C.gorocksdb_writebatch_put_many( wb.c, numPairs, @@ -68,9 +68,9 @@ func (wb *WriteBatch) PutManyCF(cf *ColumnFamilyHandle, keys, values [][]byte) e } numPairs := C.size_t(len(keys)) cKeys, cKeySizes := bytesSliceToArray(keys) - defer freeCharsArray(cKeys) + defer freeCharsArray(cKeys, len(keys)) cValues, cValueSizes := bytesSliceToArray(values) - defer freeCharsArray(cValues) + defer freeCharsArray(cValues, len(values)) C.gorocksdb_writebatch_put_many_cf( wb.c, cf.c, numPairs, From 787b875672e967d19a210d4dd41cc31e4e97bb12 Mon Sep 17 00:00:00 2001 From: Jelte Fennema Date: Wed, 31 Jan 2018 14:49:46 +0100 Subject: [PATCH 32/55] Implement multiget --- db.go | 44 ++++++++++++++++++++++++++++++++++++++++++++ db_test.go | 32 ++++++++++++++++++++++++++++++++ slice.go | 8 ++++++++ 3 files changed, 84 insertions(+) diff --git a/db.go b/db.go index 2b67f354..5ca3bd98 100644 --- a/db.go +++ b/db.go @@ -5,6 +5,7 @@ package gorocksdb import "C" import ( "errors" + "fmt" "unsafe" ) @@ -263,6 +264,49 @@ func (db *DB) GetCF(opts *ReadOptions, cf *ColumnFamilyHandle, key []byte) (*Sli return NewSlice(cValue, cValLen), nil } +// Get returns the data associated with the key from the database. +func (db *DB) MultiGet(opts *ReadOptions, keys ...[]byte) (Slices, error) { + cKeys, cKeySizes := bytesSliceToArray(keys) + defer freeCharsArray(cKeys, len(keys)) + vals, cVals := emptyCharSlice(len(keys)) + rocksErrs, cRocksErrs := emptyCharSlice(len(keys)) + valSizes, cValSizes := emptySizetSlice(len(keys)) + _ = vals + _ = valSizes + + C.rocksdb_multi_get( + db.c, + opts.c, + C.size_t(len(keys)), + cKeys, + cKeySizes, + cVals, + cValSizes, + cRocksErrs, + ) + + var errs []error + + for i, rocksErr := range rocksErrs { + if rocksErr != nil { + defer C.free(unsafe.Pointer(rocksErr)) + err := fmt.Errorf("getting %q failed: %v", string(keys[i]), C.GoString(rocksErr)) + errs = append(errs, err) + } + } + + if len(errs) > 0 { + return nil, fmt.Errorf("failed to get %d keys, first error: %v", len(errs), errs[0]) + } + + slices := make(Slices, len(keys)) + for i, val := range vals { + slices[i] = NewSlice(val, valSizes[i]) + } + + return slices, nil +} + // Put writes data associated with a key to the database. 
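//
// A minimal usage sketch (assumes an open DB; error handling elided):
//
//	wo := NewDefaultWriteOptions()
//	err := db.Put(wo, []byte("hello"), []byte("world"))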
func (db *DB) Put(opts *WriteOptions, key, value []byte) error {
 	var (
diff --git a/db_test.go b/db_test.go
index 8963f7b7..fa668e5a 100644
--- a/db_test.go
+++ b/db_test.go
@@ -64,3 +64,35 @@ func newTestDB(t *testing.T, name string, applyOpts func(opts *Options)) *DB {
 
 	return db
 }
+
+func TestDBMultiGet(t *testing.T) {
+	db := newTestDB(t, "TestDBMultiGet", nil)
+	defer db.Close()
+
+	var (
+		givenKey1 = []byte("hello1")
+		givenKey2 = []byte("hello2")
+		givenKey3 = []byte("hello3")
+		givenVal1 = []byte("world1")
+		givenVal2 = []byte("world2")
+		givenVal3 = []byte("world3")
+		wo        = NewDefaultWriteOptions()
+		ro        = NewDefaultReadOptions()
+	)
+
+	// create
+	ensure.Nil(t, db.Put(wo, givenKey1, givenVal1))
+	ensure.Nil(t, db.Put(wo, givenKey2, givenVal2))
+	ensure.Nil(t, db.Put(wo, givenKey3, givenVal3))
+
+	// retrieve
+	values, err := db.MultiGet(ro, []byte("noexist"), givenKey1, givenKey2, givenKey3)
+	defer values.Destroy()
+	ensure.Nil(t, err)
+	ensure.DeepEqual(t, len(values), 4)
+
+	ensure.DeepEqual(t, values[0].Data(), []byte(nil))
+	ensure.DeepEqual(t, values[1].Data(), givenVal1)
+	ensure.DeepEqual(t, values[2].Data(), givenVal2)
+	ensure.DeepEqual(t, values[3].Data(), givenVal3)
+}
diff --git a/slice.go b/slice.go
index d8b7a2e9..22b48e09 100644
--- a/slice.go
+++ b/slice.go
@@ -11,6 +11,14 @@ type Slice struct {
 	freed bool
 }
 
+type Slices []*Slice
+
+func (slices Slices) Destroy() {
+	for _, s := range slices {
+		s.Free()
+	}
+}
+
 // NewSlice returns a slice with the given data.
 func NewSlice(data *C.char, size C.size_t) *Slice {
 	return &Slice{data, size, false}

From e2ceddee86f7e2048605cf0974bf9cabae51e333 Mon Sep 17 00:00:00 2001
From: Jelte Fennema
Date: Wed, 31 Jan 2018 15:19:13 +0100
Subject: [PATCH 33/55] Simplify slices of c element handling

---
 array.go       | 47 +++++++++++++++++++++++++++++++++++++++++++++
 db.go          | 22 ++++++++++------------
 util.go        | 44 --------------------------------------------
 write_batch.go | 24 ++++++++++++------------
 4 files changed, 69 insertions(+), 68 deletions(-)
 create mode 100644 array.go

diff --git a/array.go b/array.go
new file mode 100644
index 00000000..246842f6
--- /dev/null
+++ b/array.go
@@ -0,0 +1,47 @@
+package gorocksdb
+
+// #include "stdlib.h"
+import "C"
+import (
+	"reflect"
+	"unsafe"
+)
+
+type charsSlice []*C.char
+type sizeTSlice []C.size_t
+
+func (s charsSlice) c() **C.char {
+	sH := (*reflect.SliceHeader)(unsafe.Pointer(&s))
+	return (**C.char)(unsafe.Pointer(sH.Data))
+}
+
+func (s sizeTSlice) c() *C.size_t {
+	sH := (*reflect.SliceHeader)(unsafe.Pointer(&s))
+	return (*C.size_t)(unsafe.Pointer(sH.Data))
+}
+
+// byteSlicesToCSlices converts a slice of byte slices to two slices with C
+// datatypes. One containing pointers to copies of the byte slices and one
+// containing their sizes.
+// IMPORTANT: All the contents of the charsSlice array are malloced and
+// should be freed using the Destroy method of charsSlice.
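+//
+// A minimal sketch of the intended call pattern within this package, where
+// some_c_fn stands in for any C function taking a char** and a size_t*:
+//
+//	chars, sizes := byteSlicesToCSlices(vals)
+//	defer chars.Destroy()
+//	C.some_c_fn(chars.c(), sizes.c(), C.size_t(len(vals)))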
+func byteSlicesToCSlices(vals [][]byte) (charsSlice, sizeTSlice) { + if len(vals) == 0 { + return nil, nil + } + + chars := make(charsSlice, len(vals)) + sizes := make(sizeTSlice, len(vals)) + for i, val := range vals { + chars[i] = (*C.char)(C.CBytes(val)) + sizes[i] = C.size_t(len(val)) + } + + return chars, sizes +} + +func (s charsSlice) Destroy() { + for _, chars := range s { + C.free(unsafe.Pointer(chars)) + } +} diff --git a/db.go b/db.go index 5ca3bd98..8f40e8d3 100644 --- a/db.go +++ b/db.go @@ -266,23 +266,21 @@ func (db *DB) GetCF(opts *ReadOptions, cf *ColumnFamilyHandle, key []byte) (*Sli // Get returns the data associated with the key from the database. func (db *DB) MultiGet(opts *ReadOptions, keys ...[]byte) (Slices, error) { - cKeys, cKeySizes := bytesSliceToArray(keys) - defer freeCharsArray(cKeys, len(keys)) - vals, cVals := emptyCharSlice(len(keys)) - rocksErrs, cRocksErrs := emptyCharSlice(len(keys)) - valSizes, cValSizes := emptySizetSlice(len(keys)) - _ = vals - _ = valSizes + cKeys, cKeySizes := byteSlicesToCSlices(keys) + defer cKeys.Destroy() + vals := make(charsSlice, len(keys)) + valSizes := make(sizeTSlice, len(keys)) + rocksErrs := make(charsSlice, len(keys)) C.rocksdb_multi_get( db.c, opts.c, C.size_t(len(keys)), - cKeys, - cKeySizes, - cVals, - cValSizes, - cRocksErrs, + cKeys.c(), + cKeySizes.c(), + vals.c(), + valSizes.c(), + rocksErrs.c(), ) var errs []error diff --git a/util.go b/util.go index 50b3cac5..9c373306 100644 --- a/util.go +++ b/util.go @@ -1,6 +1,5 @@ package gorocksdb -// #include "stdlib.h" import "C" import ( "reflect" @@ -40,35 +39,6 @@ func byteToChar(b []byte) *C.char { return c } -// bytesSliceToArray converts a slice of byte slices to two C arrays. One -// containing pointers to the byte slices and one containing their sizes. -// IMPORTANT: The **C.char array is malloced and should be freed using -// freeCharsArray after it is used. -func bytesSliceToArray(vals [][]byte) (**C.char, *C.size_t) { - if len(vals) == 0 { - return nil, nil - } - - chars, cChars := emptyCharSlice(len(vals)) - sizes, cSizes := emptySizetSlice(len(vals)) - for i, val := range vals { - chars[i] = (*C.char)(C.CBytes(val)) - sizes[i] = C.size_t(len(val)) - } - - return cChars, cSizes -} - -// freeCharsArray frees a **C.char that is malloced by this library itself. -func freeCharsArray(charsArray **C.char, length int) { - var charsSlice []*C.char - sH := (*reflect.SliceHeader)(unsafe.Pointer(&charsSlice)) - sH.Cap, sH.Len, sH.Data = length, length, uintptr(unsafe.Pointer(charsArray)) - for _, chars := range charsSlice { - C.free(unsafe.Pointer(chars)) - } -} - // Go []byte to C string // The C string is allocated in the C heap using malloc. func cByteSlice(b []byte) *C.char { @@ -87,20 +57,6 @@ func stringToChar(s string) *C.char { return (*C.char)(unsafe.Pointer(ptrStr.Data)) } -func emptyCharSlice(length int) (slice []*C.char, cSlice **C.char) { - slice = make([]*C.char, length) - sH := (*reflect.SliceHeader)(unsafe.Pointer(&slice)) - cSlice = (**C.char)(unsafe.Pointer(sH.Data)) - return slice, cSlice -} - -func emptySizetSlice(length int) (slice []C.size_t, cSlice *C.size_t) { - slice = make([]C.size_t, length) - sH := (*reflect.SliceHeader)(unsafe.Pointer(&slice)) - cSlice = (*C.size_t)(unsafe.Pointer(sH.Data)) - return slice, cSlice -} - // charSlice converts a C array of *char to a []*C.char. 
func charSlice(data **C.char, len C.int) []*C.char { var value []*C.char diff --git a/write_batch.go b/write_batch.go index 1052edfd..eb0d1b92 100644 --- a/write_batch.go +++ b/write_batch.go @@ -48,15 +48,15 @@ func (wb *WriteBatch) PutMany(keys, values [][]byte) error { return errors.New("Number of keys and values should be the same") } numPairs := C.size_t(len(keys)) - cKeys, cKeySizes := bytesSliceToArray(keys) - defer freeCharsArray(cKeys, len(keys)) - cValues, cValueSizes := bytesSliceToArray(values) - defer freeCharsArray(cValues, len(values)) + cKeys, cKeySizes := byteSlicesToCSlices(keys) + defer cKeys.Destroy() + cValues, cValueSizes := byteSlicesToCSlices(values) + defer cValues.Destroy() C.gorocksdb_writebatch_put_many( wb.c, numPairs, - cKeys, cKeySizes, - cValues, cValueSizes, + cKeys.c(), cKeySizes.c(), + cValues.c(), cValueSizes.c(), ) return nil } @@ -67,15 +67,15 @@ func (wb *WriteBatch) PutManyCF(cf *ColumnFamilyHandle, keys, values [][]byte) e return errors.New("Number of keys and values should be the same") } numPairs := C.size_t(len(keys)) - cKeys, cKeySizes := bytesSliceToArray(keys) - defer freeCharsArray(cKeys, len(keys)) - cValues, cValueSizes := bytesSliceToArray(values) - defer freeCharsArray(cValues, len(values)) + cKeys, cKeySizes := byteSlicesToCSlices(keys) + defer cKeys.Destroy() + cValues, cValueSizes := byteSlicesToCSlices(values) + defer cValues.Destroy() C.gorocksdb_writebatch_put_many_cf( wb.c, cf.c, numPairs, - cKeys, cKeySizes, - cValues, cValueSizes, + cKeys.c(), cKeySizes.c(), + cValues.c(), cValueSizes.c(), ) return nil } From 50fd44bab6c06e28132aadb19ec05570a9141300 Mon Sep 17 00:00:00 2001 From: Jelte Fennema Date: Wed, 31 Jan 2018 15:24:29 +0100 Subject: [PATCH 34/55] Revert early freeing of byte slices in PutMany to be safer --- write_batch.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/write_batch.go b/write_batch.go index eb0d1b92..4675b9f8 100644 --- a/write_batch.go +++ b/write_batch.go @@ -10,7 +10,8 @@ import ( // WriteBatch is a batching of Puts, Merges and Deletes. type WriteBatch struct { - c *C.rocksdb_writebatch_t + c *C.rocksdb_writebatch_t + charsSlices []charsSlice } // NewWriteBatch create a WriteBatch object. @@ -49,9 +50,8 @@ func (wb *WriteBatch) PutMany(keys, values [][]byte) error { } numPairs := C.size_t(len(keys)) cKeys, cKeySizes := byteSlicesToCSlices(keys) - defer cKeys.Destroy() cValues, cValueSizes := byteSlicesToCSlices(values) - defer cValues.Destroy() + wb.charsSlices = append(wb.charsSlices, cKeys, cValues) C.gorocksdb_writebatch_put_many( wb.c, numPairs, @@ -68,9 +68,8 @@ func (wb *WriteBatch) PutManyCF(cf *ColumnFamilyHandle, keys, values [][]byte) e } numPairs := C.size_t(len(keys)) cKeys, cKeySizes := byteSlicesToCSlices(keys) - defer cKeys.Destroy() cValues, cValueSizes := byteSlicesToCSlices(values) - defer cValues.Destroy() + wb.charsSlices = append(wb.charsSlices, cKeys, cValues) C.gorocksdb_writebatch_put_many_cf( wb.c, cf.c, numPairs, @@ -137,6 +136,9 @@ func (wb *WriteBatch) Clear() { func (wb *WriteBatch) Destroy() { C.rocksdb_writebatch_destroy(wb.c) wb.c = nil + for _, slice := range wb.charsSlices { + slice.Destroy() + } } // WriteBatchRecordType describes the type of a batch record. 
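
A quick usage sketch for the MultiGet API added above (assumes an open *DB
named db and uses only functions introduced in this series; error handling
elided):

	ro := gorocksdb.NewDefaultReadOptions()
	values, err := db.MultiGet(ro, []byte("hello1"), []byte("hello2"))
	if err == nil {
		defer values.Destroy()
		for _, v := range values {
			if v.Exists() {
				fmt.Println(string(v.Data()))
			}
		}
	}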
From c52f0a23d78969cefc9cad9ff933117fb02b891f Mon Sep 17 00:00:00 2001 From: Jelte Fennema Date: Wed, 31 Jan 2018 16:34:46 +0100 Subject: [PATCH 35/55] Implement MultiGet for column families --- array.go | 7 +++++ cf_handle.go | 10 +++++++ cf_test.go | 75 ++++++++++++++++++++++++++++++++++++++++++++++++++++ db.go | 54 ++++++++++++++++++++++++++++++++++++- 4 files changed, 145 insertions(+), 1 deletion(-) diff --git a/array.go b/array.go index 246842f6..c5a12289 100644 --- a/array.go +++ b/array.go @@ -1,6 +1,7 @@ package gorocksdb // #include "stdlib.h" +// #include "rocksdb/c.h" import "C" import ( "reflect" @@ -9,6 +10,7 @@ import ( type charsSlice []*C.char type sizeTSlice []C.size_t +type columnFamilySlice []*C.rocksdb_column_family_handle_t func (s charsSlice) c() **C.char { sH := (*reflect.SliceHeader)(unsafe.Pointer(&s)) @@ -20,6 +22,11 @@ func (s sizeTSlice) c() *C.size_t { return (*C.size_t)(unsafe.Pointer(sH.Data)) } +func (s columnFamilySlice) c() **C.rocksdb_column_family_handle_t { + sH := (*reflect.SliceHeader)(unsafe.Pointer(&s)) + return (**C.rocksdb_column_family_handle_t)(unsafe.Pointer(sH.Data)) +} + // bytesSliceToCSlices converts a slice of byte slices to two slices with C // datatypes. One containing pointers to copies of the byte slices and one // containing their sizes. diff --git a/cf_handle.go b/cf_handle.go index fe8106c8..6ded4c59 100644 --- a/cf_handle.go +++ b/cf_handle.go @@ -24,3 +24,13 @@ func (h *ColumnFamilyHandle) UnsafeGetCFHandler() unsafe.Pointer { func (h *ColumnFamilyHandle) Destroy() { C.rocksdb_column_family_handle_destroy(h.c) } + +type ColumnFamilyHandles []*ColumnFamilyHandle + +func (cfs ColumnFamilyHandles) toCSlice() columnFamilySlice { + cCFs := make(columnFamilySlice, len(cfs)) + for i, cf := range cfs { + cCFs[i] = cf.c + } + return cCFs +} diff --git a/cf_test.go b/cf_test.go index 3d62759e..f6db7c1f 100644 --- a/cf_test.go +++ b/cf_test.go @@ -152,3 +152,78 @@ func TestColumnFamilyPutGetDelete(t *testing.T) { ensure.Nil(t, err) ensure.DeepEqual(t, actualVal.Size(), 0) } + +func newTestDBCF(t *testing.T, name string) (db *DB, cfh []*ColumnFamilyHandle, cleanup func()) { + dir, err := ioutil.TempDir("", "gorocksdb-TestColumnFamilyPutGet") + ensure.Nil(t, err) + + givenNames := []string{"default", "guide"} + opts := NewDefaultOptions() + opts.SetCreateIfMissingColumnFamilies(true) + opts.SetCreateIfMissing(true) + db, cfh, err = OpenDbColumnFamilies(opts, dir, givenNames, []*Options{opts, opts}) + ensure.Nil(t, err) + cleanup = func() { + for _, cf := range cfh { + cf.Destroy() + } + db.Close() + } + return db, cfh, cleanup +} + +func TestColumnFamilyMultiGet(t *testing.T) { + db, cfh, cleanup := newTestDBCF(t, "TestDBMultiGet") + defer cleanup() + + var ( + givenKey1 = []byte("hello1") + givenKey2 = []byte("hello2") + givenKey3 = []byte("hello3") + givenVal1 = []byte("world1") + givenVal2 = []byte("world2") + givenVal3 = []byte("world3") + wo = NewDefaultWriteOptions() + ro = NewDefaultReadOptions() + ) + + // create + ensure.Nil(t, db.PutCF(wo, cfh[0], givenKey1, givenVal1)) + ensure.Nil(t, db.PutCF(wo, cfh[1], givenKey2, givenVal2)) + ensure.Nil(t, db.PutCF(wo, cfh[1], givenKey3, givenVal3)) + + // column family 0 only has givenKey1 + values, err := db.MultiGetCF(ro, cfh[0], []byte("noexist"), givenKey1, givenKey2, givenKey3) + defer values.Destroy() + ensure.Nil(t, err) + ensure.DeepEqual(t, len(values), 4) + + ensure.DeepEqual(t, values[0].Data(), []byte(nil)) + ensure.DeepEqual(t, values[1].Data(), givenVal1) + ensure.DeepEqual(t, 
values[2].Data(), []byte(nil)) + ensure.DeepEqual(t, values[3].Data(), []byte(nil)) + + // column family 1 only has givenKey2 and givenKey3 + values, err = db.MultiGetCF(ro, cfh[1], []byte("noexist"), givenKey1, givenKey2, givenKey3) + defer values.Destroy() + ensure.Nil(t, err) + ensure.DeepEqual(t, len(values), 4) + + ensure.DeepEqual(t, values[0].Data(), []byte(nil)) + ensure.DeepEqual(t, values[1].Data(), []byte(nil)) + ensure.DeepEqual(t, values[2].Data(), givenVal2) + ensure.DeepEqual(t, values[3].Data(), givenVal3) + + // getting them all from the right CF should return them all + values, err = db.MultiGetCFMultiCF(ro, + ColumnFamilyHandles{cfh[0], cfh[1], cfh[1]}, + [][]byte{givenKey1, givenKey2, givenKey3}, + ) + defer values.Destroy() + ensure.Nil(t, err) + ensure.DeepEqual(t, len(values), 3) + + ensure.DeepEqual(t, values[0].Data(), givenVal1) + ensure.DeepEqual(t, values[1].Data(), givenVal2) + ensure.DeepEqual(t, values[2].Data(), givenVal3) +} diff --git a/db.go b/db.go index 8f40e8d3..fdfc8186 100644 --- a/db.go +++ b/db.go @@ -264,7 +264,7 @@ func (db *DB) GetCF(opts *ReadOptions, cf *ColumnFamilyHandle, key []byte) (*Sli return NewSlice(cValue, cValLen), nil } -// Get returns the data associated with the key from the database. +// MultiGet returns the data associated with the passed keys from the database func (db *DB) MultiGet(opts *ReadOptions, keys ...[]byte) (Slices, error) { cKeys, cKeySizes := byteSlicesToCSlices(keys) defer cKeys.Destroy() @@ -305,6 +305,58 @@ func (db *DB) MultiGet(opts *ReadOptions, keys ...[]byte) (Slices, error) { return slices, nil } +// MultiGetCF returns the data associated with the passed keys from the column family +func (db *DB) MultiGetCF(opts *ReadOptions, cf *ColumnFamilyHandle, keys ...[]byte) (Slices, error) { + cfs := make(ColumnFamilyHandles, len(keys)) + for i := 0; i < len(keys); i++ { + cfs[i] = cf + } + return db.MultiGetCFMultiCF(opts, cfs, keys) +} + +// MultiGetCFMultiCF returns the data associated with the passed keys and +// column families. +func (db *DB) MultiGetCFMultiCF(opts *ReadOptions, cfs ColumnFamilyHandles, keys [][]byte) (Slices, error) { + cKeys, cKeySizes := byteSlicesToCSlices(keys) + defer cKeys.Destroy() + vals := make(charsSlice, len(keys)) + valSizes := make(sizeTSlice, len(keys)) + rocksErrs := make(charsSlice, len(keys)) + + C.rocksdb_multi_get_cf( + db.c, + opts.c, + cfs.toCSlice().c(), + C.size_t(len(keys)), + cKeys.c(), + cKeySizes.c(), + vals.c(), + valSizes.c(), + rocksErrs.c(), + ) + + var errs []error + + for i, rocksErr := range rocksErrs { + if rocksErr != nil { + defer C.free(unsafe.Pointer(rocksErr)) + err := fmt.Errorf("getting %q failed: %v", string(keys[i]), C.GoString(rocksErr)) + errs = append(errs, err) + } + } + + if len(errs) > 0 { + return nil, fmt.Errorf("failed to get %d keys, first error: %v", len(errs), errs[0]) + } + + slices := make(Slices, len(keys)) + for i, val := range vals { + slices[i] = NewSlice(val, valSizes[i]) + } + + return slices, nil +} + // Put writes data associated with a key to the database. 
func (db *DB) Put(opts *WriteOptions, key, value []byte) error {
 	var (

From dfb6148dbf3202cb3c1c909d9c88a378e47c17d8 Mon Sep 17 00:00:00 2001
From: Jelte Fennema
Date: Wed, 31 Jan 2018 16:50:52 +0100
Subject: [PATCH 36/55] Only build go 1.9

---
 .travis.yml | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 982b7715..0ba6beac 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,7 +1,6 @@
 language: go
 
 go:
-  - 1.6
-  - tip
+  - '1.9.3'
 
 before_install:
   - sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test

From dcebded71fae7ed19739e7b5baffa4591a80ad6d Mon Sep 17 00:00:00 2001
From: Jelte Fennema
Date: Wed, 31 Jan 2018 18:23:34 +0100
Subject: [PATCH 37/55] Test using cgocheck=2 and fix compaction_filter test for that

---
 .travis.yml               |  2 +-
 compaction_filter_test.go |  5 ++++-
 util.go                   | 14 ++++++++++++++
 3 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 0ba6beac..4164eb27 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -24,7 +24,7 @@ install:
   - go get -t ./...
 
 script:
-  - go test -v ./
+  - GODEBUG=cgocheck=2 go test -v ./
 
 notifications:
   email:
diff --git a/compaction_filter_test.go b/compaction_filter_test.go
index 1dfcd63e..2735cf16 100644
--- a/compaction_filter_test.go
+++ b/compaction_filter_test.go
@@ -11,9 +11,12 @@ func TestCompactionFilter(t *testing.T) {
 	var (
 		changeKey    = []byte("change")
 		changeValOld = []byte("old")
-		changeValNew = []byte("new")
+		changeValNew = cBackedBytes([]byte("new"))
 		deleteKey    = []byte("delete")
 	)
+
+	defer freeCBackedBytes(changeValNew)
+
 	db := newTestDB(t, "TestCompactionFilter", func(opts *Options) {
 		opts.SetCompactionFilter(&mockCompactionFilter{
 			filter: func(level int, key, val []byte) (remove bool, newVal []byte) {
diff --git a/util.go b/util.go
index 9c373306..dfda2d0e 100644
--- a/util.go
+++ b/util.go
@@ -1,5 +1,6 @@
 package gorocksdb
 
+// #include <stdlib.h>
 import "C"
 import (
 	"reflect"
@@ -30,6 +31,19 @@ func charToByte(data *C.char, len C.size_t) []byte {
 	return value
 }
 
+// cBackedBytes returns a copy of the given byte slice which is backed by
+// malloced memory. This should be freed using freeCBackedBytes.
+func cBackedBytes(data []byte) []byte {
+	return charToByte(cByteSlice(data), C.size_t(len(data)))
+}
+
+// freeCBackedBytes frees a byte slice created by cBackedBytes
+func freeCBackedBytes(data []byte) {
+	sH := (*reflect.SliceHeader)(unsafe.Pointer(&data))
+	C.free(unsafe.Pointer(sH.Data))
+
+}
+
 // byteToChar returns *C.char from byte slice.
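//
// The returned pointer aliases Go-managed memory, so under the cgo pointer
// passing rules it must only be used for the duration of a single C call
// and must not be retained on the C side.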
func byteToChar(b []byte) *C.char { var c *C.char From 282102a9e03b4e0728f602f1ad940219ad3a5dc6 Mon Sep 17 00:00:00 2001 From: Jelte Fennema Date: Thu, 1 Feb 2018 10:22:56 +0100 Subject: [PATCH 38/55] Travis: Test with rocksdb v5.7.3 --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 4164eb27..26ef0009 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,6 +16,7 @@ before_install: install: - git clone https://github.com/facebook/rocksdb.git /tmp/rocksdb - pushd /tmp/rocksdb + - git checkout v5.7.3 - make clean - make shared_lib - sudo cp --preserve=links ./librocksdb.* /usr/lib/ From f8aef895c093b60a8afd62c2552deb56033d9ee9 Mon Sep 17 00:00:00 2001 From: Jelte Fennema Date: Mon, 6 May 2019 16:44:51 +0200 Subject: [PATCH 39/55] Add bindings for lowering CPU and IO priority of thread pools --- env.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/env.go b/env.go index 386335bc..dfdd583a 100644 --- a/env.go +++ b/env.go @@ -33,6 +33,22 @@ func (env *Env) SetHighPriorityBackgroundThreads(n int) { C.rocksdb_env_set_high_priority_background_threads(env.c, C.int(n)) } +func (env *Env) LowerThreadPoolIOPriority() { + C.rocksdb_env_lower_thread_pool_io_priority(env.c) +} + +func (env *Env) LowerHighPriorityThreadPoolIOPriority() { + C.rocksdb_env_lower_high_priority_thread_pool_io_priority(env.c) +} + +func (env *Env) LowerThreadPoolCPUPriority() { + C.rocksdb_env_lower_thread_pool_cpu_priority(env.c) +} + +func (env *Env) LowerHighPriorityThreadPoolCPUPriority() { + C.rocksdb_env_lower_high_priority_thread_pool_cpu_priority(env.c) +} + // Destroy deallocates the Env object. func (env *Env) Destroy() { C.rocksdb_env_destroy(env.c) From ee9d1f33fc6d1c96072b3ef95f51b9c72c4fb38b Mon Sep 17 00:00:00 2001 From: Jelte Fennema Date: Tue, 25 Jun 2019 11:20:17 +0200 Subject: [PATCH 40/55] Use newer version of rocksdb on travis --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 26ef0009..f8f180fe 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,9 +14,9 @@ before_install: - sudo dpkg -i libgflags-dev_2.0-1.1ubuntu1_amd64.deb install: - - git clone https://github.com/facebook/rocksdb.git /tmp/rocksdb + - git clone https://github.com/GetStream/rocksdb.git /tmp/rocksdb - pushd /tmp/rocksdb - - git checkout v5.7.3 + - git checkout add-lower-priority-c-bindings - make clean - make shared_lib - sudo cp --preserve=links ./librocksdb.* /usr/lib/ From 2653ff46abbad0fd356cb3636b272dcfbd57c011 Mon Sep 17 00:00:00 2001 From: Jelte Fennema Date: Tue, 25 Jun 2019 11:20:17 +0200 Subject: [PATCH 41/55] Use newer version of rocksdb on travis --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 7f474fef..daa33497 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,9 +16,9 @@ before_install: - sudo dpkg -i libgflags-dev_2.0-1.1ubuntu1_amd64.deb install: - - git clone https://github.com/facebook/rocksdb.git /tmp/rocksdb + - git clone https://github.com/GetStream/rocksdb.git /tmp/rocksdb - pushd /tmp/rocksdb - - git checkout v5.7.3 + - git checkout add-lower-priority-c-bindings - make clean - make shared_lib -j`nproc` - sudo cp --preserve=links ./librocksdb.* /usr/lib/ From e8d9b90d5618e37a8650003c40f7dff0ff876635 Mon Sep 17 00:00:00 2001 From: Jelte Fennema Date: Tue, 25 Jun 2019 12:48:10 +0200 Subject: [PATCH 42/55] Fix tests after merge --- db_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/db_test.go 
b/db_test.go
index 863b3193..02cbdea2 100644
--- a/db_test.go
+++ b/db_test.go
@@ -75,8 +75,9 @@ func TestDBCRUDDBPaths(t *testing.T) {
 
 	var (
 		givenKey  = []byte("hello")
-		givenVal1 = []byte("world1")
-		givenVal2 = []byte("world2")
+		givenVal1 = []byte("")
+		givenVal2 = []byte("world1")
+		givenVal3 = []byte("world2")
 		wo        = NewDefaultWriteOptions()
 		ro        = NewDefaultReadOptions()
 	)

From f498bc697d7c7623b7a8913630bb9975e2c1f648 Mon Sep 17 00:00:00 2001
From: Jelte Fennema
Date: Tue, 25 Jun 2019 13:33:24 +0200
Subject: [PATCH 43/55] Fix tests

---
 db_test.go | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/db_test.go b/db_test.go
index 02cbdea2..4ccc7aa8 100644
--- a/db_test.go
+++ b/db_test.go
@@ -21,7 +21,6 @@ func TestDBCRUD(t *testing.T) {
 		givenKey  = []byte("hello")
 		givenVal1 = []byte("")
 		givenVal2 = []byte("world1")
-		givenVal3 = []byte("world2")
 		wo        = NewDefaultWriteOptions()
 		ro        = NewDefaultReadOptions()
 	)
@@ -46,7 +45,7 @@ func TestDBCRUD(t *testing.T) {
 	v3, err := db.GetPinned(ro, givenKey)
 	defer v3.Destroy()
 	ensure.Nil(t, err)
-	ensure.DeepEqual(t, v3.Data(), givenVal3)
+	ensure.DeepEqual(t, v3.Data(), givenVal2)
 
 	// delete
 	ensure.Nil(t, db.Delete(wo, givenKey))
@@ -58,7 +57,7 @@ func TestDBCRUD(t *testing.T) {
 	v5, err := db.GetPinned(ro, givenKey)
 	defer v5.Destroy()
 	ensure.Nil(t, err)
-	ensure.Nil(t, v5.Data())
+	ensure.True(t, v5.Data() == nil)
 }
 
 func TestDBCRUDDBPaths(t *testing.T) {

From c16f09d4d728d81640c59df9d825d613c6098e4d Mon Sep 17 00:00:00 2001
From: Jelte Fennema
Date: Tue, 25 Jun 2019 13:56:08 +0200
Subject: [PATCH 44/55] Use malloc-allocated bytes for set_iterate_upper_bound

---
 options_read.go | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

diff --git a/options_read.go b/options_read.go
index 997fa163..a047c7ce 100644
--- a/options_read.go
+++ b/options_read.go
@@ -1,5 +1,6 @@
 package gorocksdb
 
+// #include <stdlib.h>
 // #include "rocksdb/c.h"
 import "C"
 import "unsafe"
@@ -23,7 +24,8 @@ const (
 // ReadOptions represent all of the available options when reading from a
 // database.
 type ReadOptions struct {
-	c *C.rocksdb_readoptions_t
+	c                  *C.rocksdb_readoptions_t
+	cIterateUpperBound *C.char
 }
 
 // NewDefaultReadOptions creates a default ReadOptions object.
@@ -33,7 +35,7 @@ func NewDefaultReadOptions() *ReadOptions {
 
 // NewNativeReadOptions creates a ReadOptions object.
 func NewNativeReadOptions(c *C.rocksdb_readoptions_t) *ReadOptions {
-	return &ReadOptions{c}
+	return &ReadOptions{c: c}
 }
 
 // UnsafeGetReadOptions returns the underlying c read options object.
@@ -93,9 +95,15 @@ func (opts *ReadOptions) SetTailing(value bool) {
 // implemented.
 // Default: nullptr
 func (opts *ReadOptions) SetIterateUpperBound(key []byte) {
-	cKey := byteToChar(key)
+	C.free(unsafe.Pointer(opts.cIterateUpperBound))
+	if key == nil {
+		opts.cIterateUpperBound = nil
+	} else {
+		opts.cIterateUpperBound = cByteSlice(key)
+	}
+
 	cKeyLen := C.size_t(len(key))
-	C.rocksdb_readoptions_set_iterate_upper_bound(opts.c, cKey, cKeyLen)
+	C.rocksdb_readoptions_set_iterate_upper_bound(opts.c, opts.cIterateUpperBound, cKeyLen)
 }
 
 // SetPinData specifies the value of "pin_data". If true, it keeps the blocks
@@ -121,5 +129,6 @@ func (opts *ReadOptions) SetReadaheadSize(value uint64) {
 
 // Destroy deallocates the ReadOptions object.
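// It also frees the C copy of any key previously passed to
// SetIterateUpperBound, so a destroyed ReadOptions must not be reused.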
func (opts *ReadOptions) Destroy() { C.rocksdb_readoptions_destroy(opts.c) - opts.c = nil + C.free(unsafe.Pointer(opts.cIterateUpperBound)) + *opts = ReadOptions{} } From 7423e0b17a60ad9c87a66e88e629d4b972fa3e9e Mon Sep 17 00:00:00 2001 From: Marcelo Pires Date: Tue, 25 Feb 2020 12:57:20 +0100 Subject: [PATCH 45/55] Revert "Merge branch 'master' of github.com:tecbot/gorocksdb" This reverts commit 16bd82454278b8bd2a28d352f9e9a67bd0a1e048, reversing changes made to 42987db69212a5231fdbf954dfe035d982cee2d7. --- backup.go | 29 ++------ cache.go | 10 +-- checkpoint.go | 2 +- checkpoint_test.go | 3 +- db.go | 129 +++++++++------------------------ db_test.go | 0 dynflag.go | 2 +- env.go | 5 -- filter_policy.go | 6 -- iterator.go | 2 +- memory_usage.go | 4 +- merge_operator.go | 52 ++----------- merge_operator_test.go | 137 +---------------------------------- options.go | 65 ++--------------- options_block_based_table.go | 79 -------------------- options_read.go | 11 --- slice.go | 2 +- slice_transform.go | 5 -- slice_transform_test.go | 7 -- sst_file_writer.go | 6 +- staticflag_linux.go | 2 +- transaction.go | 12 +-- transactiondb.go | 10 +-- util.go | 1 - wal_iterator.go | 49 ------------- write_batch.go | 21 ------ write_batch_test.go | 12 --- 27 files changed, 84 insertions(+), 579 deletions(-) mode change 100755 => 100644 db.go mode change 100755 => 100644 db_test.go delete mode 100755 wal_iterator.go diff --git a/backup.go b/backup.go index 87621dd9..a6673ff8 100644 --- a/backup.go +++ b/backup.go @@ -89,7 +89,7 @@ func OpenBackupEngine(opts *Options, path string) (*BackupEngine, error) { be := C.rocksdb_backup_engine_open(opts.c, cpath, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return &BackupEngine{ @@ -104,25 +104,19 @@ func (b *BackupEngine) UnsafeGetBackupEngine() unsafe.Pointer { return unsafe.Pointer(b.c) } -// CreateNewBackupFlush takes a new backup from db. If flush is set to true, -// it flushes the WAL before taking the backup. -func (b *BackupEngine) CreateNewBackupFlush(db *DB, flush bool) error { +// CreateNewBackup takes a new backup from db. +func (b *BackupEngine) CreateNewBackup(db *DB) error { var cErr *C.char - C.rocksdb_backup_engine_create_new_backup_flush(b.c, db.c, boolToChar(flush), &cErr) + C.rocksdb_backup_engine_create_new_backup(b.c, db.c, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil } -// CreateNewBackup takes a new backup from db. 
-func (b *BackupEngine) CreateNewBackup(db *DB) error { - return b.CreateNewBackupFlush(db, false) -} - // GetInfo gets an object that gives information about // the backups that have already been taken func (b *BackupEngine) GetInfo() *BackupEngineInfo { @@ -144,18 +138,7 @@ func (b *BackupEngine) RestoreDBFromLatestBackup(dbDir, walDir string, ro *Resto C.rocksdb_backup_engine_restore_db_from_latest_backup(b.c, cDbDir, cWalDir, ro.c, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) - return errors.New(C.GoString(cErr)) - } - return nil -} - -// PurgeOldBackups deletes all backups older than the latest 'n' backups -func (b *BackupEngine) PurgeOldBackups(n uint32) error { - var cErr *C.char - C.rocksdb_backup_engine_purge_old_backups(b.c, C.uint32_t(n), &cErr) - if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil diff --git a/cache.go b/cache.go index 866326dc..ed708d7f 100644 --- a/cache.go +++ b/cache.go @@ -9,7 +9,7 @@ type Cache struct { } // NewLRUCache creates a new LRU Cache object with the capacity given. -func NewLRUCache(capacity uint64) *Cache { +func NewLRUCache(capacity int) *Cache { return NewNativeCache(C.rocksdb_cache_create_lru(C.size_t(capacity))) } @@ -19,13 +19,13 @@ func NewNativeCache(c *C.rocksdb_cache_t) *Cache { } // GetUsage returns the Cache memory usage. -func (c *Cache) GetUsage() uint64 { - return uint64(C.rocksdb_cache_get_usage(c.c)) +func (c *Cache) GetUsage() int { + return int(C.rocksdb_cache_get_usage(c.c)) } // GetPinnedUsage returns the Cache pinned memory usage. -func (c *Cache) GetPinnedUsage() uint64 { - return uint64(C.rocksdb_cache_get_pinned_usage(c.c)) +func (c *Cache) GetPinnedUsage() int { + return int(C.rocksdb_cache_get_pinned_usage(c.c)) } // Destroy deallocates the Cache object. 
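A sketch of how the Cache type above is typically wired in through the block-based table options from this same package:

```go
cache := NewLRUCache(512 << 20) // 512 MiB; capacity is a plain int again after this revert

bbto := NewDefaultBlockBasedTableOptions()
bbto.SetBlockCache(cache)

opts := NewDefaultOptions()
opts.SetBlockBasedTableFactory(bbto)
// After opening a DB with opts, cache.GetUsage() and cache.GetPinnedUsage()
// report occupancy in bytes (as int on this branch).
```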
diff --git a/checkpoint.go b/checkpoint.go index 4a6436d2..a7d2bf40 100644 --- a/checkpoint.go +++ b/checkpoint.go @@ -43,7 +43,7 @@ func (checkpoint *Checkpoint) CreateCheckpoint(checkpoint_dir string, log_size_f C.rocksdb_checkpoint_create(checkpoint.c, cDir, C.uint64_t(log_size_for_flush), &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil diff --git a/checkpoint_test.go b/checkpoint_test.go index 1ea10fdb..9505740d 100644 --- a/checkpoint_test.go +++ b/checkpoint_test.go @@ -1,11 +1,10 @@ package gorocksdb import ( + "github.com/facebookgo/ensure" "io/ioutil" "os" "testing" - - "github.com/facebookgo/ensure" ) func TestCheckpoint(t *testing.T) { diff --git a/db.go b/db.go old mode 100755 new mode 100644 index 64735c61..e3c128ce --- a/db.go +++ b/db.go @@ -32,7 +32,7 @@ func OpenDb(opts *Options, name string) (*DB, error) { defer C.free(unsafe.Pointer(cName)) db := C.rocksdb_open(opts.c, cName, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return &DB{ @@ -51,7 +51,7 @@ func OpenDbWithTTL(opts *Options, name string, ttl int) (*DB, error) { defer C.free(unsafe.Pointer(cName)) db := C.rocksdb_open_with_ttl(opts.c, cName, C.int(ttl), &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return &DB{ @@ -70,7 +70,7 @@ func OpenDbForReadOnly(opts *Options, name string, errorIfLogFileExist bool) (*D defer C.free(unsafe.Pointer(cName)) db := C.rocksdb_open_for_read_only(opts.c, cName, boolToChar(errorIfLogFileExist), &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return &DB{ @@ -123,7 +123,7 @@ func OpenDbColumnFamilies( &cErr, ) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return nil, nil, errors.New(C.GoString(cErr)) } @@ -185,7 +185,7 @@ func OpenDbForReadOnlyColumnFamilies( &cErr, ) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return nil, nil, errors.New(C.GoString(cErr)) } @@ -211,15 +211,12 @@ func ListColumnFamilies(opts *Options, name string) ([]string, error) { defer C.free(unsafe.Pointer(cName)) cNames := C.rocksdb_list_column_families(opts.c, cName, &cLen, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } namesLen := int(cLen) names := make([]string, namesLen) - // The maximum capacity of the following two slices is limited to (2^29)-1 to remain compatible - // with 32-bit platforms. The size of a `*C.char` (a pointer) is 4 Byte on a 32-bit system - // and (2^29)*4 == math.MaxInt32 + 1. 
-- See issue golang/go#13656 - cNamesArr := (*[(1 << 29) - 1]*C.char)(unsafe.Pointer(cNames))[:namesLen:namesLen] + cNamesArr := (*[1 << 30]*C.char)(unsafe.Pointer(cNames))[:namesLen:namesLen] for i, n := range cNamesArr { names[i] = C.GoString(n) } @@ -246,7 +243,7 @@ func (db *DB) Get(opts *ReadOptions, key []byte) (*Slice, error) { ) cValue := C.rocksdb_get(db.c, opts.c, cKey, C.size_t(len(key)), &cValLen, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return NewSlice(cValue, cValLen), nil @@ -261,13 +258,13 @@ func (db *DB) GetBytes(opts *ReadOptions, key []byte) ([]byte, error) { ) cValue := C.rocksdb_get(db.c, opts.c, cKey, C.size_t(len(key)), &cValLen, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } if cValue == nil { return nil, nil } - defer C.rocksdb_free(unsafe.Pointer(cValue)) + defer C.free(unsafe.Pointer(cValue)) return C.GoBytes(unsafe.Pointer(cValue), C.int(cValLen)), nil } @@ -280,7 +277,7 @@ func (db *DB) GetCF(opts *ReadOptions, cf *ColumnFamilyHandle, key []byte) (*Sli ) cValue := C.rocksdb_get_cf(db.c, opts.c, cf.c, cKey, C.size_t(len(key)), &cValLen, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return NewSlice(cValue, cValLen), nil @@ -294,7 +291,7 @@ func (db *DB) GetPinned(opts *ReadOptions, key []byte) (*PinnableSliceHandle, er ) cHandle := C.rocksdb_get_pinned(db.c, opts.c, cKey, C.size_t(len(key)), &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return NewNativePinnableSliceHandle(cHandle), nil @@ -323,7 +320,7 @@ func (db *DB) MultiGet(opts *ReadOptions, keys ...[]byte) (Slices, error) { for i, rocksErr := range rocksErrs { if rocksErr != nil { - defer C.rocksdb_free(unsafe.Pointer(rocksErr)) + defer C.free(unsafe.Pointer(rocksErr)) err := fmt.Errorf("getting %q failed: %v", string(keys[i]), C.GoString(rocksErr)) errs = append(errs, err) } @@ -375,7 +372,7 @@ func (db *DB) MultiGetCFMultiCF(opts *ReadOptions, cfs ColumnFamilyHandles, keys for i, rocksErr := range rocksErrs { if rocksErr != nil { - defer C.rocksdb_free(unsafe.Pointer(rocksErr)) + defer C.free(unsafe.Pointer(rocksErr)) err := fmt.Errorf("getting %q failed: %v", string(keys[i]), C.GoString(rocksErr)) errs = append(errs, err) } @@ -402,7 +399,7 @@ func (db *DB) Put(opts *WriteOptions, key, value []byte) error { ) C.rocksdb_put(db.c, opts.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -417,7 +414,7 @@ func (db *DB) PutCF(opts *WriteOptions, cf *ColumnFamilyHandle, key, value []byt ) C.rocksdb_put_cf(db.c, opts.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -431,7 +428,7 @@ func (db *DB) Delete(opts *WriteOptions, key []byte) error { ) C.rocksdb_delete(db.c, opts.c, cKey, C.size_t(len(key)), &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -445,7 +442,7 
@@ func (db *DB) DeleteCF(opts *WriteOptions, cf *ColumnFamilyHandle, key []byte) e ) C.rocksdb_delete_cf(db.c, opts.c, cf.c, cKey, C.size_t(len(key)), &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -460,7 +457,7 @@ func (db *DB) Merge(opts *WriteOptions, key []byte, value []byte) error { ) C.rocksdb_merge(db.c, opts.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -476,7 +473,7 @@ func (db *DB) MergeCF(opts *WriteOptions, cf *ColumnFamilyHandle, key []byte, va ) C.rocksdb_merge_cf(db.c, opts.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -487,7 +484,7 @@ func (db *DB) Write(opts *WriteOptions, batch *WriteBatch) error { var cErr *C.char C.rocksdb_write(db.c, opts.c, batch.c, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -507,20 +504,6 @@ func (db *DB) NewIteratorCF(opts *ReadOptions, cf *ColumnFamilyHandle) *Iterator return NewNativeIterator(unsafe.Pointer(cIter)) } -func (db *DB) GetUpdatesSince(seqNumber uint64) (*WalIterator, error) { - var cErr *C.char - cIter := C.rocksdb_get_updates_since(db.c, C.uint64_t(seqNumber), nil, &cErr) - if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) - return nil, errors.New(C.GoString(cErr)) - } - return NewNativeWalIterator(unsafe.Pointer(cIter)), nil -} - -func (db *DB) GetLatestSequenceNumber() uint64 { - return uint64(C.rocksdb_get_latest_sequence_number(db.c)) -} - // NewSnapshot creates a new snapshot of the database. 
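With GetUpdatesSince and the WAL iterator dropped by this revert, snapshots remain the way to get a consistent read view; a short sketch (db is an open *DB):

```go
snap := db.NewSnapshot()

ro := NewDefaultReadOptions()
ro.SetSnapshot(snap) // reads below see the DB as of NewSnapshot()

val, err := db.Get(ro, []byte("some-key"))
if err == nil {
	// ... use val.Data() ...
	val.Free()
}

ro.Destroy()
db.ReleaseSnapshot(snap) // snapshots must be released explicitly, not garbage collected
```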
func (db *DB) NewSnapshot() *Snapshot { cSnap := C.rocksdb_create_snapshot(db.c) @@ -538,7 +521,7 @@ func (db *DB) GetProperty(propName string) string { cprop := C.CString(propName) defer C.free(unsafe.Pointer(cprop)) cValue := C.rocksdb_property_value(db.c, cprop) - defer C.rocksdb_free(unsafe.Pointer(cValue)) + defer C.free(unsafe.Pointer(cValue)) return C.GoString(cValue) } @@ -547,7 +530,7 @@ func (db *DB) GetPropertyCF(propName string, cf *ColumnFamilyHandle) string { cProp := C.CString(propName) defer C.free(unsafe.Pointer(cProp)) cValue := C.rocksdb_property_value_cf(db.c, cf.c, cProp) - defer C.rocksdb_free(unsafe.Pointer(cValue)) + defer C.free(unsafe.Pointer(cValue)) return C.GoString(cValue) } @@ -560,7 +543,7 @@ func (db *DB) CreateColumnFamily(opts *Options, name string) (*ColumnFamilyHandl defer C.free(unsafe.Pointer(cName)) cHandle := C.rocksdb_create_column_family(db.c, opts.c, cName, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return NewNativeColumnFamilyHandle(cHandle), nil @@ -571,7 +554,7 @@ func (db *DB) DropColumnFamily(c *ColumnFamilyHandle) error { var cErr *C.char C.rocksdb_drop_column_family(db.c, c.c, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -685,7 +668,7 @@ func (db *DB) SetOptions(keys, values []string) error { &cErr, ) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -746,7 +729,7 @@ func (db *DB) Flush(opts *FlushOptions) error { var cErr *C.char C.rocksdb_flush(db.c, opts.c, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -757,7 +740,7 @@ func (db *DB) DisableFileDeletions() error { var cErr *C.char C.rocksdb_disable_file_deletions(db.c, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -768,7 +751,7 @@ func (db *DB) EnableFileDeletions(force bool) error { var cErr *C.char C.rocksdb_enable_file_deletions(db.c, boolToChar(force), &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -783,50 +766,6 @@ func (db *DB) DeleteFile(name string) { C.rocksdb_delete_file(db.c, cName) } -// DeleteFileInRange deletes SST files that contain keys between the Range, [r.Start, r.Limit] -func (db *DB) DeleteFileInRange(r Range) error { - cStartKey := byteToChar(r.Start) - cLimitKey := byteToChar(r.Limit) - - var cErr *C.char - - C.rocksdb_delete_file_in_range( - db.c, - cStartKey, C.size_t(len(r.Start)), - cLimitKey, C.size_t(len(r.Limit)), - &cErr, - ) - - if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) - return errors.New(C.GoString(cErr)) - } - return nil -} - -// DeleteFileInRangeCF deletes SST files that contain keys between the Range, [r.Start, r.Limit], and -// belong to a given column family -func (db *DB) DeleteFileInRangeCF(cf *ColumnFamilyHandle, r Range) error { - cStartKey := byteToChar(r.Start) - cLimitKey := byteToChar(r.Limit) - - var cErr *C.char - - C.rocksdb_delete_file_in_range_cf( - db.c, - cf.c, - cStartKey, C.size_t(len(r.Start)), - cLimitKey, C.size_t(len(r.Limit)), - &cErr, - ) - - if cErr != nil { - 
defer C.rocksdb_free(unsafe.Pointer(cErr)) - return errors.New(C.GoString(cErr)) - } - return nil -} - // IngestExternalFile loads a list of external SST files. func (db *DB) IngestExternalFile(filePaths []string, opts *IngestExternalFileOptions) error { cFilePaths := make([]*C.char, len(filePaths)) @@ -850,7 +789,7 @@ func (db *DB) IngestExternalFile(filePaths []string, opts *IngestExternalFileOpt ) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -880,7 +819,7 @@ func (db *DB) IngestExternalFileCF(handle *ColumnFamilyHandle, filePaths []strin ) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -895,7 +834,7 @@ func (db *DB) NewCheckpoint() (*Checkpoint, error) { db.c, &cErr, ) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } @@ -917,7 +856,7 @@ func DestroyDb(name string, opts *Options) error { defer C.free(unsafe.Pointer(cName)) C.rocksdb_destroy_db(opts.c, cName, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -932,7 +871,7 @@ func RepairDb(name string, opts *Options) error { defer C.free(unsafe.Pointer(cName)) C.rocksdb_repair_db(opts.c, cName, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil diff --git a/db_test.go b/db_test.go old mode 100755 new mode 100644 diff --git a/dynflag.go b/dynflag.go index 18c18f40..81909317 100644 --- a/dynflag.go +++ b/dynflag.go @@ -2,5 +2,5 @@ package gorocksdb -// #cgo LDFLAGS: -lrocksdb -lstdc++ -lm -ldl +// #cgo LDFLAGS: -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy import "C" diff --git a/env.go b/env.go index d7b65dfc..dfdd583a 100644 --- a/env.go +++ b/env.go @@ -13,11 +13,6 @@ func NewDefaultEnv() *Env { return NewNativeEnv(C.rocksdb_create_default_env()) } -// NewMemEnv creates MemEnv for in-memory testing. -func NewMemEnv() *Env { - return NewNativeEnv(C.rocksdb_create_mem_env()) -} - // NewNativeEnv creates a Environment object. func NewNativeEnv(c *C.rocksdb_env_t) *Env { return &Env{c} diff --git a/filter_policy.go b/filter_policy.go index a9c222b0..ac57fd99 100644 --- a/filter_policy.go +++ b/filter_policy.go @@ -49,12 +49,6 @@ func NewBloomFilter(bitsPerKey int) FilterPolicy { return NewNativeFilterPolicy(C.rocksdb_filterpolicy_create_bloom(C.int(bitsPerKey))) } -// NewBloomFilterFull returns a new filter policy created with use_block_based_builder=false -// (use full or partitioned filter). -func NewBloomFilterFull(bitsPerKey int) FilterPolicy { - return NewNativeFilterPolicy(C.rocksdb_filterpolicy_create_bloom_full(C.int(bitsPerKey))) -} - // Hold references to filter policies. 
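With NewBloomFilterFull gone, NewBloomFilter (the block-based builder) is the remaining constructor; a sketch of attaching it:

```go
bbto := NewDefaultBlockBasedTableOptions()
bbto.SetFilterPolicy(NewBloomFilter(10)) // about 10 bits per key, roughly 1% false positives

opts := NewDefaultOptions()
opts.SetBlockBasedTableFactory(bbto)
// Point lookups for absent keys can now skip most SST files entirely.
```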
var filterPolicies = NewCOWList() diff --git a/iterator.go b/iterator.go index aaf56435..ae11f426 100644 --- a/iterator.go +++ b/iterator.go @@ -115,7 +115,7 @@ func (iter *Iterator) Err() error { var cErr *C.char C.rocksdb_iter_get_error(iter.c, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil diff --git a/memory_usage.go b/memory_usage.go index 7b9a6ad6..740b877d 100644 --- a/memory_usage.go +++ b/memory_usage.go @@ -42,7 +42,7 @@ func GetApproximateMemoryUsageByType(dbs []*DB, caches []*Cache) (*MemoryUsage, var cErr *C.char memoryUsage := C.rocksdb_approximate_memory_usage_create(consumers, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } @@ -55,4 +55,4 @@ func GetApproximateMemoryUsageByType(dbs []*DB, caches []*Cache) (*MemoryUsage, CacheTotal: uint64(C.rocksdb_approximate_memory_usage_get_cache_total(memoryUsage)), } return result, nil -} +} \ No newline at end of file diff --git a/merge_operator.go b/merge_operator.go index 2de7f9ab..33f83948 100644 --- a/merge_operator.go +++ b/merge_operator.go @@ -28,14 +28,6 @@ type MergeOperator interface { // internal corruption. This will be treated as an error by the library. FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool) - // The name of the MergeOperator. - Name() string -} - -// PartialMerger implements PartialMerge(key, leftOperand, rightOperand []byte) ([]byte, err) -// When a MergeOperator implements this interface, PartialMerge will be called in addition -// to FullMerge for compactions across levels -type PartialMerger interface { // This function performs merge(left_op, right_op) // when both the operands are themselves merge operation types // that you would have passed to a db.Merge() call in the same order @@ -50,28 +42,9 @@ type PartialMerger interface { // The library will internally keep track of the operations, and apply them in the // correct order once a base-value (a Put/Delete/End-of-Database) is seen. PartialMerge(key, leftOperand, rightOperand []byte) ([]byte, bool) -} -// MultiMerger implements PartialMergeMulti(key []byte, operands [][]byte) ([]byte, err) -// When a MergeOperator implements this interface, PartialMergeMulti will be called in addition -// to FullMerge for compactions across levels -type MultiMerger interface { - // PartialMerge performs merge on multiple operands - // when all of the operands are themselves merge operation types - // that you would have passed to a db.Merge() call in the same order - // (i.e.: db.Merge(key,operand[0]), followed by db.Merge(key,operand[1]), - // ... db.Merge(key, operand[n])). - // - // PartialMerge should combine them into a single merge operation. - // The return value should be constructed such that a call to - // db.Merge(key, new_value) would yield the same result as a call - // to db.Merge(key,operand[0]), followed by db.Merge(key,operand[1]), - // ... db.Merge(key, operand[n])). - // - // If it is impossible or infeasible to combine the operations, return false. - // The library will internally keep track of the operations, and apply them in the - // correct order once a base-value (a Put/Delete/End-of-Database) is seen. - PartialMergeMulti(key []byte, operands [][]byte) ([]byte, bool) + // The name of the MergeOperator. + Name() string } // NewNativeMergeOperator creates a MergeOperator object. 
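Under the reverted contract above, PartialMerge is a required method of MergeOperator itself rather than an optional PartialMerger or MultiMerger interface. A minimal operator satisfying it; illustrative only, the type name is invented:

```go
type appendOperator struct{}

func (appendOperator) Name() string { return "example.append" }

// FullMerge folds every queued operand onto the existing value.
func (appendOperator) FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool) {
	out := append([]byte{}, existingValue...)
	for _, op := range operands {
		out = append(out, op...)
	}
	return out, true
}

// PartialMerge combines two adjacent operands so compaction can shrink the
// operand list before a base value (Put/Delete/end of database) is seen.
func (appendOperator) PartialMerge(key, leftOperand, rightOperand []byte) ([]byte, bool) {
	return append(append([]byte{}, leftOperand...), rightOperand...), true
}
```

It would be registered with opts.SetMergeOperator(appendOperator{}) before opening the database, as the tests below do with their mocks.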
@@ -137,22 +110,13 @@ func gorocksdb_mergeoperator_partial_merge_multi(idx int, cKey *C.char, cKeyLen success := true merger := mergeOperators.Get(idx).(mergeOperatorWrapper).mergeOperator - - // check if this MergeOperator supports partial or multi merges - switch v := merger.(type) { - case MultiMerger: - newValue, success = v.PartialMergeMulti(key, operands) - case PartialMerger: - leftOperand := operands[0] - for i := 1; i < int(cNumOperands); i++ { - newValue, success = v.PartialMerge(key, leftOperand, operands[i]) - if !success { - break - } - leftOperand = newValue + leftOperand := operands[0] + for i := 1; i < int(cNumOperands); i++ { + newValue, success = merger.PartialMerge(key, leftOperand, operands[i]) + if !success { + break } - default: - success = false + leftOperand = newValue } newValueLen := len(newValue) diff --git a/merge_operator_test.go b/merge_operator_test.go index 9dad6f78..fd7e0887 100644 --- a/merge_operator_test.go +++ b/merge_operator_test.go @@ -40,146 +40,15 @@ func TestMergeOperator(t *testing.T) { ensure.DeepEqual(t, v1.Data(), givenMerged) } -func TestPartialMergeOperator(t *testing.T) { - var ( - givenKey = []byte("hello") - startingVal = []byte("foo") - mergeVal1 = []byte("bar") - mergeVal2 = []byte("baz") - fMergeResult = []byte("foobarbaz") - pMergeResult = []byte("barbaz") - ) - - merger := &mockMergePartialOperator{ - fullMerge: func(key, existingValue []byte, operands [][]byte) ([]byte, bool) { - ensure.DeepEqual(&fatalAsError{t}, key, givenKey) - ensure.DeepEqual(&fatalAsError{t}, existingValue, startingVal) - ensure.DeepEqual(&fatalAsError{t}, operands[0], pMergeResult) - return fMergeResult, true - }, - partialMerge: func(key, leftOperand, rightOperand []byte) ([]byte, bool) { - ensure.DeepEqual(&fatalAsError{t}, key, givenKey) - ensure.DeepEqual(&fatalAsError{t}, leftOperand, mergeVal1) - ensure.DeepEqual(&fatalAsError{t}, rightOperand, mergeVal2) - return pMergeResult, true - }, - } - db := newTestDB(t, "TestMergeOperator", func(opts *Options) { - opts.SetMergeOperator(merger) - }) - defer db.Close() - - wo := NewDefaultWriteOptions() - defer wo.Destroy() - - // insert a starting value and compact to trigger merges - ensure.Nil(t, db.Put(wo, givenKey, startingVal)) - - // trigger a compaction to ensure that a merge is performed - db.CompactRange(Range{nil, nil}) - - // we expect these two operands to be passed to merge partial - ensure.Nil(t, db.Merge(wo, givenKey, mergeVal1)) - ensure.Nil(t, db.Merge(wo, givenKey, mergeVal2)) - - // trigger a compaction to ensure that a - // partial and full merge are performed - db.CompactRange(Range{nil, nil}) - - ro := NewDefaultReadOptions() - v1, err := db.Get(ro, givenKey) - defer v1.Free() - ensure.Nil(t, err) - ensure.DeepEqual(t, v1.Data(), fMergeResult) - -} - -func TestMergeMultiOperator(t *testing.T) { - var ( - givenKey = []byte("hello") - startingVal = []byte("foo") - mergeVal1 = []byte("bar") - mergeVal2 = []byte("baz") - fMergeResult = []byte("foobarbaz") - pMergeResult = []byte("barbaz") - ) - - merger := &mockMergeMultiOperator{ - fullMerge: func(key, existingValue []byte, operands [][]byte) ([]byte, bool) { - ensure.DeepEqual(&fatalAsError{t}, key, givenKey) - ensure.DeepEqual(&fatalAsError{t}, existingValue, startingVal) - ensure.DeepEqual(&fatalAsError{t}, operands[0], pMergeResult) - return fMergeResult, true - }, - partialMergeMulti: func(key []byte, operands [][]byte) ([]byte, bool) { - ensure.DeepEqual(&fatalAsError{t}, key, givenKey) - ensure.DeepEqual(&fatalAsError{t}, operands[0], 
mergeVal1) - ensure.DeepEqual(&fatalAsError{t}, operands[1], mergeVal2) - return pMergeResult, true - }, - } - db := newTestDB(t, "TestMergeOperator", func(opts *Options) { - opts.SetMergeOperator(merger) - }) - defer db.Close() - - wo := NewDefaultWriteOptions() - defer wo.Destroy() - - // insert a starting value and compact to trigger merges - ensure.Nil(t, db.Put(wo, givenKey, startingVal)) - - // trigger a compaction to ensure that a merge is performed - db.CompactRange(Range{nil, nil}) - - // we expect these two operands to be passed to merge multi - ensure.Nil(t, db.Merge(wo, givenKey, mergeVal1)) - ensure.Nil(t, db.Merge(wo, givenKey, mergeVal2)) - - // trigger a compaction to ensure that a - // partial and full merge are performed - db.CompactRange(Range{nil, nil}) - - ro := NewDefaultReadOptions() - v1, err := db.Get(ro, givenKey) - defer v1.Free() - ensure.Nil(t, err) - ensure.DeepEqual(t, v1.Data(), fMergeResult) - -} - -// Mock Objects type mockMergeOperator struct { - fullMerge func(key, existingValue []byte, operands [][]byte) ([]byte, bool) -} - -func (m *mockMergeOperator) Name() string { return "gorocksdb.test" } -func (m *mockMergeOperator) FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool) { - return m.fullMerge(key, existingValue, operands) -} - -type mockMergeMultiOperator struct { - fullMerge func(key, existingValue []byte, operands [][]byte) ([]byte, bool) - partialMergeMulti func(key []byte, operands [][]byte) ([]byte, bool) -} - -func (m *mockMergeMultiOperator) Name() string { return "gorocksdb.multi" } -func (m *mockMergeMultiOperator) FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool) { - return m.fullMerge(key, existingValue, operands) -} -func (m *mockMergeMultiOperator) PartialMergeMulti(key []byte, operands [][]byte) ([]byte, bool) { - return m.partialMergeMulti(key, operands) -} - -type mockMergePartialOperator struct { fullMerge func(key, existingValue []byte, operands [][]byte) ([]byte, bool) partialMerge func(key, leftOperand, rightOperand []byte) ([]byte, bool) } -func (m *mockMergePartialOperator) Name() string { return "gorocksdb.partial" } -func (m *mockMergePartialOperator) FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool) { +func (m *mockMergeOperator) Name() string { return "gorocksdb.test" } +func (m *mockMergeOperator) FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool) { return m.fullMerge(key, existingValue, operands) } -func (m *mockMergePartialOperator) PartialMerge(key, leftOperand, rightOperand []byte) ([]byte, bool) { +func (m *mockMergeOperator) PartialMerge(key, leftOperand, rightOperand []byte) ([]byte, bool) { return m.partialMerge(key, leftOperand, rightOperand) } diff --git a/options.go b/options.go index 07000215..0f1376ad 100644 --- a/options.go +++ b/options.go @@ -60,15 +60,6 @@ const ( FatalInfoLogLevel = InfoLogLevel(4) ) -type WALRecoveryMode int - -const ( - TolerateCorruptedTailRecordsRecovery = WALRecoveryMode(0) - AbsoluteConsistencyRecovery = WALRecoveryMode(1) - PointInTimeRecovery = WALRecoveryMode(2) - SkipAnyCorruptedRecordsRecovery = WALRecoveryMode(3) -) - // Options represent all of the available options when opening a database with Open. 
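A sketch of typical wiring against this struct; note that the write-buffer default documented further down reverts to 4 MB, so workloads tuned for the newer 64 MB default may want to set it explicitly:

```go
opts := NewDefaultOptions()
opts.SetCreateIfMissing(true)
opts.SetWriteBufferSize(64 << 20) // restore 64 MiB memtables explicitly

db, err := OpenDb(opts, "/tmp/example-db")
if err != nil {
	log.Fatal(err)
}
defer db.Close()
```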
type Options struct { c *C.rocksdb_options_t @@ -111,7 +102,7 @@ func GetOptionsFromString(base *Options, optStr string) (*Options, error) { newOpt := NewDefaultOptions() C.rocksdb_get_options_from_string(base.c, cOptStr, newOpt.c, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } @@ -342,7 +333,7 @@ func (opts *Options) OptimizeUniversalStyleCompaction(memtable_memory_budget uin // so you may wish to adjust this parameter to control memory usage. // Also, a larger write buffer will result in a longer recovery time // the next time the database is opened. -// Default: 64MB +// Default: 4MB func (opts *Options) SetWriteBufferSize(value int) { C.rocksdb_options_set_write_buffer_size(opts.c, C.size_t(value)) } @@ -810,14 +801,6 @@ func (opts *Options) SetDisableAutoCompactions(value bool) { C.rocksdb_options_set_disable_auto_compactions(opts.c, C.int(btoi(value))) } -// SetWALRecoveryMode sets the recovery mode -// -// Recovery mode to control the consistency while replaying WAL -// Default: TolerateCorruptedTailRecordsRecovery -func (opts *Options) SetWALRecoveryMode(mode WALRecoveryMode) { - C.rocksdb_options_set_wal_recovery_mode(opts.c, C.int(mode)) -} - // SetWALTtlSeconds sets the WAL ttl in seconds. // // The following two options affect how archived logs will be deleted. @@ -846,13 +829,6 @@ func (opts *Options) SetWalSizeLimitMb(value uint64) { C.rocksdb_options_set_WAL_size_limit_MB(opts.c, C.uint64_t(value)) } -// SetEnablePipelinedWrite enables pipelined write -// -// Default: false -func (opts *Options) SetEnablePipelinedWrite(value bool) { - C.rocksdb_options_set_enable_pipelined_write(opts.c, boolToChar(value)) -} - // SetManifestPreallocationSize sets the number of bytes // to preallocate (via fallocate) the manifest files. // @@ -991,7 +967,7 @@ func (opts *Options) SetFIFOCompactionOptions(value *FIFOCompactionOptions) { // GetStatisticsString returns the statistics as a string. func (opts *Options) GetStatisticsString() string { sString := C.rocksdb_options_statistics_get_string(opts.c) - defer C.rocksdb_free(unsafe.Pointer(sString)) + defer C.free(unsafe.Pointer(sString)) return C.GoString(sString) } @@ -1166,44 +1142,15 @@ func (opts *Options) SetAllowIngestBehind(value bool) { C.rocksdb_options_set_allow_ingest_behind(opts.c, boolToChar(value)) } -// SetMemTablePrefixBloomSizeRatio sets memtable_prefix_bloom_size_ratio -// if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0, -// create prefix bloom for memtable with the size of -// write_buffer_size * memtable_prefix_bloom_size_ratio. -// If it is larger than 0.25, it is sanitized to 0.25. -// -// Default: 0 (disable) -func (opts *Options) SetMemTablePrefixBloomSizeRatio(value float64) { - C.rocksdb_options_set_memtable_prefix_bloom_size_ratio(opts.c, C.double(value)) -} - -// SetOptimizeFiltersForHits sets optimize_filters_for_hits -// This flag specifies that the implementation should optimize the filters -// mainly for cases where keys are found rather than also optimize for keys -// missed. This would be used in cases where the application knows that -// there are very few misses or the performance in the case of misses is not -// important. -// -// For now, this flag allows us to not store filters for the last level i.e -// the largest level which contains data of the LSM store. For keys which -// are hits, the filters in this level are not useful because we will search -// for the data anyway. 
NOTE: the filters in other levels are still useful -// even for key hit because they tell us whether to look in that level or go -// to the higher level. -// -// Default: false -func (opts *Options) SetOptimizeFiltersForHits(value bool) { - C.rocksdb_options_set_optimize_filters_for_hits(opts.c, C.int(btoi(value))) -} - // Destroy deallocates the Options object. func (opts *Options) Destroy() { C.rocksdb_options_destroy(opts.c) if opts.ccmp != nil { C.rocksdb_comparator_destroy(opts.ccmp) } - // don't destroy the opts.cst here, it has already been - // associated with a PrefixExtractor and this will segfault + if opts.cst != nil { + C.rocksdb_slicetransform_destroy(opts.cst) + } if opts.ccf != nil { C.rocksdb_compactionfilter_destroy(opts.ccf) } diff --git a/options_block_based_table.go b/options_block_based_table.go index 80244132..e91bed01 100644 --- a/options_block_based_table.go +++ b/options_block_based_table.go @@ -56,14 +56,6 @@ func (opts *BlockBasedTableOptions) SetCacheIndexAndFilterBlocks(value bool) { C.rocksdb_block_based_options_set_cache_index_and_filter_blocks(opts.c, boolToChar(value)) } -// SetCacheIndexAndFilterBlocksWithHighPriority sets cache index and filter -// blocks with high priority (if cache_index_and_filter_blocks is enabled). -// If set to true, depending on implementation of block cache, -// index and filter blocks may be less likely to be evicted than data blocks. -func (opts *BlockBasedTableOptions) SetCacheIndexAndFilterBlocksWithHighPriority(value bool) { - C.rocksdb_block_based_options_set_cache_index_and_filter_blocks_with_high_priority(opts.c, boolToChar(value)) -} - // SetPinL0FilterAndIndexBlocksInCache sets cache_index_and_filter_blocks. // If is true and the below is true (hash_index_allow_collision), then // filter and index blocks are stored in the cache, but a reference is @@ -73,15 +65,6 @@ func (opts *BlockBasedTableOptions) SetPinL0FilterAndIndexBlocksInCache(value bo C.rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache(opts.c, boolToChar(value)) } -// SetPinTopLevelIndexAndFilter set that if cache_index_and_filter_blocks is true, then -// the top-level index of partitioned filter and index blocks are stored in -// the cache, but a reference is held in the "table reader" object so the -// blocks are pinned and only evicted from cache when the table reader is -// freed. This is not limited to l0 in LSM tree. -func (opts *BlockBasedTableOptions) SetPinTopLevelIndexAndFilter(value bool) { - C.rocksdb_block_based_options_set_pin_top_level_index_and_filter(opts.c, boolToChar(value)) -} - // SetBlockSize sets the approximate size of user data packed per block. // Note that the block size specified here corresponds opts uncompressed data. // The actual size of the unit read from disk may be smaller if @@ -111,39 +94,6 @@ func (opts *BlockBasedTableOptions) SetBlockRestartInterval(blockRestartInterval C.rocksdb_block_based_options_set_block_restart_interval(opts.c, C.int(blockRestartInterval)) } -// SetIndexBlockRestartInterval is the same as SetBlockRestartInterval but used for the index block. -// Default: 1 -func (opts *BlockBasedTableOptions) SetIndexBlockRestartInterval(indexBlockRestartInterval int) { - C.rocksdb_block_based_options_set_index_block_restart_interval(opts.c, C.int(indexBlockRestartInterval)) -} - -// SetMetadataBlockSize sets the block size for partitioned metadata. -// Currently applied to indexes when -// kTwoLevelIndexSearch is used and to filters when partition_filters is used. 
-// Note: Since in the current implementation the filters and index partitions -// are aligned, an index/filter block is created when either index or filter -// block size reaches the specified limit. -// Note: this limit is currently applied to only index blocks; a filter -// partition is cut right after an index block is cut -// Default: 4096 -func (opts *BlockBasedTableOptions) SetMetadataBlockSize(metadataBlockSize uint64) { - C.rocksdb_block_based_options_set_metadata_block_size(opts.c, C.uint64_t(metadataBlockSize)) -} - -// SetPartitionFilters sets using partitioned full filters for each SST file. -// This option is incompatible with block-based filters. -// Note: currently this option requires kTwoLevelIndexSearch to be set as well. -// Default: false -func (opts *BlockBasedTableOptions) SetPartitionFilters(value bool) { - C.rocksdb_block_based_options_set_partition_filters(opts.c, boolToChar(value)) -} - -// SetUseDeltaEncoding sets using delta encoding to compress keys in blocks. -// ReadOptions::pin_data requires this option to be disabled. -func (opts *BlockBasedTableOptions) SetUseDeltaEncoding(value bool) { - C.rocksdb_block_based_options_set_use_delta_encoding(opts.c, boolToChar(value)) -} - // SetFilterPolicy sets the filter policy opts reduce disk reads. // Many applications will benefit from passing the result of // NewBloomFilterPolicy() here. @@ -191,35 +141,6 @@ func (opts *BlockBasedTableOptions) SetWholeKeyFiltering(value bool) { C.rocksdb_block_based_options_set_whole_key_filtering(opts.c, boolToChar(value)) } -// SetFormatVersion sets the format version. -// We currently have five versions: -// 0 -- This version is currently written out by all RocksDB's versions by -// default. Can be read by really old RocksDB's. Doesn't support changing -// checksum (default is CRC32). -// 1 -- Can be read by RocksDB's versions since 3.0. Supports non-default -// checksum, like xxHash. It is written by RocksDB when -// BlockBasedTableOptions::checksum is something other than kCRC32c. (version -// 0 is silently upconverted) -// 2 -- Can be read by RocksDB's versions since 3.10. Changes the way we -// encode compressed blocks with LZ4, BZip2 and Zlib compression. If you -// don't plan to run RocksDB before version 3.10, you should probably use -// this. -// 3 -- Can be read by RocksDB's versions since 5.15. Changes the way we -// encode the keys in index blocks. If you don't plan to run RocksDB before -// version 5.15, you should probably use this. -// This option only affects newly written tables. When reading existing -// tables, the information about version is read from the footer. -// 4 -- Can be read by RocksDB's versions since 5.16. Changes the way we -// encode the values in index blocks. If you don't plan to run RocksDB before -// version 5.16 and you are using index_block_restart_interval > 1, you should -// probably use this as it would reduce the index size. -// This option only affects newly written tables. When reading existing -// tables, the information about version is read from the footer. -// Default: 2 -func (opts *BlockBasedTableOptions) SetFormatVersion(version int) { - C.rocksdb_block_based_options_set_format_version(opts.c, C.int(version)) -} - // SetIndexType sets the index type used for this table. 
// kBinarySearch: // A space efficient index block that is optimized for diff --git a/options_read.go b/options_read.go index 5f93428d..a047c7ce 100644 --- a/options_read.go +++ b/options_read.go @@ -50,17 +50,6 @@ func (opts *ReadOptions) SetVerifyChecksums(value bool) { C.rocksdb_readoptions_set_verify_checksums(opts.c, boolToChar(value)) } -// SetPrefixSameAsStart Enforce that the iterator only iterates over the same -// prefix as the seek. -// This option is effective only for prefix seeks, i.e. prefix_extractor is -// non-null for the column family and total_order_seek is false. Unlike -// iterate_upper_bound, prefix_same_as_start only works within a prefix -// but in both directions. -// Default: false -func (opts *ReadOptions) SetPrefixSameAsStart(value bool) { - C.rocksdb_readoptions_set_prefix_same_as_start(opts.c, boolToChar(value)) -} - // SetFillCache specify whether the "data block"/"index block"/"filter block" // read for this iteration should be cached in memory? // Callers may wish to set this field to false for bulk scans. diff --git a/slice.go b/slice.go index 707a1f2e..b450daa3 100644 --- a/slice.go +++ b/slice.go @@ -51,7 +51,7 @@ func (s *Slice) Exists() bool { // Free frees the slice data. func (s *Slice) Free() { if !s.freed { - C.rocksdb_free(unsafe.Pointer(s.data)) + C.free(unsafe.Pointer(s.data)) s.freed = true } } diff --git a/slice_transform.go b/slice_transform.go index 8b9b2362..e66e4d84 100644 --- a/slice_transform.go +++ b/slice_transform.go @@ -23,11 +23,6 @@ func NewFixedPrefixTransform(prefixLen int) SliceTransform { return NewNativeSliceTransform(C.rocksdb_slicetransform_create_fixed_prefix(C.size_t(prefixLen))) } -// NewNoopPrefixTransform creates a new no-op prefix transform. -func NewNoopPrefixTransform() SliceTransform { - return NewNativeSliceTransform(C.rocksdb_slicetransform_create_noop()) -} - // NewNativeSliceTransform creates a SliceTransform object. 
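With the no-op transform removed, NewFixedPrefixTransform is the remaining built-in; a sketch of prefix scanning with it, where the key layout and the db handle are assumptions:

```go
opts := NewDefaultOptions()
opts.SetPrefixExtractor(NewFixedPrefixTransform(4)) // e.g. keys shaped like "usr:<id>"

// ... open db with opts and write some "usr:*" keys ...

prefix := []byte("usr:")
ro := NewDefaultReadOptions()
it := db.NewIterator(ro)
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
	// ... only keys sharing the 4-byte prefix are visited ...
}
it.Close()
ro.Destroy()
```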
func NewNativeSliceTransform(c *C.rocksdb_slicetransform_t) SliceTransform { return nativeSliceTransform{c} diff --git a/slice_transform_test.go b/slice_transform_test.go index d60c7326..1c551183 100644 --- a/slice_transform_test.go +++ b/slice_transform_test.go @@ -35,13 +35,6 @@ func TestFixedPrefixTransformOpen(t *testing.T) { defer db.Close() } -func TestNewNoopPrefixTransform(t *testing.T) { - db := newTestDB(t, "TestNewNoopPrefixTransform", func(opts *Options) { - opts.SetPrefixExtractor(NewNoopPrefixTransform()) - }) - defer db.Close() -} - type testSliceTransform struct { initiated bool } diff --git a/sst_file_writer.go b/sst_file_writer.go index 54f2c139..0f4689c2 100644 --- a/sst_file_writer.go +++ b/sst_file_writer.go @@ -30,7 +30,7 @@ func (w *SSTFileWriter) Open(path string) error { defer C.free(unsafe.Pointer(cPath)) C.rocksdb_sstfilewriter_open(w.c, cPath, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -44,7 +44,7 @@ func (w *SSTFileWriter) Add(key, value []byte) error { var cErr *C.char C.rocksdb_sstfilewriter_add(w.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -55,7 +55,7 @@ func (w *SSTFileWriter) Finish() error { var cErr *C.char C.rocksdb_sstfilewriter_finish(w.c, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil diff --git a/staticflag_linux.go b/staticflag_linux.go index 3af044ef..6653c498 100644 --- a/staticflag_linux.go +++ b/staticflag_linux.go @@ -2,5 +2,5 @@ package gorocksdb -// #cgo LDFLAGS: -l:librocksdb.a -l:libstdc++.a -lm -ldl +// #cgo LDFLAGS: -l:librocksdb.a -l:libstdc++.a -l:libz.a -l:libbz2.a -l:libsnappy.a -lm import "C" diff --git a/transaction.go b/transaction.go index 67c9ef09..49a04bd7 100644 --- a/transaction.go +++ b/transaction.go @@ -26,7 +26,7 @@ func (transaction *Transaction) Commit() error { ) C.rocksdb_transaction_commit(transaction.c, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -40,7 +40,7 @@ func (transaction *Transaction) Rollback() error { C.rocksdb_transaction_rollback(transaction.c, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -57,7 +57,7 @@ func (transaction *Transaction) Get(opts *ReadOptions, key []byte) (*Slice, erro transaction.c, opts.c, cKey, C.size_t(len(key)), &cValLen, &cErr, ) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return NewSlice(cValue, cValLen), nil @@ -74,7 +74,7 @@ func (transaction *Transaction) GetForUpdate(opts *ReadOptions, key []byte) (*Sl transaction.c, opts.c, cKey, C.size_t(len(key)), &cValLen, C.uchar(byte(1)) /*exclusive*/, &cErr, ) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return NewSlice(cValue, cValLen), nil @@ -91,7 +91,7 @@ func (transaction *Transaction) Put(key, value []byte) error { transaction.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr, ) if cErr != nil { - defer 
C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -105,7 +105,7 @@ func (transaction *Transaction) Delete(key []byte) error { ) C.rocksdb_transaction_delete(transaction.c, cKey, C.size_t(len(key)), &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil diff --git a/transactiondb.go b/transactiondb.go index cfdeac9c..f5d2fd70 100644 --- a/transactiondb.go +++ b/transactiondb.go @@ -30,7 +30,7 @@ func OpenTransactionDb( db := C.rocksdb_transactiondb_open( opts.c, transactionDBOpts.c, cName, &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return &TransactionDB{ @@ -83,7 +83,7 @@ func (db *TransactionDB) Get(opts *ReadOptions, key []byte) (*Slice, error) { db.c, opts.c, cKey, C.size_t(len(key)), &cValLen, &cErr, ) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } return NewSlice(cValue, cValLen), nil @@ -100,7 +100,7 @@ func (db *TransactionDB) Put(opts *WriteOptions, key, value []byte) error { db.c, opts.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr, ) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -114,7 +114,7 @@ func (db *TransactionDB) Delete(opts *WriteOptions, key []byte) error { ) C.rocksdb_transactiondb_delete(db.c, opts.c, cKey, C.size_t(len(key)), &cErr) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) } return nil @@ -129,7 +129,7 @@ func (db *TransactionDB) NewCheckpoint() (*Checkpoint, error) { db.c, &cErr, ) if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) + defer C.free(unsafe.Pointer(cErr)) return nil, errors.New(C.GoString(cErr)) } diff --git a/util.go b/util.go index c989e5a5..f05cdcdd 100644 --- a/util.go +++ b/util.go @@ -1,5 +1,4 @@ package gorocksdb - // #include // #include diff --git a/wal_iterator.go b/wal_iterator.go deleted file mode 100755 index 7805d7c9..00000000 --- a/wal_iterator.go +++ /dev/null @@ -1,49 +0,0 @@ -package gorocksdb - -// #include -// #include "rocksdb/c.h" -import "C" -import ( - "errors" - "unsafe" -) - -type WalIterator struct { - c *C.rocksdb_wal_iterator_t -} - -func NewNativeWalIterator(c unsafe.Pointer) *WalIterator { - return &WalIterator{(*C.rocksdb_wal_iterator_t)(c)} -} - -func (iter *WalIterator) Valid() bool { - return C.rocksdb_wal_iter_valid(iter.c) != 0 -} - -func (iter *WalIterator) Next() { - C.rocksdb_wal_iter_next(iter.c) -} - -func (iter *WalIterator) Err() error { - var cErr *C.char - C.rocksdb_wal_iter_status(iter.c, &cErr) - if cErr != nil { - defer C.rocksdb_free(unsafe.Pointer(cErr)) - return errors.New(C.GoString(cErr)) - } - return nil -} - -func (iter *WalIterator) Destroy() { - C.rocksdb_wal_iter_destroy(iter.c) - iter.c = nil -} - -// C.rocksdb_wal_iter_get_batch in the official rocksdb c wrapper has memory leak -// see https://github.com/facebook/rocksdb/pull/5515 -// https://github.com/facebook/rocksdb/issues/5536 -func (iter *WalIterator) GetBatch() (*WriteBatch, uint64) { - var cSeq C.uint64_t - cB := C.rocksdb_wal_iter_get_batch(iter.c, &cSeq) - return NewNativeWriteBatch(cB), uint64(cSeq) -} diff --git a/write_batch.go 
b/write_batch.go index 6e847c65..55dce05f 100644 --- a/write_batch.go +++ b/write_batch.go @@ -79,12 +79,6 @@ func (wb *WriteBatch) PutManyCF(cf *ColumnFamilyHandle, keys, values [][]byte) e return nil } -// Append a blob of arbitrary size to the records in this batch. -func (wb *WriteBatch) PutLogData(blob []byte) { - cBlob := byteToChar(blob) - C.rocksdb_writebatch_put_log_data(wb.c, cBlob, C.size_t(len(blob))) -} - // Merge queues a merge of "value" with the existing value of "key". func (wb *WriteBatch) Merge(key, value []byte) { cKey := byteToChar(key) @@ -112,21 +106,6 @@ func (wb *WriteBatch) DeleteCF(cf *ColumnFamilyHandle, key []byte) { C.rocksdb_writebatch_delete_cf(wb.c, cf.c, cKey, C.size_t(len(key))) } -// DeleteRange deletes keys that are between [startKey, endKey) -func (wb *WriteBatch) DeleteRange(startKey []byte, endKey []byte) { - cStartKey := byteToChar(startKey) - cEndKey := byteToChar(endKey) - C.rocksdb_writebatch_delete_range(wb.c, cStartKey, C.size_t(len(startKey)), cEndKey, C.size_t(len(endKey))) -} - -// DeleteRangeCF deletes keys that are between [startKey, endKey) and -// belong to a given column family -func (wb *WriteBatch) DeleteRangeCF(cf *ColumnFamilyHandle, startKey []byte, endKey []byte) { - cStartKey := byteToChar(startKey) - cEndKey := byteToChar(endKey) - C.rocksdb_writebatch_delete_range_cf(wb.c, cf.c, cStartKey, C.size_t(len(startKey)), cEndKey, C.size_t(len(endKey))) -} - // Data returns the serialized version of this batch. func (wb *WriteBatch) Data() []byte { var cSize C.size_t diff --git a/write_batch_test.go b/write_batch_test.go index f1c7a918..f5b15555 100644 --- a/write_batch_test.go +++ b/write_batch_test.go @@ -39,18 +39,6 @@ func TestWriteBatch(t *testing.T) { defer v2.Free() ensure.Nil(t, err) ensure.True(t, v2.Data() == nil) - - // DeleteRange test - wb.Clear() - wb.DeleteRange(givenKey1, givenKey2) - - // perform the batch - ensure.Nil(t, db.Write(wo, wb)) - - v1, err = db.Get(ro, givenKey1) - defer v1.Free() - ensure.Nil(t, err) - ensure.True(t, v1.Data() == nil) } func TestWriteBatchPutMany(t *testing.T) { From e5b71969e4b07e60ea8504c12d19a111eee6b80f Mon Sep 17 00:00:00 2001 From: linyuanjin Date: Thu, 25 Apr 2019 17:40:19 +0800 Subject: [PATCH 46/55] DeleteRange DeleteRangeCF --- write_batch.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/write_batch.go b/write_batch.go index 55dce05f..9f99793f 100644 --- a/write_batch.go +++ b/write_batch.go @@ -106,6 +106,18 @@ func (wb *WriteBatch) DeleteCF(cf *ColumnFamilyHandle, key []byte) { C.rocksdb_writebatch_delete_cf(wb.c, cf.c, cKey, C.size_t(len(key))) } +func (wb *WriteBatch) DeleteRange(beginKey []byte, endKey []byte) { + cBeginKey := byteToChar(beginKey) + cEndKey := byteToChar(endKey) + C.rocksdb_writebatch_delete_range(wb.c, cBeginKey, C.size_t(len(beginKey)), cEndKey, C.size_t(len(endKey))) +} + +func (wb *WriteBatch) DeleteRangeCF(cf *ColumnFamilyHandle, beginKey []byte, endKey []byte) { + cBeginKey := byteToChar(beginKey) + cEndKey := byteToChar(endKey) + C.rocksdb_writebatch_delete_range_cf(wb.c, cf.c, cBeginKey, C.size_t(len(beginKey)), cEndKey, C.size_t(len(endKey))) +} + // Data returns the serialized version of this batch. 
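A sketch of batch usage including the range delete introduced here (db is an open *DB):

```go
wb := NewWriteBatch()
wb.Put([]byte("k1"), []byte("v1"))
wb.DeleteRange([]byte("a"), []byte("m")) // end key is exclusive: removes [a, m)

wo := NewDefaultWriteOptions()
if err := db.Write(wo, wb); err != nil { // the whole batch applies atomically
	log.Fatal(err)
}
wo.Destroy()
wb.Destroy()
```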
func (wb *WriteBatch) Data() []byte { var cSize C.size_t
From d5717da2f3f1573f1aea16afd1e88e7cd87d7bbe Mon Sep 17 00:00:00 2001 From: linyuanjin Date: Fri, 24 May 2019 12:42:27 +0800 Subject: [PATCH 47/55] DeleteFileInRange --- db.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+)
diff --git a/db.go b/db.go index e3c128ce..09a79fb7 100644 --- a/db.go +++ b/db.go @@ -766,6 +766,26 @@ func (db *DB) DeleteFile(name string) { C.rocksdb_delete_file(db.c, cName) } +func (db *DB) DeleteFileInRange(beginKey []byte, limitKey []byte) error { + cBeginKey := byteToChar(beginKey) + cLimitKey := byteToChar(limitKey) + + var cErr *C.char + + C.rocksdb_delete_file_in_range( + db.c, + cBeginKey, C.size_t(len(beginKey)), + cLimitKey, C.size_t(len(limitKey)), + &cErr, + ) + + if cErr != nil { + defer C.free(unsafe.Pointer(cErr)) + return errors.New(C.GoString(cErr)) + } + return nil +} + // IngestExternalFile loads a list of external SST files. func (db *DB) IngestExternalFile(filePaths []string, opts *IngestExternalFileOptions) error { cFilePaths := make([]*C.char, len(filePaths))
From 561e7ca50fb217321842547ce8215c93ae0e5c9c Mon Sep 17 00:00:00 2001 From: linyuanjin Date: Mon, 3 Jun 2019 15:10:38 +0800 Subject: [PATCH 48/55] DeleteFileInRange and comments --- db.go | 34 +++++++++++++++++++++++++++++----- write_batch.go | 15 +++++++++------ 2 files changed, 38 insertions(+), 11 deletions(-)
diff --git a/db.go b/db.go index 09a79fb7..9a8d99b1 100644 --- a/db.go +++ b/db.go @@ -766,16 +766,40 @@ func (db *DB) DeleteFile(name string) { C.rocksdb_delete_file(db.c, cName) } -func (db *DB) DeleteFileInRange(beginKey []byte, limitKey []byte) error { - cBeginKey := byteToChar(beginKey) - cLimitKey := byteToChar(limitKey) +// DeleteFileInRange deletes SST files that contain keys between the Range, [r.Start, r.Limit] +func (db *DB) DeleteFileInRange(r Range) error { + cStartKey := byteToChar(r.Start) + cLimitKey := byteToChar(r.Limit) var cErr *C.char C.rocksdb_delete_file_in_range( db.c, - cBeginKey, C.size_t(len(beginKey)), - cLimitKey, C.size_t(len(limitKey)), + cStartKey, C.size_t(len(r.Start)), + cLimitKey, C.size_t(len(r.Limit)), &cErr, ) + + if cErr != nil { + defer C.free(unsafe.Pointer(cErr)) + return errors.New(C.GoString(cErr)) + } + return nil +} + +// DeleteFileInRangeCF deletes SST files that contain keys between the Range, [r.Start, r.Limit], and +// belong to a given column family +func (db *DB) DeleteFileInRangeCF(cf *ColumnFamilyHandle, r Range) error { + cStartKey := byteToChar(r.Start) + cLimitKey := byteToChar(r.Limit) + + var cErr *C.char + + C.rocksdb_delete_file_in_range_cf( + db.c, + cf.c, + cStartKey, C.size_t(len(r.Start)), + cLimitKey, C.size_t(len(r.Limit)), &cErr, )
diff --git a/write_batch.go b/write_batch.go index 9f99793f..33c1043a 100644 --- a/write_batch.go +++ b/write_batch.go @@ -106,16 +106,19 @@ func (wb *WriteBatch) DeleteCF(cf *ColumnFamilyHandle, key []byte) { C.rocksdb_writebatch_delete_cf(wb.c, cf.c, cKey, C.size_t(len(key))) } -func (wb *WriteBatch) DeleteRange(beginKey []byte, endKey []byte) { - cBeginKey := byteToChar(beginKey) +// DeleteRange deletes keys that are between [startKey, endKey) +func (wb *WriteBatch) DeleteRange(startKey []byte, endKey []byte) { + cStartKey := byteToChar(startKey) cEndKey := byteToChar(endKey) - C.rocksdb_writebatch_delete_range(wb.c, cBeginKey, C.size_t(len(beginKey)), cEndKey, C.size_t(len(endKey))) + C.rocksdb_writebatch_delete_range(wb.c, cStartKey, C.size_t(len(startKey)), cEndKey, C.size_t(len(endKey))) } -func (wb 
*WriteBatch) DeleteRangeCF(cf *ColumnFamilyHandle, beginKey []byte, endKey []byte) { - cBeginKey := byteToChar(beginKey) +// DeleteRangeCF deletes keys that are between [startKey, endKey) and +// belong to a given column family +func (wb *WriteBatch) DeleteRangeCF(cf *ColumnFamilyHandle, startKey []byte, endKey []byte) { + cStartKey := byteToChar(startKey) cEndKey := byteToChar(endKey) - C.rocksdb_writebatch_delete_range_cf(wb.c, cf.c, cBeginKey, C.size_t(len(beginKey)), cEndKey, C.size_t(len(endKey))) + C.rocksdb_writebatch_delete_range_cf(wb.c, cf.c, cStartKey, C.size_t(len(startKey)), cEndKey, C.size_t(len(endKey))) } // Data returns the serialized version of this batch. From 4cccc908455c0e7e184c4700f720d21ea01a867c Mon Sep 17 00:00:00 2001 From: linyuanjin Date: Mon, 3 Jun 2019 15:22:43 +0800 Subject: [PATCH 49/55] add test for DeleteRange --- write_batch_test.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/write_batch_test.go b/write_batch_test.go index f5b15555..f1c7a918 100644 --- a/write_batch_test.go +++ b/write_batch_test.go @@ -39,6 +39,18 @@ func TestWriteBatch(t *testing.T) { defer v2.Free() ensure.Nil(t, err) ensure.True(t, v2.Data() == nil) + + // DeleteRange test + wb.Clear() + wb.DeleteRange(givenKey1, givenKey2) + + // perform the batch + ensure.Nil(t, db.Write(wo, wb)) + + v1, err = db.Get(ro, givenKey1) + defer v1.Free() + ensure.Nil(t, err) + ensure.True(t, v1.Data() == nil) } func TestWriteBatchPutMany(t *testing.T) { From f708deb0a97ab182610b1d6060f954c9ee2a51b7 Mon Sep 17 00:00:00 2001 From: Vova Maksimchuk Date: Mon, 16 Aug 2021 07:17:21 +0200 Subject: [PATCH 50/55] Add test dockerfile --- test.Dockerfile | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 test.Dockerfile diff --git a/test.Dockerfile b/test.Dockerfile new file mode 100644 index 00000000..448d87cd --- /dev/null +++ b/test.Dockerfile @@ -0,0 +1,25 @@ +# This dockerile might be helpful for running gorocksdb tests in docker +# Example: +# > docker build --ssh default -f test.Dockerfile -t gorocksdb-test . 
+# > docker run -it --rm -v $(pwd):/go/gorocksdb --workdir /go/gorocksdb gorocksdb-test go test -v +FROM golang:1.16.7-alpine3.14 + +ENV GO111MODULE="auto" + +RUN apk add --no-cache openssh-client zlib-dev bzip2-dev lz4-dev snappy-dev zstd-dev gflags-dev +RUN apk add --no-cache build-base linux-headers git bash perl wget g++ automake + +RUN mkdir -p ~/.ssh/ && ssh-keyscan github.com > ~/.ssh/known_hosts + +RUN --mount=type=ssh git clone git@github.com:GetStream/rocksdb.git /rocksdb +RUN cd /rocksdb && \ + git checkout broadwell && \ + DISABLE_JEMALLOC=1 make static_lib -j5 + +RUN go get github.com/facebookgo/ensure && \ + go get github.com/stretchr/testify/assert + +ENV CGO_CFLAGS="-I/rocksdb/include" +ENV CGO_LDFLAGS="-L/rocksdb -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd" + +CMD ["bash"] \ No newline at end of file From e41569451ffbc41aa831c7d31ee82df7d396596c Mon Sep 17 00:00:00 2001 From: Vova Maksimchuk Date: Tue, 17 Aug 2021 08:34:54 +0200 Subject: [PATCH 51/55] Add go mods and compose --- Makefile | 7 +++++++ docker-compose.yml | 24 ++++++++++++++++++++++++ docker/Dockerfile.test | 19 +++++++++++++++++++ docker/entrypoint.sh | 17 +++++++++++++++++ go.mod | 10 ++++++++++ go.sum | 17 +++++++++++++++++ test.Dockerfile | 25 ------------------------- 7 files changed, 94 insertions(+), 25 deletions(-) create mode 100644 Makefile create mode 100644 docker-compose.yml create mode 100644 docker/Dockerfile.test create mode 100644 docker/entrypoint.sh create mode 100644 go.mod create mode 100644 go.sum delete mode 100644 test.Dockerfile diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..3e88fa09 --- /dev/null +++ b/Makefile @@ -0,0 +1,7 @@ +.PHONY: test-docker + +docker-clean: + @docker compose down -v --remove-orphans + +docker-test: + @docker compose build test && docker compose run --rm test go test -race=1 -v ./... diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..68a7abc2 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,24 @@ +version: "3" + +services: + test: + build: + context: . + dockerfile: docker/Dockerfile.test + image: test.gorocksdb.getstream.io + container_name: test.gorocksdb.getstream.io + profiles: + - test + command: + - go + - test + - -v + volumes: + - ~/.ssh:/root/.ssh:ro + - rocksdb.gorocksdb.getstream.io:/rocksdb + - gocache.gorocksdb.getstream.io:/root/.cache/go-build + - gopath.gorocksdb.getstream.io:/go +volumes: + rocksdb.gorocksdb.getstream.io: + gocache.gorocksdb.getstream.io: + gopath.gorocksdb.getstream.io: \ No newline at end of file diff --git a/docker/Dockerfile.test b/docker/Dockerfile.test new file mode 100644 index 00000000..0c363065 --- /dev/null +++ b/docker/Dockerfile.test @@ -0,0 +1,19 @@ +FROM golang:1.16.7-buster + +ENV GOBIN /go/bin + +RUN apt-get update && apt-get install -y openssh-client libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev libzstd-dev \ + liblz4-dev git-core curl wget perl bash g++ build-essential unzip + +RUN mkdir -p ~/.ssh/ && ssh-keyscan github.com > ~/.ssh/known_hosts +RUN git config --global url."git@github.com:".insteadOf "https://github.com/" + +ENV CGO_CFLAGS="-I/rocksdb/include" +ENV CGO_LDFLAGS="-L/rocksdb -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd" + +ADD . 
/gorocksdb
+WORKDIR /gorocksdb
+
+RUN chmod +x /gorocksdb/docker/entrypoint.sh
+
+ENTRYPOINT ["/gorocksdb/docker/entrypoint.sh"]
\ No newline at end of file
diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh
new file mode 100644
index 00000000..34be2eda
--- /dev/null
+++ b/docker/entrypoint.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+set -e
+
+if [ ! -f /rocksdb/librocksdb.a ]
+then
+    rm -rf /rocksdb/*
+
+    git clone git@github.com:GetStream/rocksdb.git /rocksdb && \
+    cd /rocksdb && \
+    git checkout broadwell && \
+    DISABLE_JEMALLOC=1 make static_lib -j5
+fi
+
+cd /gorocksdb
+
+exec "$@"
\ No newline at end of file
diff --git a/go.mod b/go.mod
new file mode 100644
index 00000000..739e40ad
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,10 @@
+module github.com/GetStream/gorocksdb
+
+go 1.16
+
+require (
+	github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c
+	github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
+	github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
+	github.com/stretchr/testify v1.7.0
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 00000000..76a9d22e
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,17 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0=
+github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
+github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk=
+github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/test.Dockerfile b/test.Dockerfile
deleted file mode 100644
index 448d87cd..00000000
--- a/test.Dockerfile
+++ /dev/null
@@ -1,25 +0,0 @@
-# This dockerfile might be helpful for running gorocksdb tests in docker
-# Example:
-# > docker build --ssh default -f test.Dockerfile -t gorocksdb-test .
-# > docker run -it --rm -v $(pwd):/go/gorocksdb --workdir /go/gorocksdb gorocksdb-test go test -v -FROM golang:1.16.7-alpine3.14 - -ENV GO111MODULE="auto" - -RUN apk add --no-cache openssh-client zlib-dev bzip2-dev lz4-dev snappy-dev zstd-dev gflags-dev -RUN apk add --no-cache build-base linux-headers git bash perl wget g++ automake - -RUN mkdir -p ~/.ssh/ && ssh-keyscan github.com > ~/.ssh/known_hosts - -RUN --mount=type=ssh git clone git@github.com:GetStream/rocksdb.git /rocksdb -RUN cd /rocksdb && \ - git checkout broadwell && \ - DISABLE_JEMALLOC=1 make static_lib -j5 - -RUN go get github.com/facebookgo/ensure && \ - go get github.com/stretchr/testify/assert - -ENV CGO_CFLAGS="-I/rocksdb/include" -ENV CGO_LDFLAGS="-L/rocksdb -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd" - -CMD ["bash"] \ No newline at end of file From 03e680823a50a075429028f3b3ee849b63d861f0 Mon Sep 17 00:00:00 2001 From: Vova Maksimchuk Date: Tue, 17 Aug 2021 08:40:06 +0200 Subject: [PATCH 52/55] Add new line --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index 68a7abc2..5d22b9a8 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -21,4 +21,4 @@ services: volumes: rocksdb.gorocksdb.getstream.io: gocache.gorocksdb.getstream.io: - gopath.gorocksdb.getstream.io: \ No newline at end of file + gopath.gorocksdb.getstream.io: From 1512bd4b02c257836182eb6635f8ffbcfd5f1d18 Mon Sep 17 00:00:00 2001 From: Vova Maksimchuk Date: Tue, 17 Aug 2021 08:41:58 +0200 Subject: [PATCH 53/55] Fix makefile --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 3e88fa09..abf3f2db 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ -.PHONY: test-docker - +.PHONY: docker-clean docker-clean: @docker compose down -v --remove-orphans +.PHONY: docker-test docker-test: @docker compose build test && docker compose run --rm test go test -race=1 -v ./... From ac984c83aeb3c3a4c96cb1dda87d2b4edf86077c Mon Sep 17 00:00:00 2001 From: Vova Maksimchuk Date: Tue, 17 Aug 2021 10:09:07 +0200 Subject: [PATCH 54/55] Add cgocheck=2 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index abf3f2db..1fba90b4 100644 --- a/Makefile +++ b/Makefile @@ -4,4 +4,4 @@ docker-clean: .PHONY: docker-test docker-test: - @docker compose build test && docker compose run --rm test go test -race=1 -v ./... + @docker compose build test && docker compose run --rm -e GODEBUG=cgocheck=2 test go test -race=1 -v ./... 
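Before the final patch, one note on the DeleteRange test added in PATCH 49 above: it is the only usage shown for ranged deletes, so here is a minimal caller-side sketch of the same API outside the test harness. This is illustrative only; the db and wo handles, the key names, and the import path (taken from the go.mod above) are assumptions, not part of the patch series.

package example

import (
	"log"

	"github.com/GetStream/gorocksdb"
)

// deleteUserRange queues a ranged delete and commits it in a single batch
// write. RocksDB range deletes are half-open: the end key itself survives.
func deleteUserRange(db *gorocksdb.DB, wo *gorocksdb.WriteOptions) {
	wb := gorocksdb.NewWriteBatch()
	defer wb.Destroy()

	// Queues deletion of every key in ["user:0001", "user:9999").
	wb.DeleteRange([]byte("user:0001"), []byte("user:9999"))

	if err := db.Write(wo, wb); err != nil {
		log.Fatal(err)
	}
}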
From fafa98c847269e6bba71349cd01f3a63c934b48d Mon Sep 17 00:00:00 2001 From: Aditya Agarwal Date: Mon, 12 May 2025 14:14:52 +0200 Subject: [PATCH 55/55] update lib --- db.go | 10 ++- dynflag.go | 6 +- filter_policy.go | 2 +- gorocksdb.c | 9 +-- gorocksdb.h | 17 +++- iterator.go | 25 +++--- options.go | 145 ++++++++++++++++------------------- options_block_based_table.go | 7 +- 8 files changed, 105 insertions(+), 116 deletions(-) diff --git a/db.go b/db.go index 9a8d99b1..d8b7bddb 100644 --- a/db.go +++ b/db.go @@ -596,7 +596,8 @@ func (db *DB) GetApproximateSizes(ranges []Range) []uint64 { &cStartLens[0], &cLimits[0], &cLimitLens[0], - (*C.uint64_t)(&sizes[0])) + (*C.uint64_t)(&sizes[0]), + nil) return sizes } @@ -638,7 +639,8 @@ func (db *DB) GetApproximateSizesCF(cf *ColumnFamilyHandle, ranges []Range) []ui &cStartLens[0], &cLimits[0], &cLimitLens[0], - (*C.uint64_t)(&sizes[0])) + (*C.uint64_t)(&sizes[0]), + nil) return sizes } @@ -749,7 +751,7 @@ func (db *DB) DisableFileDeletions() error { // EnableFileDeletions enables file deletions for the database. func (db *DB) EnableFileDeletions(force bool) error { var cErr *C.char - C.rocksdb_enable_file_deletions(db.c, boolToChar(force), &cErr) + C.rocksdb_enable_file_deletions(db.c, &cErr) if cErr != nil { defer C.free(unsafe.Pointer(cErr)) return errors.New(C.GoString(cErr)) @@ -763,7 +765,7 @@ func (db *DB) EnableFileDeletions(force bool) error { func (db *DB) DeleteFile(name string) { cName := C.CString(name) defer C.free(unsafe.Pointer(cName)) - C.rocksdb_delete_file(db.c, cName) + //C.rocksdb_delete_file(db.c, cName) } // DeleteFileInRange deletes SST files that contain keys between the Range, [r.Start, limitKey] diff --git a/dynflag.go b/dynflag.go index 81909317..81fe93b5 100644 --- a/dynflag.go +++ b/dynflag.go @@ -1,6 +1,8 @@ -// +build !linux !static +//go:build !embed +// +build !embed package gorocksdb -// #cgo LDFLAGS: -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy +// #cgo LDFLAGS: -L/Users/aditya/code/Keevo/.rocksdb-repo -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd +// #cgo CFLAGS: -I/Users/aditya/code/Keevo/.rocksdb-repo/include import "C" diff --git a/filter_policy.go b/filter_policy.go index ac57fd99..10c266ce 100644 --- a/filter_policy.go +++ b/filter_policy.go @@ -46,7 +46,7 @@ func (fp nativeFilterPolicy) Name() string { retur // FilterPolicy (like NewBloomFilterPolicy) that does not ignore // trailing spaces in keys. func NewBloomFilter(bitsPerKey int) FilterPolicy { - return NewNativeFilterPolicy(C.rocksdb_filterpolicy_create_bloom(C.int(bitsPerKey))) + return NewNativeFilterPolicy(C.rocksdb_filterpolicy_create_bloom(C.double(bitsPerKey))) } // Hold references to filter policies. 
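The db.go and filter_policy.go hunks above keep every public signature intact while the underlying C API shifts, so existing callers compile unchanged. A hedged sketch of the caller-visible behavior after this patch (the db handle and the SST file name are assumptions for illustration):

package example

import (
	"log"

	"github.com/GetStream/gorocksdb"
)

func compatSketch(db *gorocksdb.DB) {
	// The Go wrapper still accepts force, but the v10.2.1 C call takes no
	// such flag, so the argument is silently ignored now.
	if err := db.EnableFileDeletions(true); err != nil {
		log.Fatal(err)
	}

	// DeleteFile became a no-op: the rocksdb_delete_file call is commented
	// out above because the C API removed it.
	db.DeleteFile("000123.sst")

	// The public int signature is unchanged; the value is converted to
	// C.double internally now.
	_ = gorocksdb.NewBloomFilter(10)
}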
diff --git a/gorocksdb.c b/gorocksdb.c
index 7a8e2a7a..255558d1 100644
--- a/gorocksdb.c
+++ b/gorocksdb.c
@@ -29,13 +29,8 @@ rocksdb_compactionfilter_t* gorocksdb_compactionfilter_create(uintptr_t idx) {
 /* Filter Policy */
 
 rocksdb_filterpolicy_t* gorocksdb_filterpolicy_create(uintptr_t idx) {
-	return rocksdb_filterpolicy_create(
-		(void*)idx,
-		gorocksdb_destruct_handler,
-		(char* (*)(void*, const char* const*, const size_t*, int, size_t*))(gorocksdb_filterpolicy_create_filter),
-		(unsigned char (*)(void*, const char*, size_t, const char*, size_t))(gorocksdb_filterpolicy_key_may_match),
-		gorocksdb_filterpolicy_delete_filter,
-		(const char *(*)(void*))(gorocksdb_filterpolicy_name));
+	// This function is deprecated and removed in RocksDB v10.2.1
+	return NULL;
 }
 
 void gorocksdb_filterpolicy_delete_filter(void* state, const char* v, size_t s) {
diff --git a/gorocksdb.h b/gorocksdb.h
index 0a068a53..07e0acb0 100644
--- a/gorocksdb.h
+++ b/gorocksdb.h
@@ -1,4 +1,5 @@
 #include
+#include <stdbool.h>
 #include "rocksdb/c.h"
 
 typedef struct {
@@ -10,10 +11,18 @@ typedef struct {
 } gorocksdb_many_keys_t;
 
-typedef int bool;
-
-#define FALSE 0
-#define TRUE !FALSE
+// Compression types
+#define rocksdb_no_compression 0
+#define rocksdb_snappy_compression 1
+#define rocksdb_zlib_compression 2
+#define rocksdb_bz2_compression 3
+#define rocksdb_lz4_compression 4
+#define rocksdb_lz4hc_compression 5
+#define rocksdb_xpress_compression 6
+#define rocksdb_zstd_compression 7
+
+#define FALSE false
+#define TRUE true
 
 // This API provides convenient C wrapper functions for rocksdb client.
diff --git a/iterator.go b/iterator.go
index ae11f426..ac6b59bd 100644
--- a/iterator.go
+++ b/iterator.go
@@ -16,18 +16,17 @@ import (
 //
 // For example:
 //
-//      it := db.NewIterator(readOpts)
-//      defer it.Close()
+//	it := db.NewIterator(readOpts)
+//	defer it.Close()
 //
-//      it.Seek([]byte("foo"))
-//      for ; it.Valid(); it.Next() {
-//          fmt.Printf("Key: %v Value: %v\n", it.Key().Data(), it.Value().Data())
-//      }
-//
-//      if err := it.Err(); err != nil {
-//          return err
-//      }
+//	it.Seek([]byte("foo"))
+//	for ; it.Valid(); it.Next() {
+//		fmt.Printf("Key: %v Value: %v\n", it.Key().Data(), it.Value().Data())
+//	}
 //
+//	if err := it.Err(); err != nil {
+//		return err
+//	}
 type Iterator struct {
 	c *C.rocksdb_iterator_t
 }
@@ -143,7 +142,7 @@ func (iter *Iterator) fetchNextManyKeys(reverse bool, limit int, keyPrefix, keyE
 		cKeyFilter.key_end = cKeyEnd
 		cKeyFilter.key_end_s = C.size_t(len(keyEnd))
 	}
-	return &ManyKeys{c: C.gorocksdb_iter_many_keys(iter.c, C.int(limit), C.bool(btoi(reverse)), &cKeyFilter, C.int(ManyKeysPageAllocSize))}
+	return &ManyKeys{c: C.gorocksdb_iter_many_keys(iter.c, C.int(limit), C.bool(reverse), &cKeyFilter, C.int(ManyKeysPageAllocSize))}
 }
 
 // NextManyKeys...
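The btoi shim disappears from the hunk above because gorocksdb.h now includes stdbool.h, which lets cgo expose C's bool as Go's bool. A standalone cgo sketch of that mapping, separate from the patch and purely illustrative:

package main

/*
#include <stdbool.h>
static bool invert(bool v) { return !v; }
*/
import "C"

import "fmt"

func main() {
	reverse := true
	// With <stdbool.h> in scope, cgo maps C's bool to Go's bool, so a Go
	// bool converts directly through C.bool and no int shim is needed.
	fmt.Println(bool(C.invert(C.bool(reverse)))) // prints "false"
}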
@@ -176,8 +175,8 @@ func (iter *Iterator) ManySearchKeys(searches []KeysSearch) *ManyManyKeys { for i := range searches { cKSearch := C.gorocksdb_keys_search_t{ limit: C.int(searches[i].Limit), - reverse: C.bool(btoi(searches[i].Reverse)), - exclude_key_from: C.bool(btoi(searches[i].ExcludeKeyFrom)), + reverse: C.bool(searches[i].Reverse), + exclude_key_from: C.bool(searches[i].ExcludeKeyFrom), } cKSearch.key_from = C.CString(string(searches[i].KeyFrom)) cKSearch.key_from_s = C.size_t(len(searches[i].KeyFrom)) diff --git a/options.go b/options.go index 0f1376ad..f1259c69 100644 --- a/options.go +++ b/options.go @@ -225,7 +225,8 @@ func (opts *Options) SetParanoidChecks(value bool) { // // For example, you have a flash device with 10GB allocated for the DB, // as well as a hard drive of 2TB, you should config it to be: -// [{"/flash_path", 10GB}, {"/hard_drive", 2TB}] +// +// [{"/flash_path", 10GB}, {"/hard_drive", 2TB}] // // The system will try to guarantee data under each path is close to but // not larger than the target size. But current and future file sizes used @@ -476,17 +477,9 @@ func (opts *Options) SetLevel0StopWritesTrigger(value int) { C.rocksdb_options_set_level0_stop_writes_trigger(opts.c, C.int(value)) } -// SetMaxMemCompactionLevel sets the maximum level -// to which a new compacted memtable is pushed if it does not create overlap. -// -// We try to push to level 2 to avoid the -// relatively expensive level 0=>1 compactions and to avoid some -// expensive manifest file operations. We do not push all the way to -// the largest level since that can generate a lot of wasted disk -// space if the same key space is being repeatedly overwritten. -// Default: 2 +// SetMaxMemCompactionLevel is deprecated and has been removed in RocksDB v10.2.1 func (opts *Options) SetMaxMemCompactionLevel(value int) { - C.rocksdb_options_set_max_mem_compaction_level(opts.c, C.int(value)) + // This function is deprecated and removed in RocksDB v10.2.1 } // SetTargetFileSizeBase sets the target file size for compaction. @@ -531,17 +524,18 @@ func (opts *Options) SetMaxBytesForLevelMultiplier(value float64) { C.rocksdb_options_set_max_bytes_for_level_multiplier(opts.c, C.double(value)) } -// SetLevelCompactiondynamiclevelbytes specifies whether to pick +// SetLevelCompactionDynamicLevelBytes specifies whether to pick // target size of each level dynamically. // // We will pick a base level b >= 1. L0 will be directly merged into level b, // instead of always into level 1. Level 1 to b-1 need to be empty. // We try to pick b and its target size so that -// 1. target size is in the range of -// (max_bytes_for_level_base / max_bytes_for_level_multiplier, -// max_bytes_for_level_base] -// 2. target size of the last level (level num_levels-1) equals to extra size -// of the level. +// 1. target size is in the range of +// (max_bytes_for_level_base / max_bytes_for_level_multiplier, +// max_bytes_for_level_base] +// 2. target size of the last level (level num_levels-1) equals to extra size +// of the level. +// // At the same time max_bytes_for_level_multiplier and // max_bytes_for_level_multiplier_additional are still satisfied. // @@ -730,32 +724,19 @@ func (opts *Options) SetKeepLogFileNum(value int) { C.rocksdb_options_set_keep_log_file_num(opts.c, C.size_t(value)) } -// SetSoftRateLimit sets the soft rate limit. -// -// Puts are delayed 0-1 ms when any level has a compaction score that exceeds -// soft_rate_limit. This is ignored when == 0.0. -// CONSTRAINT: soft_rate_limit <= hard_rate_limit. 
If this constraint does not
-// hold, RocksDB will set soft_rate_limit = hard_rate_limit
-// Default: 0.0 (disabled)
+// SetSoftRateLimit is deprecated and has been removed in RocksDB v10.2.1
 func (opts *Options) SetSoftRateLimit(value float64) {
-	C.rocksdb_options_set_soft_rate_limit(opts.c, C.double(value))
+	// This function is deprecated and removed in RocksDB v10.2.1
 }
 
-// SetHardRateLimit sets the hard rate limit.
-//
-// Puts are delayed 1ms at a time when any level has a compaction score that
-// exceeds hard_rate_limit. This is ignored when <= 1.0.
-// Default: 0.0 (disabled)
+// SetHardRateLimit is deprecated and has been removed in RocksDB v10.2.1
 func (opts *Options) SetHardRateLimit(value float64) {
-	C.rocksdb_options_set_hard_rate_limit(opts.c, C.double(value))
+	// This function is deprecated and removed in RocksDB v10.2.1
 }
 
-// SetRateLimitDelayMaxMilliseconds sets the max time
-// a put will be stalled when hard_rate_limit is enforced.
-// If 0, then there is no limit.
-// Default: 1000
+// SetRateLimitDelayMaxMilliseconds is deprecated and has been removed in RocksDB v10.2.1
 func (opts *Options) SetRateLimitDelayMaxMilliseconds(value uint) {
-	C.rocksdb_options_set_rate_limit_delay_max_milliseconds(opts.c, C.uint(value))
+	// This function is deprecated and removed in RocksDB v10.2.1
 }
 
 // SetMaxManifestFileSize sets the maximal manifest file size until is rolled over.
@@ -771,17 +752,9 @@ func (opts *Options) SetTableCacheNumshardbits(value int) {
 	C.rocksdb_options_set_table_cache_numshardbits(opts.c, C.int(value))
 }
 
-// SetTableCacheRemoveScanCountLimit sets the count limit during a scan.
-//
-// During data eviction of table's LRU cache, it would be inefficient
-// to strictly follow LRU because this piece of memory will not really
-// be released unless its refcount falls to zero. Instead, make two
-// passes: the first pass will release items with refcount = 1,
-// and if not enough space releases after scanning the number of
-// elements specified by this parameter, we will remove items in LRU order.
-// Default: 16
+// SetTableCacheRemoveScanCountLimit is deprecated and has been removed in RocksDB v10.2.1
 func (opts *Options) SetTableCacheRemoveScanCountLimit(value int) {
-	C.rocksdb_options_set_table_cache_remove_scan_count_limit(opts.c, C.int(value))
+	// This function is deprecated and removed in RocksDB v10.2.1
 }
 
 // SetArenaBlockSize sets the size of one block in arena memory allocation.
@@ -804,17 +777,18 @@ func (opts *Options) SetDisableAutoCompactions(value bool) {
 // SetWALTtlSeconds sets the WAL ttl in seconds.
 //
 // The following two options affect how archived logs will be deleted.
-// 1. If both set to 0, logs will be deleted asap and will not get into
-//    the archive.
-// 2. If wal_ttl_seconds is 0 and wal_size_limit_mb is not 0,
-//    WAL files will be checked every 10 min and if total size is greater
-//    then wal_size_limit_mb, they will be deleted starting with the
-//    earliest until size_limit is met. All empty files will be deleted.
-// 3. If wal_ttl_seconds is not 0 and wall_size_limit_mb is 0, then
-//    WAL files will be checked every wal_ttl_seconds / 2 and those that
-//    are older than wal_ttl_seconds will be deleted.
-// 4. If both are not 0, WAL files will be checked every 10 min and both
-//    checks will be performed with ttl being first.
+// 1. If both are set to 0, logs will be deleted asap and will not get into
+//    the archive.
+// 2. If wal_ttl_seconds is 0 and wal_size_limit_mb is not 0,
+//    WAL files will be checked every 10 min and if total size is greater
+//    than wal_size_limit_mb, they will be deleted starting with the
+//    earliest until size_limit is met. All empty files will be deleted.
+// 3. If wal_ttl_seconds is not 0 and wal_size_limit_mb is 0, then
+//    WAL files will be checked every wal_ttl_seconds / 2 and those that
+//    are older than wal_ttl_seconds will be deleted.
+// 4. If both are not 0, WAL files will be checked every 10 min and both
+//    checks will be performed with ttl being first.
+//
 // Default: 0
 func (opts *Options) SetWALTtlSeconds(value uint64) {
 	C.rocksdb_options_set_WAL_ttl_seconds(opts.c, C.uint64_t(value))
@@ -840,11 +814,9 @@ func (opts *Options) SetManifestPreallocationSize(value int) {
 	C.rocksdb_options_set_manifest_preallocation_size(opts.c, C.size_t(value))
 }
 
-// SetPurgeRedundantKvsWhileFlush enable/disable purging of
-// duplicate/deleted keys when a memtable is flushed to storage.
-// Default: true
+// SetPurgeRedundantKvsWhileFlush is deprecated and has been removed in RocksDB v10.2.1
 func (opts *Options) SetPurgeRedundantKvsWhileFlush(value bool) {
-	C.rocksdb_options_set_purge_redundant_kvs_while_flush(opts.c, boolToChar(value))
+	// This function is deprecated and removed in RocksDB v10.2.1
 }
 
 // SetAllowMmapReads enable/disable mmap reads for reading sst tables.
@@ -878,12 +850,9 @@ func (opts *Options) SetIsFdCloseOnExec(value bool) {
 	C.rocksdb_options_set_is_fd_close_on_exec(opts.c, boolToChar(value))
 }
 
-// SetSkipLogErrorOnRecovery enable/disable skipping of
-// log corruption error on recovery (If client is ok with
-// losing most recent changes)
-// Default: false
+// SetSkipLogErrorOnRecovery is deprecated and has been removed in RocksDB v10.2.1
 func (opts *Options) SetSkipLogErrorOnRecovery(value bool) {
-	C.rocksdb_options_set_skip_log_error_on_recovery(opts.c, boolToChar(value))
+	// This function is deprecated and removed in RocksDB v10.2.1
 }
 
 // SetStatsDumpPeriodSec sets the stats dump period in seconds.
@@ -915,13 +884,9 @@ func (opts *Options) SetDbWriteBufferSize(value int) {
 	C.rocksdb_options_set_db_write_buffer_size(opts.c, C.size_t(value))
 }
 
-// SetAccessHintOnCompactionStart specifies the file access pattern
-// once a compaction is started.
-//
-// It will be applied to all input files of a compaction.
-// Default: NormalCompactionAccessPattern
+// SetAccessHintOnCompactionStart is deprecated and has been removed in RocksDB v10.2.1
 func (opts *Options) SetAccessHintOnCompactionStart(value CompactionAccessPattern) {
-	C.rocksdb_options_set_access_hint_on_compaction_start(opts.c, C.int(value))
+	// This function is deprecated and removed in RocksDB v10.2.1
 }
 
 // SetUseAdaptiveMutex enable/disable adaptive mutex, which spins
@@ -1012,7 +977,9 @@ func (opts *Options) SetInplaceUpdateNumLocks(value int) {
 // If <=0, it won't allocate from huge page but from malloc.
 // Users are responsible to reserve huge pages for it to be allocated. For
 // example:
-//      sysctl -w vm.nr_hugepages=20
+//
+//	sysctl -w vm.nr_hugepages=20
+//
 // See linux doc Documentation/vm/hugetlbpage.txt
 // If there isn't enough free huge page available, it will fall back to
 // malloc.
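Since the removed tuning knobs above are kept as exported no-op stubs rather than deleted, existing configuration code keeps compiling but silently loses those settings. A hedged sketch of what that means for a caller (NewDefaultOptions is assumed from a part of the package outside this diff):

package example

import "github.com/GetStream/gorocksdb"

func optionsSketch() *gorocksdb.Options {
	opts := gorocksdb.NewDefaultOptions()

	// Still wired through to the C API.
	opts.SetParanoidChecks(true)
	opts.SetDbWriteBufferSize(64 << 20)

	// These compile but do nothing now: their C-API counterparts were
	// removed in RocksDB v10.2.1, so the stubs drop the values silently.
	opts.SetSoftRateLimit(0.5)
	opts.SetPurgeRedundantKvsWhileFlush(true)

	return opts
}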
@@ -1080,7 +1047,8 @@ func (opts *Options) SetMemtableVectorRep() { // bucketCount: number of fixed array buckets // skiplistHeight: the max height of the skiplist // skiplistBranchingFactor: probabilistic size ratio between adjacent -// link lists in the skiplist +// +// link lists in the skiplist func (opts *Options) SetHashSkipListRep(bucketCount int, skiplistHeight, skiplistBranchingFactor int32) { C.rocksdb_options_set_hash_skip_list_rep(opts.c, C.size_t(bucketCount), C.int32_t(skiplistHeight), C.int32_t(skiplistBranchingFactor)) } @@ -1103,16 +1071,33 @@ func (opts *Options) SetHashLinkListRep(bucketCount int) { // a linear search is used. // // keyLen: plain table has optimization for fix-sized keys, -// which can be specified via keyLen. +// +// which can be specified via keyLen. +// // bloomBitsPerKey: the number of bits used for bloom filer per prefix. You -// may disable it by passing a zero. +// +// may disable it by passing a zero. +// // hashTableRatio: the desired utilization of the hash table used for prefix -// hashing. hashTableRatio = number of prefixes / #buckets -// in the hash table +// +// hashing. hashTableRatio = number of prefixes / #buckets +// in the hash table +// // indexSparseness: inside each prefix, need to build one index record for how -// many keys for binary search inside each hash bucket. +// +// many keys for binary search inside each hash bucket. func (opts *Options) SetPlainTableFactory(keyLen uint32, bloomBitsPerKey int, hashTableRatio float64, indexSparseness int) { - C.rocksdb_options_set_plain_table_factory(opts.c, C.uint32_t(keyLen), C.int(bloomBitsPerKey), C.double(hashTableRatio), C.size_t(indexSparseness)) + C.rocksdb_options_set_plain_table_factory( + opts.c, + C.uint32_t(keyLen), + C.int(bloomBitsPerKey), + C.double(hashTableRatio), + C.size_t(indexSparseness), + 0, // huge_page_tlb_size (default 0) + C.char(0), // encoding_type (default nil) + C.uchar(0), // full_scan_mode (default false) + C.uchar(0), // store_index_in_file (default false) + ) } // SetCreateIfMissingColumnFamilies specifies whether the column families diff --git a/options_block_based_table.go b/options_block_based_table.go index e91bed01..345a10f9 100644 --- a/options_block_based_table.go +++ b/options_block_based_table.go @@ -125,12 +125,9 @@ func (opts *BlockBasedTableOptions) SetBlockCache(cache *Cache) { C.rocksdb_block_based_options_set_block_cache(opts.c, cache.c) } -// SetBlockCacheCompressed sets the cache for compressed blocks. -// If nil, rocksdb will not use a compressed block cache. -// Default: nil +// SetBlockCacheCompressed is deprecated and has been removed in RocksDB v10.2.1 func (opts *BlockBasedTableOptions) SetBlockCacheCompressed(cache *Cache) { - opts.compCache = cache - C.rocksdb_block_based_options_set_block_cache_compressed(opts.c, cache.c) + // This function is deprecated and removed in RocksDB v10.2.1 } // SetWholeKeyFiltering specify if whole keys in the filter (not just prefixes)
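SetBlockCacheCompressed above is the same pattern at the table-options level: the compressed block cache no longer exists upstream, so only the regular block cache remains effective. A closing sketch under stated assumptions (NewDefaultBlockBasedTableOptions, NewLRUCache, and SetBlockBasedTableFactory come from parts of the package not shown in this diff):

package example

import "github.com/GetStream/gorocksdb"

func tableOptionsSketch(opts *gorocksdb.Options) {
	bbto := gorocksdb.NewDefaultBlockBasedTableOptions()

	// The regular block cache still works as before.
	bbto.SetBlockCache(gorocksdb.NewLRUCache(512 << 20))

	// A no-op after this patch: the cache argument is ignored because the
	// compressed block cache was removed upstream; the method survives
	// only for source compatibility.
	bbto.SetBlockCacheCompressed(gorocksdb.NewLRUCache(64 << 20))

	opts.SetBlockBasedTableFactory(bbto)
}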