10 changes: 10 additions & 0 deletions .github/workflows/ci.yml
@@ -91,6 +91,16 @@ jobs:
echo "ncpu=$ncpu" >> $GITHUB_ENV
echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV

- name: Install DLLs dependencies (Windows)
if: >
runner.os == 'Windows' &&
steps.windows-dlls-cache.outputs.cache-hit != 'true'
run: |
mkdir -p external
curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
7z x -y external/windeps.zip -oexternal/dlls-${{ matrix.target.cpu }}
echo "${{ github.workspace }}/external/dlls-${{ matrix.target.cpu }}" >> $GITHUB_PATH

- name: Build Nim and Nimble
run: |
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
8 changes: 8 additions & 0 deletions rocksdb/columnfamily.nim
@@ -86,6 +86,14 @@ template multiGet*(
## Get a batch of values for the given set of keys.
cf.db.multiGet(keys, sortedInput, cf.handle)

template multiGet*[N](
cf: ColFamilyReadOnly | ColFamilyReadWrite,
keys: array[N, seq[byte]],
sortedInput = false,
): RocksDBResult[array[N, Opt[seq[byte]]]] =
## Get a batch of values for the given set of keys.
## Use this variant when the number of keys is known at compile time.
cf.db.multiGet(keys, sortedInput, cf.handle)
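
A minimal usage sketch of this array overload (hypothetical keys; cf is assumed to be a column family obtained via getColFamily). Since the key count is part of the type, the result is a fixed-size array of Opt values:

# Sketch only: cf is assumed to be an open ColFamilyReadWrite.
let keys = [@[1.byte], @[2.byte], @[3.byte]]
let res = cf.multiGet(keys) # RocksDBResult[array[3, Opt[seq[byte]]]]
if res.isOk():
  for i, value in res.get():
    if value.isSome():
      echo "key ", i, ": ", value.get()
    else:
      echo "key ", i, ": not found"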

template put*(cf: ColFamilyReadWrite, key, val: openArray[byte]): RocksDBResult[void] =
## Puts a value for the given key into the column family.
cf.db.put(key, val, cf.handle)
78 changes: 77 additions & 1 deletion rocksdb/rocksdb.nim
@@ -38,6 +38,8 @@ export
writebatch, writebatchwi, snapshot.SnapshotRef, snapshot.isClosed,
snapshot.getSequenceNumber

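# Reusable zero-length value; multiGet returns this for keys whose stored value is empty.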
const emptyValue*: seq[byte] = @[]

type
RocksDbPtr* = ptr rocksdb_t
IngestExternalFilesOptionsPtr = ptr rocksdb_ingestexternalfileoptions_t
@@ -341,7 +343,7 @@ proc multiGet*(
var vLen: csize_t = 0
let src = rocksdb_pinnableslice_value(v, vLen.addr)
if vLen == 0:
values[i] = Opt.some(default(seq[byte]))
values[i] = Opt.some(emptyValue)
continue

assert vLen > 0
@@ -356,6 +358,80 @@

ok(values)

proc multiGet*[N](
db: RocksDbRef,
keys: array[N, seq[byte]],
sortedInput = false,
cfHandle = db.defaultCfHandle,
): RocksDBResult[array[N, Opt[seq[byte]]]] =
## Get a batch of values for the given set of keys.
## Use this variant when the number of keys is known at compile time.
##
## The multiGet API improves performance by batching key lookups in the
## read path. Currently, only the block-based table format with full
## filters is supported. Other table formats such as plain table,
## block-based table with block-based filters and partitioned indexes
## will still work, but will not get any performance benefits.
##
## sortedInput - If true, it means the input keys are already sorted by key
## order, so the MultiGet() API doesn't have to sort them again. If false,
## the keys will be copied and sorted internally by the API - the input
## array will not be modified.
assert keys.len() > 0

var
keysList {.noinit.}: array[N, cstring]
keysListSizes {.noinit.}: array[N, csize_t]
errors: array[N, cstring]

for i in 0 .. keys.high:
keysList[i] = cast[cstring](keys[i].unsafeAddrOrNil())
keysListSizes[i] = csize_t(keys[i].len)

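# One pinnable slice per key; a nil slot below means no value was found for that key.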
var valuesPtrs: array[N, ptr rocksdb_pinnableslice_t]
rocksdb_batched_multi_get_cf(
db.cPtr,
db.readOpts.cPtr,
cfHandle.cPtr,
csize_t(keys.len),
cast[cstringArray](keysList[0].addr),
keysListSizes[0].addr,
valuesPtrs[0].addr,
cast[cstringArray](errors[0].addr),
sortedInput,
)

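# Return the first per-key error reported by RocksDB (copied to a Nim string before the C string is freed).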
for e in errors:
if not e.isNil:
let res = err($(e))
rocksdb_free(e)
return res

var values {.noinit.}: array[N, Opt[seq[byte]]]
for i, v in valuesPtrs:
if v.isNil():
values[i] = Opt.none(seq[byte])
continue

var vLen: csize_t = 0
let src = rocksdb_pinnableslice_value(v, vLen.addr)
if vLen == 0:
values[i] = Opt.some(emptyValue)
continue

assert vLen > 0
# Uninitialised allocation is safe here: copyMem below fills every byte.
var dest =
when (NimMajor, NimMinor) >= (2, 2):
newSeqUninit[byte](vLen.int)
else:
newSeqUninitialized[byte](vLen.int)
copyMem(dest[0].addr, src, vLen)
values[i] = Opt.some(dest)
rocksdb_pinnableslice_destroy(v)

ok(values)
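
A hedged usage sketch of the compile-time-sized variant (db and the keys below are placeholders). Setting sortedInput to true skips the internal sort when the caller already supplies the keys in key order:

# Sketch only: db is assumed to be an open RocksDbReadWriteRef.
let keys = [@[1.byte], @[2.byte]]
let res = db.multiGet(keys, sortedInput = true)
if res.isOk():
  # res.get() has type array[2, Opt[seq[byte]]]; missing keys are Opt.none.
  for value in res.get():
    if value.isSome():
      echo value.get()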

proc put*(
db: RocksDbReadWriteRef, key, val: openArray[byte], cfHandle = db.defaultCfHandle
): RocksDBResult[void] =
27 changes: 25 additions & 2 deletions tests/test_columnfamily.nim
@@ -131,7 +131,7 @@ suite "ColFamily Tests":
let
keyValue1 = @[100.byte]
keyValue2 = @[300.byte]
keyValue3 = default(seq[byte])
keyValue3 = emptyValue

check:
cf.put(keyValue1, keyValue1).isOk()
@@ -146,4 +146,27 @@
dataRes.len() == 3
dataRes[0] == Opt.some(keyValue1)
dataRes[1] == Opt.some(keyValue2)
dataRes[2] == Opt.some(default(seq[byte]))
dataRes[2] == Opt.some(emptyValue)

test "Test multiget - array":
let cf = db.getColFamily(CF_OTHER).get()

let
keyValue1 = @[100.byte]
keyValue2 = @[300.byte]
keyValue3 = emptyValue

check:
cf.put(keyValue1, keyValue1).isOk()
cf.put(keyValue2, keyValue2).isOk()
cf.put(keyValue3, keyValue3).isOk()
cf.keyExists(keyValue1).get() == true
cf.keyExists(keyValue2).get() == true
cf.keyExists(keyValue3).get() == true

let dataRes = cf.multiGet([keyValue1, keyValue2, keyValue3]).expect("ok")
check:
dataRes.len() == 3
dataRes[0] == Opt.some(keyValue1)
dataRes[1] == Opt.some(keyValue2)
dataRes[2] == Opt.some(emptyValue)
59 changes: 59 additions & 0 deletions tests/test_rocksdb.nim
@@ -626,3 +626,62 @@ suite "RocksDbRef Tests":
dataRes[6] == Opt.some(keyValue7)
dataRes[7] == Opt.none(seq[byte])
dataRes[8] == Opt.some(keyValue9)

test "Test multiget - array":
let
keyValue1 = @[1.byte]
keyValue2 = @[2.byte]
keyValue3 = @[3.byte]
keyValue4 = @[4.byte]
keyValue5 = @[5.byte]
keyValue6 = @[6.byte]
keyValue7 = @[7.byte]
keyValue8 = @[8.byte]
keyValue9 = @[9.byte]

check:
db.put(keyValue1, keyValue1).isOk()
db.put(keyValue2, keyValue2).isOk()
db.put(keyValue5, keyValue5).isOk()
db.put(keyValue7, keyValue7).isOk()
db.put(keyValue9, keyValue9).isOk()
db.keyExists(keyValue1).get() == true
db.keyExists(keyValue2).get() == true
db.keyExists(keyValue3).get() == false

block:
let dataRes = db.multiGet([keyValue1]).expect("ok")
check:
dataRes.len() == 1
dataRes[0] == Opt.some(keyValue1)

block:
let dataRes = db.multiGet([keyValue1, keyValue2]).expect("ok")
check:
dataRes.len() == 2
dataRes[0] == Opt.some(keyValue1)
dataRes[1] == Opt.some(keyValue2)

block:
let dataRes = db.multiGet([keyValue2, keyValue3]).expect("ok")
check:
dataRes.len() == 2
dataRes[0] == Opt.some(keyValue2)
dataRes[1] == Opt.none(seq[byte])

block:
let dataRes = db.multiGet([keyValue1, keyValue2, keyValue3]).expect("ok")
check:
dataRes.len() == 3
dataRes[0] == Opt.some(keyValue1)
dataRes[1] == Opt.some(keyValue2)
dataRes[2] == Opt.none(seq[byte])

block:
let dataRes =
db.multiGet([keyValue1, keyValue2, keyValue3], sortedInput = true).expect("ok")
check:
dataRes.len() == 3
dataRes[0] == Opt.some(keyValue1)
dataRes[1] == Opt.some(keyValue2)
dataRes[2] == Opt.none(seq[byte])