1
1
mirror of https://github.com/wader/fq.git synced 2024-09-11 12:05:39 +03:00

leveldb: decode unfragmented .log files further; fix UTF8 decoding

decode unfragmented .log files:

 - break leveldb_log.go into leveldb_log_blocks.go and leveldb_log.go;
   the former is used by both .MANIFEST (descriptor) and .LOG.
 - in leveldb_log, introduce readBatch that decodes further

 fix UTF8 decoding:

 - introduce fieldUTF8ReturnBytes and stringify to handle multi-byte
   UTF8-encodings correctly.
This commit is contained in:
Michael B. 2023-12-09 14:13:33 +01:00
parent e826f097d3
commit 2f5f183106
13 changed files with 504 additions and 365 deletions

View File

@ -706,13 +706,14 @@ $ fq -r -o array=true -d html '.. | select(.[0] == "a" and .[1].href)?.[1].href'
### References
- https://github.com/google/leveldb/blob/main/doc/impl.md#manifest
- https://github.com/google/leveldb/blob/main/doc/log_format.md
- https://github.com/google/leveldb/blob/main/db/version_edit.cc
## leveldb_log
### Limitations
- individual record contents are not merged nor decoded further.
- fragmented non-"full" records are not merged and decoded further.
### Authors
@ -720,7 +721,9 @@ $ fq -r -o array=true -d html '.. | select(.[0] == "a" and .[1].href)?.[1].href'
### References
- https://github.com/google/leveldb/blob/main/doc/impl.md#log-files
- https://github.com/google/leveldb/blob/main/doc/log_format.md
- https://github.com/google/leveldb/blob/main/db/write_batch.cc
## leveldb_table

View File

@ -108,13 +108,6 @@ func readManifest(d *decode.D) {
})
}
func readLengthPrefixedString(name string, d *decode.D) {
d.FieldStruct(name, func(d *decode.D) {
length := d.FieldULEB128("length")
d.FieldUTF8("data", int(length))
})
}
func readTagInternalKey(name string, d *decode.D) {
d.FieldStruct(name, func(d *decode.D) {
length := d.FieldULEB128("length")

View File

@ -9,4 +9,5 @@
### References
- https://github.com/google/leveldb/blob/main/doc/impl.md#manifest
- https://github.com/google/leveldb/blob/main/doc/log_format.md
- https://github.com/google/leveldb/blob/main/db/version_edit.cc

View File

@ -1,19 +1,17 @@
package leveldb
// https://github.com/google/leveldb/blob/main/doc/log_format.md
// https://github.com/google/leveldb/blob/main/doc/impl.md#log-files
// https://github.com/google/leveldb/blob/main/db/write_batch.cc
//
// Files in LevelDB using this format include:
// - *.log
// - MANIFEST-*
import (
"embed"
"github.com/wader/fq/format"
"github.com/wader/fq/internal/mathex"
"github.com/wader/fq/pkg/decode"
"github.com/wader/fq/pkg/interp"
"github.com/wader/fq/pkg/scalar"
)
//go:embed leveldb_log.md
@ -29,124 +27,60 @@ func init() {
interp.RegisterFS(leveldbLogFS)
}
type recordReadOptions struct {
// Both .log- and MANIFEST-files use the Log-format,
// i.e., a sequence of records split into 32KB blocks.
// However, the format of the data within the records differ.
// This function specifies how to read said data.
readDataFn func(size int64, recordType int, d *decode.D)
}
// https://github.com/google/leveldb/blob/main/db/log_format.h
const (
// checksum (4 bytes) + length (2 bytes) + record type (1 byte)
headerSize = (4 + 2 + 1) * 8
blockSize = (32 * 1024) * 8 // 32KB
recordTypeZero = 0 // preallocated file regions
recordTypeFull = 1
recordTypeFirst = 2 // fragments
recordTypeMiddle = 3
recordTypeLast = 4
)
var recordTypes = scalar.UintMapSymStr{
recordTypeZero: "zero",
recordTypeFull: "full",
recordTypeFirst: "first",
recordTypeMiddle: "middle",
recordTypeLast: "last",
}
func ldbLogDecode(d *decode.D) any {
rro := recordReadOptions{readDataFn: func(size int64, recordType int, d *decode.D) {
d.FieldRawLen("data", size)
if recordType == recordTypeFull {
d.FieldStruct("data", func(d *decode.D) {
d.LimitedFn(size, readBatch)
})
} else {
d.FieldRawLen("data", size)
}
}}
readBlockSequence(rro, d)
return nil
}
// Read a sequence of 32KB-blocks (the last one may be less).
// https://github.com/google/leveldb/blob/main/db/log_reader.cc#L189
func readBlockSequence(rro recordReadOptions, d *decode.D) {
d.Endian = decode.LittleEndian
d.FieldArray("blocks", func(d *decode.D) {
for d.BitsLeft() >= headerSize {
d.LimitedFn(mathex.Min(blockSize, d.BitsLeft()), func(d *decode.D) {
d.FieldStruct("block", bind(readLogBlock, rro))
})
}
})
if d.BitsLeft() > 0 {
// The reference implementation says:
// "[...] if buffer_ is non-empty, we have a truncated header at the
// end of the file, which can be caused by the writer crashing in the
// middle of writing the header. Instead of considering this an error,
// just report EOF."
d.FieldRawLen("truncated_block", d.BitsLeft())
}
}
// Read a Log-block, consisting of up to 32KB of records and an optional trailer.
// https://github.com/google/leveldb/blob/main/db/write_batch.cc#L5-L14
//
// block := record* trailer?
func readLogBlock(rro recordReadOptions, d *decode.D) {
if d.BitsLeft() > blockSize {
d.Fatalf("Bits left greater than maximum log-block size of 32KB.")
}
// record*
// WriteBatch::rep_ :=
//
// sequence: fixed64
// count: fixed32
// data: record[count]
//
// record :=
//
// kTypeValue varstring varstring
// kTypeDeletion varstring
//
// varstring :=
//
// len: varint32
// data: uint8[len]
func readBatch(d *decode.D) {
d.FieldU64("sequence")
expectedCount := d.FieldU32("count")
actualCount := uint64(0)
d.FieldArray("records", func(d *decode.D) {
for d.BitsLeft() >= headerSize {
d.FieldStruct("record", bind(readLogRecord, rro))
for !d.End() {
d.FieldStruct("record", func(d *decode.D) {
valueType := d.FieldULEB128("type", valueTypes)
switch valueType {
case valueTypeDeletion:
readLengthPrefixedString("key", d)
case valueTypeValue:
readLengthPrefixedString("key", d)
readLengthPrefixedString("value", d)
default:
d.Fatalf("unknown value type: %d", valueType)
}
})
actualCount++
}
})
// trailer?
if d.BitsLeft() > 0 {
d.FieldRawLen("trailer", d.BitsLeft())
}
}
// Read a Log-record.
//
// checksum: uint32 // crc32c of type and data[] ; little-endian
// length: uint16 // little-endian
// type: uint8 // One of FULL, FIRST, MIDDLE, LAST
// data: uint8[length]
//
// via https://github.com/google/leveldb/blob/main/doc/log_format.md
func readLogRecord(rro recordReadOptions, d *decode.D) {
// header
var checksumValue *decode.Value
var length int64
var recordType int
d.LimitedFn(headerSize, func(d *decode.D) {
d.FieldStruct("header", func(d *decode.D) {
d.FieldU32("checksum", scalar.UintHex)
checksumValue = d.FieldGet("checksum")
length = int64(d.FieldU16("length"))
recordType = int(d.FieldU8("record_type", recordTypes))
})
})
// verify checksum: record type (1 byte) + data (`length` bytes)
d.RangeFn(d.Pos()-8, (1+length)*8, func(d *decode.D) {
bytesToCheck := d.Bits(int(d.BitsLeft()))
actualChecksum := computeChecksum(bytesToCheck)
_ = checksumValue.TryUintScalarFn(d.UintAssert(uint64(actualChecksum)))
})
// data
dataSize := length * 8
rro.readDataFn(dataSize, recordType, d)
}
// simplified `functools.partial` (Python) or `Function.prototype.bind` (JavaScript)
func bind(f func(recordReadOptions, *decode.D), rro recordReadOptions) func(*decode.D) {
return func(d *decode.D) {
f(rro, d)
if actualCount != expectedCount {
d.Errorf("actual record count (%d) does not equal expected count (%d)", actualCount, expectedCount)
}
}

View File

@ -1,6 +1,6 @@
### Limitations
- individual record contents are not merged nor decoded further.
- fragmented non-"full" records are not merged and decoded further.
### Authors
@ -8,4 +8,6 @@
### References
- https://github.com/google/leveldb/blob/main/doc/impl.md#log-files
- https://github.com/google/leveldb/blob/main/doc/log_format.md
- https://github.com/google/leveldb/blob/main/db/write_batch.cc

View File

@ -0,0 +1,133 @@
package leveldb
// https://github.com/google/leveldb/blob/main/doc/log_format.md
//
// Files in LevelDB using the "log-format" of block sequences include:
// - *.log
// - MANIFEST-*
import (
"github.com/wader/fq/internal/mathex"
"github.com/wader/fq/pkg/decode"
"github.com/wader/fq/pkg/scalar"
)
// recordReadOptions parameterizes the shared Log-format reader.
type recordReadOptions struct {
	// Both .log- and MANIFEST-files use the Log-format,
	// i.e., a sequence of records split into 32KB blocks.
	// However, the format of the data within the records differ.
	// This function specifies how to read said data.
	readDataFn func(size int64, recordType int, d *decode.D)
}

// Record header layout and record types.
// https://github.com/google/leveldb/blob/main/db/log_format.h
const (
	// checksum (4 bytes) + length (2 bytes) + record type (1 byte)
	headerSize = (4 + 2 + 1) * 8
	blockSize  = (32 * 1024) * 8 // 32KB

	recordTypeZero   = 0 // preallocated file regions
	recordTypeFull   = 1
	recordTypeFirst  = 2 // fragments
	recordTypeMiddle = 3
	recordTypeLast   = 4
)

// recordTypes maps the record-type constants above to symbolic names
// for display.
var recordTypes = scalar.UintMapSymStr{
	recordTypeZero:   "zero",
	recordTypeFull:   "full",
	recordTypeFirst:  "first",
	recordTypeMiddle: "middle",
	recordTypeLast:   "last",
}
// Read a sequence of 32KB-blocks (the last one may be less).
// rro supplies the per-record payload reader (differs between
// .log and MANIFEST files).
// https://github.com/google/leveldb/blob/main/db/log_reader.cc#L189
func readBlockSequence(rro recordReadOptions, d *decode.D) {
	d.Endian = decode.LittleEndian
	d.FieldArray("blocks", func(d *decode.D) {
		// another block exists only if at least a full record header fits
		for d.BitsLeft() >= headerSize {
			d.LimitedFn(mathex.Min(blockSize, d.BitsLeft()), func(d *decode.D) {
				d.FieldStruct("block", bind(readLogBlock, rro))
			})
		}
	})
	if d.BitsLeft() > 0 {
		// The reference implementation says:
		// "[...] if buffer_ is non-empty, we have a truncated header at the
		// end of the file, which can be caused by the writer crashing in the
		// middle of writing the header. Instead of considering this an error,
		// just report EOF."
		d.FieldRawLen("truncated_block", d.BitsLeft())
	}
}
// Read a Log-block, consisting of up to 32KB of records and an optional trailer.
//
//	block := record* trailer?
//
// The caller (readBlockSequence) limits d to at most blockSize bits;
// the Fatalf below guards that invariant.
func readLogBlock(rro recordReadOptions, d *decode.D) {
	if d.BitsLeft() > blockSize {
		d.Fatalf("Bits left greater than maximum log-block size of 32KB.")
	}
	// record*
	d.FieldArray("records", func(d *decode.D) {
		for d.BitsLeft() >= headerSize {
			d.FieldStruct("record", bind(readLogRecord, rro))
		}
	})
	// trailer? (fewer than headerSize bits remain; kept as raw padding)
	if d.BitsLeft() > 0 {
		d.FieldRawLen("trailer", d.BitsLeft())
	}
}
// Read a Log-record.
//
//	checksum: uint32 // crc32c of type and data[] ; little-endian
//	length: uint16 // little-endian
//	type: uint8 // One of FULL, FIRST, MIDDLE, LAST
//	data: uint8[length]
//
// via https://github.com/google/leveldb/blob/main/doc/log_format.md
func readLogRecord(rro recordReadOptions, d *decode.D) {
	// header
	var checksumValue *decode.Value
	var length int64
	var recordType int
	d.LimitedFn(headerSize, func(d *decode.D) {
		d.FieldStruct("header", func(d *decode.D) {
			d.FieldU32("checksum", scalar.UintHex)
			checksumValue = d.FieldGet("checksum")
			length = int64(d.FieldU16("length"))
			recordType = int(d.FieldU8("record_type", recordTypes))
		})
	})
	// verify checksum: record type (1 byte) + data (`length` bytes).
	// d.Pos()-8 backs up over the record_type byte just consumed so it
	// is included in the checksummed range.
	d.RangeFn(d.Pos()-8, (1+length)*8, func(d *decode.D) {
		bytesToCheck := d.Bits(int(d.BitsLeft()))
		actualChecksum := computeChecksum(bytesToCheck)
		// best-effort: a mismatch is recorded on the field rather than fatal
		_ = checksumValue.TryUintScalarFn(d.UintAssert(uint64(actualChecksum)))
	})
	// data: payload decoding is delegated to the caller-supplied reader
	dataSize := length * 8
	rro.readDataFn(dataSize, recordType, d)
}
// readLengthPrefixedString decodes a varstring — a ULEB128 byte count
// followed by that many bytes of UTF-8 data — as a struct field `name`
// with sub-fields "length" and "data".
func readLengthPrefixedString(name string, d *decode.D) {
	d.FieldStruct(name, func(d *decode.D) {
		d.FieldUTF8("data", int(d.FieldULEB128("length")))
	})
}
// bind partially applies opts to fn, producing a plain decode function;
// a simplified `functools.partial` (Python) or `Function.prototype.bind` (JavaScript).
func bind(fn func(recordReadOptions, *decode.D), opts recordReadOptions) func(*decode.D) {
	return func(d *decode.D) { fn(opts, d) }
}

View File

@ -61,9 +61,14 @@ var compressionTypes = scalar.UintMapSymStr{
}
// https://github.com/google/leveldb/blob/main/db/dbformat.h#L54
const (
valueTypeDeletion = 0x0
valueTypeValue = 0x1
)
var valueTypes = scalar.UintMapSymStr{
0x0: "deletion",
0x1: "value",
valueTypeDeletion: "deletion",
valueTypeValue: "value",
}
type blockHandle struct {
@ -360,8 +365,8 @@ func readInternalKey(sharedBytes []byte, unsharedSize int, d *decode.D) error {
// case 2: type and sequence_number fit fully in unshared: simulate user_key value.
if unsharedSize >= typeAndSequenceNumberSize {
br := d.FieldRawLen("user_key_suffix", int64(unsharedSize-typeAndSequenceNumberSize)*8)
d.FieldValueStr("user_key", string(append(sharedBytes, d.ReadAllBits(br)...)), strInferred)
suffix := fieldUTF8ReturnBytes("user_key_suffix", unsharedSize-typeAndSequenceNumberSize, d)
d.FieldValueStr("user_key", stringify(sharedBytes, suffix), strInferred)
d.FieldU8("type", valueTypes, scalar.UintHex)
d.FieldU56("sequence_number")
return nil
@ -441,3 +446,21 @@ func mask(crc uint32) uint32 {
// Rotate right by 15 bits and add a constant.
return ((crc >> 15) | (crc << 17)) + kMaskDelta
}
// stringify concatenates byte slices and converts the result into a string.
// A nil or empty input yields "".
func stringify(byteSlices ...[]byte) string {
	// pre-size to avoid repeated growth copies during append
	total := 0
	for _, b := range byteSlices {
		total += len(b)
	}
	result := make([]byte, 0, total)
	for _, b := range byteSlices {
		result = append(result, b...)
	}
	return string(result)
}
// fieldUTF8ReturnBytes adds a UTF-8 string field `name` of nBytes bytes and
// also returns the raw bytes backing it, so callers can combine them with
// other data (e.g. shared key prefixes) without re-decoding.
func fieldUTF8ReturnBytes(name string, nBytes int, d *decode.D) []byte {
	var result []byte
	// NOTE(review): assumes RangeFn reads at d.Pos() without advancing the
	// position, so FieldUTF8 below consumes the same range — confirm against
	// the decode package.
	d.RangeFn(d.Pos(), int64(nBytes)*8, func(d *decode.D) {
		result = d.BytesLen(nBytes)
	})
	d.FieldUTF8(name, nBytes)
	return result
}

View File

@ -20,4 +20,5 @@ Authors
References
==========
- https://github.com/google/leveldb/blob/main/doc/impl.md#manifest
- https://github.com/google/leveldb/blob/main/doc/log_format.md
- https://github.com/google/leveldb/blob/main/db/version_edit.cc

View File

@ -11,7 +11,7 @@ Decode examples
Limitations
===========
- individual record contents are not merged nor decoded further.
- fragmented non-"full" records are not merged and decoded further.
Authors
=======
@ -19,4 +19,6 @@ Authors
References
==========
- https://github.com/google/leveldb/blob/main/doc/impl.md#log-files
- https://github.com/google/leveldb/blob/main/doc/log_format.md
- https://github.com/google/leveldb/blob/main/db/write_batch.cc

View File

@ -8,33 +8,80 @@ $ fq -d leveldb_log dv log_only.ldb/000003.log
0x000|18 93 40 61 |..@a | checksum: 0x61409318 (valid) 0x0-0x4 (4)
0x000| d8 01 | .. | length: 472 0x4-0x6 (2)
0x000| 01 | . | record_type: "full" (1) 0x6-0x7 (1)
0x000| 01 00 00 00 00 00 00 00 01| .........| data: raw bits 0x7-0x1df (472)
0x010|00 00 00 01 0b 6c 6f 72 65 6d 2e 6c 6f 72 65 6d|.....lorem.lorem|
* |until 0x1de.7 (472) | |
| | | data{}: 0x7-0x1df (472)
0x000| 01 00 00 00 00 00 00 00 | ........ | sequence: 1 0x7-0xf (8)
0x000| 01| .| count: 1 0xf-0x13 (4)
0x010|00 00 00 |... |
| | | records[0:1]: 0x13-0x1df (460)
| | | [0]{}: record 0x13-0x1df (460)
0x010| 01 | . | type: "value" (1) 0x13-0x14 (1)
| | | key{}: 0x14-0x20 (12)
0x010| 0b | . | length: 11 0x14-0x15 (1)
0x010| 6c 6f 72 65 6d 2e 6c 6f 72 65 6d| lorem.lorem| data: "lorem.lorem" 0x15-0x20 (11)
| | | value{}: 0x20-0x1df (447)
0x020|bd 03 |.. | length: 445 0x20-0x22 (2)
0x020| 4c 6f 72 65 6d 20 69 70 73 75 6d 20 64 6f| Lorem ipsum do| data: "Lorem ipsum dolor sit amet, consectetur adipisc..." 0x22-0x1df (445)
0x030|6c 6f 72 20 73 69 74 20 61 6d 65 74 2c 20 63 6f|lor sit amet, co|
* |until 0x1de.7 (445) | |
| | | [1]{}: record 0x1df-0x3be (479)
| | | header{}: 0x1df-0x1e6 (7)
0x1d0| 5a| Z| checksum: 0x12ba655a (valid) 0x1df-0x1e3 (4)
0x1e0|65 ba 12 |e.. |
0x1e0| d8 01 | .. | length: 472 0x1e3-0x1e5 (2)
0x1e0| 01 | . | record_type: "full" (1) 0x1e5-0x1e6 (1)
0x1e0| 02 00 00 00 00 00 00 00 01 00| ..........| data: raw bits 0x1e6-0x3be (472)
0x1f0|00 00 01 0b 6c 6f 72 65 6d 2e 69 70 73 75 6d bd|....lorem.ipsum.|
* |until 0x3bd.7 (472) | |
| | | data{}: 0x1e6-0x3be (472)
0x1e0| 02 00 00 00 00 00 00 00 | ........ | sequence: 2 0x1e6-0x1ee (8)
0x1e0| 01 00| ..| count: 1 0x1ee-0x1f2 (4)
0x1f0|00 00 |.. |
| | | records[0:1]: 0x1f2-0x3be (460)
| | | [0]{}: record 0x1f2-0x3be (460)
0x1f0| 01 | . | type: "value" (1) 0x1f2-0x1f3 (1)
| | | key{}: 0x1f3-0x1ff (12)
0x1f0| 0b | . | length: 11 0x1f3-0x1f4 (1)
0x1f0| 6c 6f 72 65 6d 2e 69 70 73 75 6d | lorem.ipsum | data: "lorem.ipsum" 0x1f4-0x1ff (11)
| | | value{}: 0x1ff-0x3be (447)
0x1f0| bd| .| length: 445 0x1ff-0x201 (2)
0x200|03 |. |
0x200| 4c 6f 72 65 6d 20 69 70 73 75 6d 20 64 6f 6c| Lorem ipsum dol| data: "Lorem ipsum dolor sit amet, consectetur adipisc..." 0x201-0x3be (445)
0x210|6f 72 20 73 69 74 20 61 6d 65 74 2c 20 63 6f 6e|or sit amet, con|
* |until 0x3bd.7 (445) | |
| | | [2]{}: record 0x3be-0x59d (479)
| | | header{}: 0x3be-0x3c5 (7)
0x3b0| 09 81| ..| checksum: 0x890d8109 (valid) 0x3be-0x3c2 (4)
0x3c0|0d 89 |.. |
0x3c0| d8 01 | .. | length: 472 0x3c2-0x3c4 (2)
0x3c0| 01 | . | record_type: "full" (1) 0x3c4-0x3c5 (1)
0x3c0| 03 00 00 00 00 00 00 00 01 00 00| ...........| data: raw bits 0x3c5-0x59d (472)
0x3d0|00 01 0b 6c 6f 72 65 6d 2e 64 6f 6c 6f 72 bd 03|...lorem.dolor..|
* |until 0x59c.7 (472) | |
| | | data{}: 0x3c5-0x59d (472)
0x3c0| 03 00 00 00 00 00 00 00 | ........ | sequence: 3 0x3c5-0x3cd (8)
0x3c0| 01 00 00| ...| count: 1 0x3cd-0x3d1 (4)
0x3d0|00 |. |
| | | records[0:1]: 0x3d1-0x59d (460)
| | | [0]{}: record 0x3d1-0x59d (460)
0x3d0| 01 | . | type: "value" (1) 0x3d1-0x3d2 (1)
| | | key{}: 0x3d2-0x3de (12)
0x3d0| 0b | . | length: 11 0x3d2-0x3d3 (1)
0x3d0| 6c 6f 72 65 6d 2e 64 6f 6c 6f 72 | lorem.dolor | data: "lorem.dolor" 0x3d3-0x3de (11)
| | | value{}: 0x3de-0x59d (447)
0x3d0| bd 03| ..| length: 445 0x3de-0x3e0 (2)
0x3e0|4c 6f 72 65 6d 20 69 70 73 75 6d 20 64 6f 6c 6f|Lorem ipsum dolo| data: "Lorem ipsum dolor sit amet, consectetur adipisc..." 0x3e0-0x59d (445)
* |until 0x59c.7 (445) | |
| | | [3]{}: record 0x59d-0x62c (143)
| | | header{}: 0x59d-0x5a4 (7)
0x590| a0 86 3e| ..>| checksum: 0xb3e86a0 (valid) 0x59d-0x5a1 (4)
0x5a0|0b |. |
0x5a0| 88 00 | .. | length: 136 0x5a1-0x5a3 (2)
0x5a0| 01 | . | record_type: "full" (1) 0x5a3-0x5a4 (1)
0x5a0| 04 00 00 00 00 00 00 00 01 00 00 00| ............| data: raw bits 0x5a4-0x62c (136)
0x5b0|01 03 72 6f 77 76 52 6f 77 2c 20 72 6f 77 2c 20|..rowvRow, row, |
* |until 0x62b.7 (end) (136) | |
| | | data{}: 0x5a4-0x62c (136)
0x5a0| 04 00 00 00 00 00 00 00 | ........ | sequence: 4 0x5a4-0x5ac (8)
0x5a0| 01 00 00 00| ....| count: 1 0x5ac-0x5b0 (4)
| | | records[0:1]: 0x5b0-0x62c (124)
| | | [0]{}: record 0x5b0-0x62c (124)
0x5b0|01 |. | type: "value" (1) 0x5b0-0x5b1 (1)
| | | key{}: 0x5b1-0x5b5 (4)
0x5b0| 03 | . | length: 3 0x5b1-0x5b2 (1)
0x5b0| 72 6f 77 | row | data: "row" 0x5b2-0x5b5 (3)
| | | value{}: 0x5b5-0x62c (119)
0x5b0| 76 | v | length: 118 0x5b5-0x5b6 (1)
0x5b0| 52 6f 77 2c 20 72 6f 77 2c 20| Row, row, | data: "Row, row, row your boat\nGently down the stream...." 0x5b6-0x62c (118)
0x5c0|72 6f 77 20 79 6f 75 72 20 62 6f 61 74 0a 47 65|row your boat.Ge|
* |until 0x62b.7 (end) (118) | |

File diff suppressed because it is too large Load Diff

View File

@ -20,7 +20,7 @@ $ fq -d leveldb_table dv snappy.ldb/000005.ldb
0x01d| 0d | . | unshared_bytes: 13 0x1d5-0x1d6 (1)
0x01d| bd 03 | .. | value_length: 445 0x1d6-0x1d8 (2)
| | | key{}: 0x1d8-0x1e5 (13)
0x01d| 69 70 73 75 6d | ipsum | user_key_suffix: raw bits 0x1d8-0x1dd (5)
0x01d| 69 70 73 75 6d | ipsum | user_key_suffix: "ipsum" 0x1d8-0x1dd (5)
| | | user_key: "lorem.ipsum" (inferred)
0x01d| 01 | . | type: "value" (0x1) 0x1dd-0x1de (1)
0x01d| 02 00| ..| sequence_number: 2 0x1de-0x1e5 (7)
@ -33,7 +33,7 @@ $ fq -d leveldb_table dv snappy.ldb/000005.ldb
0x03a| 0d | . | unshared_bytes: 13 0x3a3-0x3a4 (1)
0x03a| bd 03 | .. | value_length: 445 0x3a4-0x3a6 (2)
| | | key{}: 0x3a6-0x3b3 (13)
0x03a| 6c 6f 72 65 6d | lorem | user_key_suffix: raw bits 0x3a6-0x3ab (5)
0x03a| 6c 6f 72 65 6d | lorem | user_key_suffix: "lorem" 0x3a6-0x3ab (5)
| | | user_key: "lorem.lorem" (inferred)
0x03a| 01 | . | type: "value" (0x1) 0x3ab-0x3ac (1)
0x03a| 01 00 00 00| ....| sequence_number: 1 0x3ac-0x3b3 (7)

View File

@ -20,7 +20,7 @@ $ fq -d leveldb_table dv uncompressed.ldb/000005.ldb
0x1d0| 0d | . | unshared_bytes: 13 0x1d5-0x1d6 (1)
0x1d0| bd 03 | .. | value_length: 445 0x1d6-0x1d8 (2)
| | | key{}: 0x1d8-0x1e5 (13)
0x1d0| 69 70 73 75 6d | ipsum | user_key_suffix: raw bits 0x1d8-0x1dd (5)
0x1d0| 69 70 73 75 6d | ipsum | user_key_suffix: "ipsum" 0x1d8-0x1dd (5)
| | | user_key: "lorem.ipsum" (inferred)
0x1d0| 01 | . | type: "value" (0x1) 0x1dd-0x1de (1)
0x1d0| 02 00| ..| sequence_number: 2 0x1de-0x1e5 (7)
@ -33,7 +33,7 @@ $ fq -d leveldb_table dv uncompressed.ldb/000005.ldb
0x3a0| 0d | . | unshared_bytes: 13 0x3a3-0x3a4 (1)
0x3a0| bd 03 | .. | value_length: 445 0x3a4-0x3a6 (2)
| | | key{}: 0x3a6-0x3b3 (13)
0x3a0| 6c 6f 72 65 6d | lorem | user_key_suffix: raw bits 0x3a6-0x3ab (5)
0x3a0| 6c 6f 72 65 6d | lorem | user_key_suffix: "lorem" 0x3a6-0x3ab (5)
| | | user_key: "lorem.lorem" (inferred)
0x3a0| 01 | . | type: "value" (0x1) 0x3ab-0x3ac (1)
0x3a0| 01 00 00 00| ....| sequence_number: 1 0x3ac-0x3b3 (7)