dcrd/gcs/bits.go
Josh Rickmar 71500c80f2 multi: Add initial committed filter (CF) support
This change begins the work of bringing committed filters to the
network consensus daemon.  Committed filters are designed to enable
light wallets without many of the privacy issues associated with
server-side bloom filtering.

The new gcs package provides the primitives for creating and matching
against Golomb-coded sets (GCS) filters while the blockcf package
provides creation of filters and filter entries for data structures
found in blocks.

The wire package has been updated to define a new protocol version and
service flag for advertising CF support and includes types for the
following new messages: cfheaders, cfilter, cftypes, getcfheaders,
getcfilter, getcftypes.  The peer package and server implementation
have been updated to include support for the new protocol version and
messages.

Filters are created using a collision probability of 2^-20 and are
saved to a new optional database index when running with committed
filter support enabled (the default).  At first startup, if support is
not disabled, the index will be created and populated with filters and
filter headers for all preexisting blocks, and new filters will be
recorded for processed blocks.

Multiple filter types are supported.  The regular filter commits to
output scripts and previous outpoints that any non-voting wallet will
require access to.  Scripts and previous outpoints that can only be
spent by votes and revocations are not committed to the filter.  The
extended filter is a supplementary filter which commits to all
transaction hashes and script data pushes from the input scripts of
non-coinbase regular and ticket purchase transactions.  Creating these
filters is based on the algorithm defined by BIP0158 but is modified
to only commit "regular" data in stake transactions to prevent
committed filters from being used to create SPV voting wallets.
2018-03-30 13:52:12 -04:00
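
As a rough illustration of the 2^-20 collision probability mentioned above: a wallet matching an unrelated item against a filter sees a false positive about once per 1,048,576 queries, and each filter entry costs roughly a 20-bit Golomb-Rice remainder plus a short unary quotient. A minimal sketch of that arithmetic (the entry count is hypothetical, not taken from the daemon):

package main

import "fmt"

func main() {
	const p = 20         // filters use a collision probability of 2^-p
	const entries = 2500 // hypothetical number of entries for one block's filter

	falsePositiveRate := 1.0 / float64(uint64(1)<<p) // about 1 in 1,048,576
	approxBits := entries * (p + 2)                  // p-bit remainder + roughly 2-bit unary quotient

	fmt.Printf("false positive rate ~ %.8f, filter ~ %d bytes\n",
		falsePositiveRate, approxBits/8)
}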


// Copyright (c) 2018 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package gcs

import (
	"io"
)

// bitWriter assembles a bitstream one bit at a time, most significant bit
// first.  A zero next value means no byte is partially written, so the next
// bit written starts a new byte.
type bitWriter struct {
	bytes []byte
	p     *byte // Pointer to last byte
	next  byte  // Next bit to write or skip
}

// writeOne writes a one bit to the bit stream.
func (b *bitWriter) writeOne() {
	if b.next == 0 {
		b.bytes = append(b.bytes, 1<<7)
		b.p = &b.bytes[len(b.bytes)-1]
		b.next = 1 << 6
		return
	}
	*b.p |= b.next
	b.next >>= 1
}

// writeZero writes a zero bit to the bit stream.
func (b *bitWriter) writeZero() {
	if b.next == 0 {
		b.bytes = append(b.bytes, 0)
		b.p = &b.bytes[len(b.bytes)-1]
		b.next = 1 << 6
		return
	}
	b.next >>= 1
}

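// Example (illustrative): calling writeOne followed by writeZero on a fresh
// bitWriter appends the single byte 0x80 (1000 0000) and leaves next == 1<<5,
// so six bit positions in that byte remain available.
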
// writeNBits writes the n least significant bits of data to the bit stream in
// big-endian order.  Panics if n > 64.
func (b *bitWriter) writeNBits(data uint64, n uint) {
	if n > 64 {
		panic("gcs: cannot write more than 64 bits of a uint64")
	}
	data <<= 64 - n

	// If a byte is partially written, fill the rest of it first.
	for n > 0 {
		if b.next == 0 {
			break
		}
		if data&(1<<63) != 0 {
			b.writeOne()
		} else {
			b.writeZero()
		}
		n--
		data <<= 1
	}
	if n == 0 {
		return
	}

	// Write 8 bits at a time.
	for n >= 8 {
		b.bytes = append(b.bytes, byte(data>>56))
		n -= 8
		data <<= 8
	}

	// Write the remaining bits.
	for n > 0 {
		if data&(1<<63) != 0 {
			b.writeOne()
		} else {
			b.writeZero()
		}
		n--
		data <<= 1
	}
}

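// Example (illustrative): on a byte-aligned writer, writeNBits(0x0b, 4) emits
// the bits 1011 into a fresh byte, so the stream gains the byte 0xb0
// (1011 0000) with next == 1<<3 marking the four still-unwritten positions.
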
// bitReader consumes a bitstream one bit at a time, most significant bit
// first.
type bitReader struct {
	bytes []byte
	next  byte // next bit to read in bytes[0]
}

// newBitReader returns a bitReader positioned at the first bit of bitstream.
func newBitReader(bitstream []byte) bitReader {
	return bitReader{
		bytes: bitstream,
		next:  1 << 7,
	}
}

// readUnary returns the number of unread sequential one bits before the next
// zero bit.  Errors with io.EOF if the stream ends before a zero bit is read.
func (b *bitReader) readUnary() (uint64, error) {
	var value uint64
	for {
		if len(b.bytes) == 0 {
			return value, io.EOF
		}
		for b.next != 0 {
			bit := b.bytes[0] & b.next
			b.next >>= 1
			if bit == 0 {
				return value, nil
			}
			value++
		}
		b.bytes = b.bytes[1:]
		b.next = 1 << 7
	}
}

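// Example (illustrative): readUnary over the single byte 0xe7 (1110 0111)
// returns 3, counting the leading one bits and consuming the terminating zero
// bit, and leaves next == 1<<3 pointing at the bit that follows it.
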
// readNBits reads the next n bits from the bit stream, in big-endian order,
// and returns them as the n least significant bits of a uint64.  Panics if
// n > 64.
func (b *bitReader) readNBits(n uint) (uint64, error) {
	if n > 64 {
		panic("gcs: cannot read more than 64 bits as a uint64")
	}
	if len(b.bytes) == 0 {
		return 0, io.EOF
	}
	var value uint64

	// If a byte is partially read, read the rest of it first.
	if b.next != 1<<7 {
		for n > 0 {
			if b.next == 0 {
				b.next = 1 << 7
				b.bytes = b.bytes[1:]
				break
			}
			n--
			if b.bytes[0]&b.next != 0 {
				value |= 1 << n
			}
			b.next >>= 1
		}
	}
	if n == 0 {
		return value, nil
	}

	// Read 8 bits at a time.
	for n >= 8 {
		if len(b.bytes) == 0 {
			return 0, io.EOF
		}
		n -= 8
		value |= uint64(b.bytes[0]) << n
		b.bytes = b.bytes[1:]
	}
	if len(b.bytes) == 0 {
		if n != 0 {
			return 0, io.EOF
		}
		return value, nil
	}

	// Read the remaining bits.
	for n > 0 {
		if b.next == 0 {
			b.bytes = b.bytes[1:]
			if len(b.bytes) == 0 {
				return 0, io.EOF
			}
			b.next = 1 << 7
		}
		n--
		if b.bytes[0]&b.next != 0 {
			value |= 1 << n
		}
		b.next >>= 1
	}
	return value, nil
}
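
These reader and writer halves are presumably the bit-level primitives the rest of the gcs package builds its Golomb-Rice coding on: a quotient written in unary via writeOne/writeZero and recovered by readUnary, followed by a fixed-width remainder handled by writeNBits/readNBits. A minimal sketch of how they could compose into a single codeword with remainder width p, assuming it lives in the same package as the types above (golombEncodeSketch and golombDecodeSketch are illustrative names, not functions from the dcrd source):

// golombEncodeSketch writes value as a Golomb-Rice codeword: the quotient
// value>>p in unary (one bits terminated by a zero bit), followed by the
// p-bit remainder.  Illustrative sketch only.
func golombEncodeSketch(w *bitWriter, value uint64, p uint) {
	for q := value >> p; q > 0; q-- {
		w.writeOne()
	}
	w.writeZero()
	// writeNBits keeps only the p least significant bits, which are exactly
	// the remainder.
	w.writeNBits(value, p)
}

// golombDecodeSketch reverses golombEncodeSketch, returning the original
// value.  Illustrative sketch only.
func golombDecodeSketch(r *bitReader, p uint) (uint64, error) {
	q, err := r.readUnary()
	if err != nil {
		return 0, err
	}
	rem, err := r.readNBits(p)
	if err != nil {
		return 0, err
	}
	return q<<p | rem, nil
}

With p = 20, the remainder width lines up with the 2^-20 collision probability described in the commit message above.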