Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Implement/use a byte slice pool #254

Open
wants to merge 3 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 18 additions & 10 deletions accumulator/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,26 +5,28 @@ import (
"crypto/sha512"
"fmt"
"math/rand"

"github.com/mit-dci/utreexo/common"
)

// Hash is the 32 bytes of a sha256 hash.
type Hash [32]byte

// Prefix returns the first 4 bytes of the hash, used as a short
// identifier in printfs.
func (h Hash) Prefix() []byte {
	return h[:4]
}

// Mini takes the first 12 bytes of a hash and outputs a MiniHash.
func (h Hash) Mini() (m MiniHash) {
	copy(m[:], h[:12])
	return
}

// MiniHash is the first 12 bytes of a sha256 hash.
type MiniHash [12]byte

// HashFromString takes a string and hashes it with sha256.
func HashFromString(s string) Hash {
	return sha256.Sum256([]byte(s))
}
Expand All @@ -35,7 +37,8 @@ type arrow struct {
collapse bool
}

// Node :
// node is an element in the utreexo tree and is represented by a position
// and a hash
type node struct {
Pos uint64
Val Hash
Expand All @@ -53,14 +56,18 @@ type simLeaf struct {
duration int32
}

// parentHash gets you the merkle parent of two children hashes.
// Panics if either child is the empty (all-zero) hash, since hashing an
// empty leaf into the tree would silently corrupt the accumulator.
// TODO So far no committing to height.
func parentHash(l, r Hash) Hash {
	var empty Hash
	if l == empty || r == empty {
		panic("got an empty leaf here. ")
	}
	// Use a pooled byte slice (returned to the pool on exit) instead of
	// append(l[:], r[:]...) to avoid a fresh allocation per call.
	buf := common.NewFreeBytes()
	defer buf.Free()
	buf.Bytes = append(buf.Bytes, l[:]...)
	buf.Bytes = append(buf.Bytes, r[:]...)
	return sha512.Sum512_256(buf.Bytes)
}

// SimChain is for testing; it spits out "blocks" of adds and deletes
Expand All @@ -73,7 +80,7 @@ type SimChain struct {
lookahead int32
}

// NewSimChain :
// NewSimChain initializes and returns a SimChain
func NewSimChain(duration uint32) *SimChain {
var s SimChain
s.blockHeight = -1
Expand Down Expand Up @@ -119,7 +126,8 @@ func (s *SimChain) ttlString() string {
return x
}

// NextBlock :
// NextBlock outputs a new simulation block given the additions for the block
// to be output
func (s *SimChain) NextBlock(numAdds uint32) ([]Leaf, []int32, []Hash) {
s.blockHeight++
fmt.Printf("blockHeight %d\n", s.blockHeight)
Expand Down
21 changes: 15 additions & 6 deletions btcacc/leaf.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@ import (
"fmt"
"io"
"strconv"

"github.com/mit-dci/utreexo/common"
)

const HashSize = 32
Expand Down Expand Up @@ -71,14 +73,19 @@ func (l *LeafData) Serialize(w io.Writer) (err error) {

_, err = w.Write(l.BlockHash[:])
_, err = w.Write(l.TxHash[:])
err = binary.Write(w, binary.BigEndian, l.Index)
err = binary.Write(w, binary.BigEndian, hcb)
err = binary.Write(w, binary.BigEndian, l.Amt)

freeBytes := common.NewFreeBytes()
defer freeBytes.Free()

err = freeBytes.PutUint32(w, binary.BigEndian, l.Index)
err = freeBytes.PutUint32(w, binary.BigEndian, uint32(hcb))
err = freeBytes.PutUint64(w, binary.BigEndian, uint64(l.Amt))

if len(l.PkScript) > 10000 {
err = fmt.Errorf("pksize too long")
return
}
err = binary.Write(w, binary.BigEndian, uint16(len(l.PkScript)))
err = freeBytes.PutUint16(w, binary.BigEndian, uint16(len(l.PkScript)))
_, err = w.Write(l.PkScript)
return
}
Expand Down Expand Up @@ -121,7 +128,9 @@ func (l *LeafData) Deserialize(r io.Reader) (err error) {

// LeafHash turns a LeafData into a LeafHash by hashing its
// serialization with sha512-256.
// NOTE(review): the Serialize error is ignored here; a bytes.Buffer
// write cannot fail, but any error injected by Serialize itself would
// be silently dropped — confirm that is acceptable.
func (l *LeafData) LeafHash() [32]byte {
	// Back the buffer with a pooled byte slice to avoid an allocation
	// per hash; the slice is returned to the pool on exit.
	freeBytes := common.NewFreeBytes()
	defer freeBytes.Free()
	buf := bytes.NewBuffer(freeBytes.Bytes)
	l.Serialize(buf)
	return sha512.Sum512_256(buf.Bytes())
}
38 changes: 38 additions & 0 deletions btcacc/leaf_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
package btcacc

import (
"bytes"
"fmt"
"testing"
)

// TestLeafDataSerialize checks that a LeafData survives a
// Serialize -> Deserialize -> Serialize round trip byte-for-byte.
func TestLeafDataSerialize(t *testing.T) {
	ld := LeafData{
		TxHash:   Hash{1, 2, 3, 4},
		Index:    0,
		Height:   2,
		Coinbase: false,
		Amt:      3000,
		PkScript: []byte{1, 2, 3, 4, 5, 6},
	}

	// Serialize the original leaf. Errors must not be ignored, or a
	// broken serializer would make this test pass vacuously.
	writer := &bytes.Buffer{}
	if err := ld.Serialize(writer); err != nil {
		t.Fatal(err)
	}
	beforeBytes := writer.Bytes()

	// Read it back out of the same buffer.
	checkLeaf := LeafData{}
	if err := checkLeaf.Deserialize(writer); err != nil {
		t.Fatal(err)
	}

	// Re-serialize the deserialized copy and compare.
	afterWriter := &bytes.Buffer{}
	if err := checkLeaf.Serialize(afterWriter); err != nil {
		t.Fatal(err)
	}
	afterBytes := afterWriter.Bytes()

	if !bytes.Equal(beforeBytes, afterBytes) {
		err := fmt.Errorf("Serialize/Deserialize LeafData fail\n"+
			"beforeBytes len: %v, afterBytes len: %v\n",
			len(beforeBytes), len(afterBytes))
		t.Fatal(err)
	}
}
54 changes: 18 additions & 36 deletions btcacc/udata.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,11 @@ import (
"io"

"github.com/mit-dci/utreexo/accumulator"
"github.com/mit-dci/utreexo/common"
)

// UData is all the data needed to verify the utreexo accumulator proof
// for a given block
type UData struct {
Height int32
AccProof accumulator.BatchProof
Expand Down Expand Up @@ -52,22 +55,20 @@ func (ud *UData) ProofSanity(nl uint64, h uint8) bool {
return false
}
}
// return to presorted target list
// ud.AccProof.Targets = presort

return true
}

// on disk
// aaff aaff 0000 0014 0000 0001 0000 0001 0000 0000 0000 0000 0000 0000
// magic | size | height | numttls | ttl0 | numTgts | ????

// ToBytes serializes UData into bytes.
// Serialize serializes UData into bytes.
// First, height, 4 bytes.
// Then, number of TTL values (4 bytes, even though we only need 2)
// Then a bunch of TTL values, (4B each) one for each txo in the associated block
// batch proof
// Bunch of LeafDatas

// Then a bunch of TTL values, (4B each) one for each txo in the
// associated block, then the batch proof,
// and the rest is a bunch of LeafDatas
func (ud *UData) Serialize(w io.Writer) (err error) {
err = binary.Write(w, binary.BigEndian, ud.Height)
if err != nil { // ^ 4B block height
Expand All @@ -89,43 +90,33 @@ func (ud *UData) Serialize(w io.Writer) (err error) {
return
}

// fmt.Printf("accproof %d bytes\n", ud.AccProof.SerializeSize())

// write all the leafdatas
for _, ld := range ud.Stxos {
// fmt.Printf("writing ld %d %s\n", i, ld.ToString())
err = ld.Serialize(w)
if err != nil {
return
}
// fmt.Printf("h %d leaf %d %s len %d\n",
// ud.Height, i, ld.Outpoint.String(), len(ld.PkScript))
}

return
}

//
// SerializeSize outputs the size of the udata when it is serialized
func (ud *UData) SerializeSize() int {
var ldsize int
var b bytes.Buffer
buf := common.NewFreeBytes()
bufWriter := bytes.NewBuffer(buf.Bytes)

// TODO this is slow, can remove double checking once it works reliably
// Grab the size of all the stxos
for _, l := range ud.Stxos {
ldsize += l.SerializeSize()
b.Reset()
l.Serialize(&b)
if b.Len() != l.SerializeSize() {
fmt.Printf(" b.Len() %d, l.SerializeSize() %d\n",
b.Len(), l.SerializeSize())
}
}

b.Reset()
ud.AccProof.Serialize(&b)
if b.Len() != ud.AccProof.SerializeSize() {
bufWriter.Reset()
ud.AccProof.Serialize(bufWriter)
if bufWriter.Len() != ud.AccProof.SerializeSize() {
fmt.Printf(" b.Len() %d, AccProof.SerializeSize() %d\n",
b.Len(), ud.AccProof.SerializeSize())
bufWriter.Len(), ud.AccProof.SerializeSize())
}

guess := 8 + (4 * len(ud.TxoTTLs)) + ud.AccProof.SerializeSize() + ldsize
Expand All @@ -134,23 +125,20 @@ func (ud *UData) SerializeSize() int {
return guess
}

// Deserialize reads from the reader and deserializes the udata
func (ud *UData) Deserialize(r io.Reader) (err error) {

err = binary.Read(r, binary.BigEndian, &ud.Height)
if err != nil { // ^ 4B block height
fmt.Printf("ud deser Height err %s\n", err.Error())
return
}
// fmt.Printf("read height %d\n", ud.Height)

var numTTLs uint32
err = binary.Read(r, binary.BigEndian, &numTTLs)
if err != nil { // ^ 4B num ttls
fmt.Printf("ud deser numTTLs err %s\n", err.Error())
return
}
// fmt.Printf("read ttls %d\n", numTTLs)
// fmt.Printf("UData deser read h %d - %d ttls ", ud.Height, numTTLs)

ud.TxoTTLs = make([]int32, numTTLs)
for i, _ := range ud.TxoTTLs { // write all ttls
Expand All @@ -159,7 +147,6 @@ func (ud *UData) Deserialize(r io.Reader) (err error) {
fmt.Printf("ud deser LeafTTLs[%d] err %s\n", i, err.Error())
return
}
// fmt.Printf("read ttl[%d] %d\n", i, ud.TxoTTLs[i])
}

err = ud.AccProof.Deserialize(r)
Expand All @@ -168,8 +155,6 @@ func (ud *UData) Deserialize(r io.Reader) (err error) {
return
}

// fmt.Printf("%d byte accproof, read %d targets\n",
// ud.AccProof.SerializeSize(), len(ud.AccProof.Targets))
// we've already gotten targets. 1 leafdata per target
ud.Stxos = make([]LeafData, len(ud.AccProof.Targets))
for i, _ := range ud.Stxos {
Expand All @@ -180,9 +165,6 @@ func (ud *UData) Deserialize(r io.Reader) (err error) {
ud.Height, numTTLs, len(ud.AccProof.Targets), i, err.Error())
return
}
// fmt.Printf("h %d leaf %d %s len %d\n",
// ud.Height, i, ud.Stxos[i].Outpoint.String(), len(ud.Stxos[i].PkScript))

}

return
Expand Down Expand Up @@ -212,6 +194,7 @@ func GenUData(delLeaves []LeafData, forest *accumulator.Forest, height int32) (

ud.Height = height
ud.Stxos = delLeaves

// make slice of hashes from leafdata
delHashes := make([]accumulator.Hash, len(ud.Stxos))
for i, _ := range ud.Stxos {
Expand All @@ -233,6 +216,5 @@ func GenUData(delLeaves []LeafData, forest *accumulator.Forest, height int32) (
return
}

// fmt.Printf(ud.AccProof.ToString())
return
}
Loading