Skip to content

Commit

Permalink
Reproducible builds implemented. Pruning & fastsync fixes.
Browse files Browse the repository at this point in the history
  • Loading branch information
CaptainDero committed Oct 27, 2022
1 parent 89a1e1e commit 358fab6
Show file tree
Hide file tree
Showing 30 changed files with 195 additions and 112 deletions.
4 changes: 0 additions & 4 deletions astrobwt/suffixarray.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@
// // lookup byte slice s
// offsets1 := index.Lookup(s, -1) // the list of all indices where s occurs in data
// offsets2 := index.Lookup(s, 3) // the list of at most 3 indices where s occurs in data
//
package astrobwt

import (
Expand Down Expand Up @@ -230,7 +229,6 @@ func (x *Index) Write(w io.Writer) error {

// Bytes returns the data over which the index was created.
// It must not be modified.
//
func (x *Index) Bytes() []byte {
return x.data
}
Expand All @@ -255,7 +253,6 @@ func (x *Index) lookupAll(s []byte) ints {
// The result is nil if s is empty, s is not found, or n == 0.
// Lookup time is O(log(N)*len(s) + len(result)) where N is the
// size of the indexed data.
//
func (x *Index) Lookup(s []byte, n int) (result []int) {
if len(s) > 0 && n != 0 {
matches := x.lookupAll(s)
Expand Down Expand Up @@ -286,7 +283,6 @@ func (x *Index) Lookup(s []byte, n int) (result []int) {
// in successive order. Otherwise, at most n matches are returned and
// they may not be successive. The result is nil if there are no matches,
// or if n == 0.
//
func (x *Index) FindAllIndex(r *regexp.Regexp, n int) (result [][]int) {
// a non-empty literal prefix is used to determine possible
// match start indices with Lookup
Expand Down
2 changes: 1 addition & 1 deletion block/block.go
Original file line number Diff line number Diff line change
Expand Up @@ -185,7 +185,7 @@ func (bl *Block) GetTXSHash() (result crypto.Hash) {
return
}

//parse entire block completely
// parse entire block completely
func (bl *Block) Deserialize(buf []byte) (err error) {
done := 0

Expand Down
2 changes: 1 addition & 1 deletion block/miniblock.go
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,7 @@ func (mbl *MiniBlock) Serialize() (result []byte) {
return b.Bytes()
}

//parse entire block completely
// parse entire block completely
func (mbl *MiniBlock) Deserialize(buf []byte) (err error) {
if len(buf) < MINIBLOCK_SIZE {
return fmt.Errorf("Expected %d bytes. Actual %d", MINIBLOCK_SIZE, len(buf))
Expand Down
9 changes: 5 additions & 4 deletions blockchain/miner_block.go
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,9 @@ type BlockScore struct {
}

// Highest height is ordered first; the condition is reverted, see e.g. https://golang.org/pkg/sort/#Slice
//
// if heights are equal, nodes are sorted by their block ids which will never collide, hopefully
//
// block ids are sorted by lowest byte first diff
func sort_descending_by_height_blid(tips_scores []BlockScore) {
sort.Slice(tips_scores, func(i, j int) bool {
func sort_descending_by_height_blid(tips_scores []BlockScore) {
sort.Slice(tips_scores, func(i, j int) bool {
Expand Down Expand Up @@ -101,7 +103,7 @@ func convert_uint32_to_crypto_hash(i uint32) crypto.Hash {
return h
}

//NOTE: this function is quite big since we do a lot of things in preparation of next blocks
// NOTE: this function is quite big since we do a lot of things in preparation of next blocks
func (chain *Blockchain) Create_new_miner_block(miner_address rpc.Address) (cbl *block.Complete_Block, bl block.Block, err error) {
//chain.Lock()
//defer chain.Unlock()
Expand Down Expand Up @@ -343,7 +345,6 @@ func (chain *Blockchain) Create_new_miner_block(miner_address rpc.Address) (cbl
return
}

//
func ConvertBlockToMiniblock(bl block.Block, miniblock_miner_address rpc.Address) (mbl block.MiniBlock) {
mbl.Version = 1

Expand Down Expand Up @@ -480,7 +481,7 @@ func (chain *Blockchain) Accept_new_block(tstamp uint64, miniblock_blob []byte)
// lets try to check pow to detect whether the miner is cheating

if !chain.simulator && !chain.VerifyMiniblockPoW(&bl, mbl) {
logger.V(1).Error(err, "Error ErrInvalidPoW")
//logger.V(1).Error(err, "Error ErrInvalidPoW")
err = errormsg.ErrInvalidPoW
return
}
Expand Down
52 changes: 43 additions & 9 deletions blockchain/prune_history.go
Original file line number Diff line number Diff line change
Expand Up @@ -117,9 +117,14 @@ func Prune_Blockchain(prune_topo int64) (err error) {
// any error while deleting should be considered non fatal
func discard_blocks_and_transactions(store *storage, topoheight int64) {

globals.Logger.Info("Block store before pruning", "size", ByteCountIEC(DirSize(filepath.Join(store.Block_tx_store.basedir, "bltx_store"))))
size_before := ByteCountIEC(DirSize(filepath.Join(store.Block_tx_store.basedir, "bltx_store")))

for i := int64(0); i < topoheight-20; i++ { // do not touch the most recent ~20 blocks, for sanity

if i%1000 == 0 {
globals.Logger.Info("Deleting old block/txs", "done", float64(i*100)/float64(topoheight-20))
}

if toporecord, err := store.Topo_store.Read(i); err == nil {
blid := toporecord.BLOCK_ID

Expand All @@ -138,6 +143,7 @@ func discard_blocks_and_transactions(store *storage, topoheight int64) {
}
}
}
globals.Logger.Info("Block store before pruning", "size", size_before)
globals.Logger.Info("Block store after pruning ", "size", ByteCountIEC(DirSize(filepath.Join(store.Block_tx_store.basedir, "bltx_store"))))
}

Expand Down Expand Up @@ -180,6 +186,12 @@ func clone_snapshot(rsource, wsource *graviton.Store, r_ssversion uint64) (lates
}
}

if latest_commit_version, err = graviton.Commit(write_balance_tree); err != nil {
return 0, err
}

globals.Logger.Info("Main balance tree cloned")

/* h,_ := old_balance_tree.Hash()
fmt.Printf("old balance hash %+v\n",h )
h,_ = write_balance_tree.Hash()
Expand Down Expand Up @@ -214,14 +226,18 @@ func clone_snapshot(rsource, wsource *graviton.Store, r_ssversion uint64) (lates
}
}

if latest_commit_version, err = graviton.Commit(write_meta_tree); err != nil {
return 0, err
}

/* h,_ = old_meta_tree.Hash()
fmt.Printf("old meta hash %+v\n",h )
h,_ = write_meta_tree.Hash()
fmt.Printf("new meta hash %+v\n",h )
os.Exit(0)
*/
var sc_trees []*graviton.Tree
sc_names := map[string]bool{}
// now we have to copy all scs data one by one
for _, scid := range sc_list {
var old_sc_tree, write_sc_tree *graviton.Tree
Expand All @@ -235,12 +251,13 @@ func clone_snapshot(rsource, wsource *graviton.Store, r_ssversion uint64) (lates
for k, v, err := c.First(); err == nil; k, v, err = c.Next() {
write_sc_tree.Put(k, v)
}
sc_trees = append(sc_trees, write_sc_tree)
sc_names[string(scid)] = true
if latest_commit_version, err = graviton.Commit(write_sc_tree); err != nil {
return
}
}
globals.Logger.Info("SCs cloned")

sc_trees = append(sc_trees, write_balance_tree)
sc_trees = append(sc_trees, write_meta_tree)
latest_commit_version, err = graviton.Commit(sc_trees...)
return
}

Expand Down Expand Up @@ -309,7 +326,6 @@ func diff_snapshot(rsource, wsource *graviton.Store, old_version uint64, new_ver

// now we have to copy new scs data one by one
for _, scid := range sc_list_new {

if old_tree, err = old_ss.GetTree(string(scid)); err != nil {
return
}
Expand All @@ -319,7 +335,7 @@ func diff_snapshot(rsource, wsource *graviton.Store, old_version uint64, new_ver
if write_tree, err = write_ss.GetTree(string(scid)); err != nil {
return
}
c := old_tree.Cursor()
c := new_tree.Cursor()
for k, v, err := c.First(); err == nil; k, v, err = c.Next() {
write_tree.Put(k, v)
}
Expand Down Expand Up @@ -399,6 +415,13 @@ func rewrite_graviton_store(store *storage, prune_topoheight int64, max_topoheig

}
}
if err != nil {
return
}

if i%1000 == 0 {
globals.Logger.Info("Commiting block to block changes", "done", float64(i*100)/float64(max_topoheight))
}
}

// now lets store all the commit versions in 1 go
Expand Down Expand Up @@ -444,6 +467,10 @@ func rewrite_graviton_store(store *storage, prune_topoheight int64, max_topoheig
return err
}

if i%1000 == 0 {
globals.Logger.Info("Rewriting entries", "done", float64(i*100)/float64(len(new_entries)))
}

}
}

Expand All @@ -456,6 +483,9 @@ func rewrite_graviton_store(store *storage, prune_topoheight int64, max_topoheig
globals.Logger.Error(err, "err reading toporecord", "topo", i)
return err // this is irreparable damage
}
if i%1000 == 0 {
globals.Logger.Info("Filling gaps", "done", float64(i*100)/float64(prune_topoheight))
}
}

// now lets remove the old graviton db
Expand Down Expand Up @@ -484,5 +514,9 @@ func clone_tree_changes(old_tree, new_tree, write_tree *graviton.Tree) {
write_tree.Put(k, new_value)
}

graviton.Diff(old_tree, new_tree, nil, modify_handler, insert_handler)
delete_handler := func(k, v []byte) { // modification receives old value
write_tree.Delete(k)
}

graviton.Diff(old_tree, new_tree, delete_handler, modify_handler, insert_handler)
}
4 changes: 3 additions & 1 deletion blockchain/storefs.go
Original file line number Diff line number Diff line change
Expand Up @@ -341,6 +341,8 @@ func (s *storefs) migrate_old_tx() {
defer func() {
fmt.Printf("Migrated %d txs to new structure\n", migrated)
}()
} else {
return
}

if err != nil {
Expand All @@ -365,8 +367,8 @@ func (s *storefs) migrate_old_tx() {
}

copy(h[:], txhash[:])
s.WriteTX(h, data)
s.DeleteTX(h) // this will delete legacy version
s.WriteTX(h, data)
migrated++

}
Expand Down
19 changes: 16 additions & 3 deletions blockchain/storetopo.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,8 @@ const TOPORECORD_SIZE int64 = 48

// this file implements a filesystem store which is used to store topo to block mapping directly in the file system and the state version directly tied
type storetopofs struct {
topomapping *os.File
topomapping *os.File
last_state_version uint64
}

func (s TopoRecord) String() string {
Expand Down Expand Up @@ -82,6 +83,7 @@ func (s *storetopofs) Read(index int64) (TopoRecord, error) {
func (s *storetopofs) Write(index int64, blid [32]byte, state_version uint64, height int64) (err error) {
var buf [TOPORECORD_SIZE]byte
var record TopoRecord
var zero_hash [32]byte

copy(buf[:], blid[:])
binary.LittleEndian.PutUint64(buf[len(record.BLOCK_ID):], state_version)
Expand All @@ -90,11 +92,18 @@ func (s *storetopofs) Write(index int64, blid [32]byte, state_version uint64, he
binary.LittleEndian.PutUint64(buf[len(record.BLOCK_ID)+8:], uint64(height))

_, err = s.topomapping.WriteAt(buf[:], index*TOPORECORD_SIZE)

s.topomapping.Sync() // looks like this is the cause of corruption
if s.last_state_version != state_version || state_version == 0 {
if blid != zero_hash { // during fast sync avoid syncing overhead
s.topomapping.Sync() // looks like this is the cause of corruption
}
}
s.last_state_version = state_version

return err
}
func (s *storetopofs) Sync() {
s.topomapping.Sync()
}

func (s *storetopofs) Clean(index int64) (err error) {
var state_version uint64
Expand Down Expand Up @@ -163,6 +172,10 @@ func (s *storetopofs) LocatePruneTopo() int64 {

prune_topo--

if prune_topo > count {
panic("invalid prune detected")
}

pruned_till = prune_topo
return prune_topo
}
Expand Down
2 changes: 1 addition & 1 deletion cmd/derod/rpc/websocket_getwork_server.go
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ var miners_count int
var mini_found_time []int64 // this array contains a epoch timestamp in int64
var rate_lock sync.Mutex

//this function will return wrong result if too wide time glitches happen to system clock
// this function will return wrong result if too wide time glitches happen to system clock
func Counter(seconds int64) (r int) { // we need at least 1 mini to find a rate
rate_lock.Lock()
defer rate_lock.Unlock()
Expand Down
3 changes: 1 addition & 2 deletions cmd/derod/rpc/websocket_server.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ type RPCServer struct {
sync.RWMutex
}

//var Exit_In_Progress bool
// var Exit_In_Progress bool
var chain *blockchain.Blockchain
var logger logr.Logger

Expand Down Expand Up @@ -296,7 +296,6 @@ func Echo(ctx context.Context, args []string) string {
var internal_server = server.NewLocal(historical_apis, nil) // uses traditional "getinfo" for compatibility reasons
// Bridge HTTP to the JSON-RPC server.
var bridge = jhttp.NewBridge(internal_server.Client)
*/
var historical_apis = handler.Map{"getinfo": handler.New(GetInfo),
"get_info": handler.New(GetInfo), // this is just an alias to above
Expand Down
4 changes: 2 additions & 2 deletions config/seed_nodes.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,9 @@ package config
// only version 2
var Mainnet_seed_nodes = []string{
"89.38.99.117:8443",
"45.82.66.54:8080",
"109.236.81.137:8080",
"89.38.97.110:11011",
"45.82.66.55:11011",
"190.2.136.120:11011",
}

// some seed node for testnet
Expand Down
2 changes: 1 addition & 1 deletion config/version.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,4 +20,4 @@ import "github.com/blang/semver/v4"

// right now it has to be manually changed
// do we need to include git commitsha??
var Version = semver.MustParse("3.5.2-113.DEROHE.STARGATE+01102022")
var Version = semver.MustParse("3.5.2-114.DEROHE.STARGATE+01102022")
2 changes: 1 addition & 1 deletion cryptography/crypto/algebra_fieldvector.go
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ func (fv *FieldVector) Slice(start, end int) *FieldVector {
return &result
}

//copy and return
// copy and return
func (fv *FieldVector) Clone() *FieldVector {
return fv.Slice(0, len(fv.vector))
}
Expand Down
Loading

0 comments on commit 358fab6

Please sign in to comment.