Skip to content

Commit

Permalink
Merge remote-tracking branch 'ddvk/master' into ddvk-patches
Browse files Browse the repository at this point in the history
It kind of reverts some of my changes, but I'm ok with that.
  • Loading branch information
mvaled committed Sep 29, 2024
2 parents 8df604b + a0c746f commit e3aa805
Show file tree
Hide file tree
Showing 32 changed files with 642 additions and 443 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/go.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,10 +14,10 @@ jobs:
runs-on: ubuntu-latest
steps:

- name: Set up Go 1.22
uses: actions/setup-go@v3
- name: Set up Go 1.23.1
uses: actions/setup-go@v5
with:
go-version: 1.22
go-version: 1.23.1
id: go

- name: Check out code into the Go module directory
Expand Down
6 changes: 3 additions & 3 deletions .github/workflows/release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -12,10 +12,10 @@ jobs:
runs-on: ubuntu-latest
steps:

- name: Set up Go 1.22
uses: actions/setup-go@v3
- name: Set up Go 1.23.1
uses: actions/setup-go@v5
with:
go-version: 1.22
go-version: 1.23.1
id: go

- name: Check out code into the Go module directory
Expand Down
6 changes: 5 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,11 @@

## Unreleased

- Allow passing the source directory in 'mput'.
- Merge from ddvk:
- fix sync api
- add rudimentary globbing for ls,rm
- add trash folder
- add orphaned files to root

## rmapi 0.0.26 (2024-03-29)

Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# rMAPI

This is (recent) fork of the original [rMAPI](https://github.com/juruen/rmapi)
This is a (recent) fork of the original [rMAPI](https://github.com/juruen/rmapi)
which is now [unmaintained](https://github.com/juruen/rmapi/discussions/313).
While I won't claim that I will keep this up-to-date, I will try to do my best
*until a good alternative* is out there. I have a big library of books and
Expand Down
4 changes: 2 additions & 2 deletions api/api.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,10 +20,10 @@ type ApiCtx interface {
CreateDir(parentId, name string, notify bool) (*model.Document, error)
UploadDocument(parentId string, sourceDocPath string, notify bool) (*model.Document, error)
MoveEntry(src, dstDir *model.Node, name string) (*model.Node, error)
DeleteEntry(node *model.Node) error
DeleteEntry(node *model.Node, recursive, notify bool) error
SyncComplete() error
Nuke() error
Refresh() error
Refresh() (string, int64, error)
}

type UserToken struct {
Expand Down
85 changes: 35 additions & 50 deletions api/sync15/apictx.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ func CreateCtx(http *transport.HttpClientCtx) (*ApiCtx, error) {
}
err = cacheTree.Mirror(apiStorage, concurrent)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to mirror %v", err)
}
saveTree(cacheTree)
tree := DocumentsFileTree(cacheTree)
Expand All @@ -57,13 +57,13 @@ func (ctx *ApiCtx) Filetree() *filetree.FileTreeCtx {
return ctx.ft
}

func (ctx *ApiCtx) Refresh() error {
func (ctx *ApiCtx) Refresh() (string, int64, error) {
err := ctx.hashTree.Mirror(ctx.blobStorage, concurrent)
if err != nil {
return err
return "", 0, err
}
ctx.ft = DocumentsFileTree(ctx.hashTree)
return nil
return ctx.hashTree.Hash, ctx.hashTree.Generation, nil
}

// Nuke removes all documents from the account
Expand All @@ -72,12 +72,8 @@ func (ctx *ApiCtx) Nuke() (err error) {
ctx.hashTree.Docs = nil
ctx.hashTree.Rehash()
return nil
})

if err != nil {
return
}
return ctx.SyncComplete()
}, true)
return err
}

// FetchDocument downloads a document given its ID and saves it locally into dstPath
Expand All @@ -99,7 +95,7 @@ func (ctx *ApiCtx) FetchDocument(docId, dstPath string) error {
defer w.Close()
for _, f := range doc.Files {
log.Trace.Println("fetching document: ", f.DocumentID)
blobReader, err := ctx.blobStorage.GetReader(f.Hash)
blobReader, err := ctx.blobStorage.GetReader(f.Hash, f.DocumentID)
if err != nil {
return err
}
Expand Down Expand Up @@ -173,7 +169,8 @@ func (ctx *ApiCtx) CreateDir(parentId, name string, notify bool) (*model.Documen
if err != nil {
return nil, err
}
err = ctx.blobStorage.UploadBlob(hashStr, reader)
err = ctx.blobStorage.UploadBlob(hashStr, f.Name, reader)
reader.Close()

if err != nil {
return nil, err
Expand All @@ -187,15 +184,15 @@ func (ctx *ApiCtx) CreateDir(parentId, name string, notify bool) (*model.Documen
if err != nil {
return nil, err
}
defer indexReader.Close()
err = ctx.blobStorage.UploadBlob(doc.Hash, indexReader)
// defer indexReader.Close()
err = ctx.blobStorage.UploadBlob(doc.Hash, addSchema(doc.DocumentID), indexReader)
if err != nil {
return nil, err
}

err = Sync(ctx.blobStorage, ctx.hashTree, func(t *HashTree) error {
return t.Add(doc)
})
}, notify)

if err != nil {
return nil, err
Expand All @@ -212,11 +209,11 @@ func (ctx *ApiCtx) CreateDir(parentId, name string, notify bool) (*model.Documen
}

// Sync applies changes to the local tree and syncs with the remote storage
func Sync(b *BlobStorage, tree *HashTree, operation func(t *HashTree) error) error {
synccount := 0
func Sync(b *BlobStorage, tree *HashTree, operation func(t *HashTree) error, notify bool) error {
syncTry := 0
for {
synccount++
if synccount > 10 {
syncTry++
if syncTry > 10 {
log.Error.Println("Something is wrong")
break
}
Expand All @@ -230,15 +227,16 @@ func Sync(b *BlobStorage, tree *HashTree, operation func(t *HashTree) error) err
if err != nil {
return err
}
err = b.UploadBlob(tree.Hash, indexReader)
err = b.UploadBlob(tree.Hash, addSchema("root"), indexReader)
if err != nil {
return err
}
defer indexReader.Close()
// TODO
// defer indexReader.Close()

log.Info.Println("updating root, old gen: ", tree.Generation)

newGeneration, err := b.WriteRootIndex(tree.Hash, tree.Generation)
newGeneration, err := b.WriteRootIndex(tree.Hash, tree.Generation, notify)

if err == nil {
log.Info.Println("wrote root, new gen: ", newGeneration)
Expand All @@ -262,19 +260,15 @@ func Sync(b *BlobStorage, tree *HashTree, operation func(t *HashTree) error) err
}

// DeleteEntry removes an entry: either an empty directory or a file
func (ctx *ApiCtx) DeleteEntry(node *model.Node) error {
if node.IsDirectory() && len(node.Children) > 0 {
func (ctx *ApiCtx) DeleteEntry(node *model.Node, recursive, notify bool) error {
if node.IsDirectory() && len(node.Children) > 0 && !recursive {
return errors.New("directory is not empty")
}

err := Sync(ctx.blobStorage, ctx.hashTree, func(t *HashTree) error {
return t.Remove(node.Document.ID)
})
if err != nil {
return err
}

return ctx.SyncComplete()
}, notify)
return err
}

// MoveEntry moves an entry (either a directory or a file)
Expand All @@ -292,7 +286,7 @@ func (ctx *ApiCtx) MoveEntry(src, dstDir *model.Node, name string) (*model.Node,
if err != nil {
return err
}
doc.Metadata.Version += 1
doc.Metadata.Version++
doc.Metadata.DocName = name
doc.Metadata.Parent = dstDir.Id()
doc.Metadata.MetadataModified = true
Expand All @@ -311,7 +305,7 @@ func (ctx *ApiCtx) MoveEntry(src, dstDir *model.Node, name string) (*model.Node,
return err
}

err = ctx.blobStorage.UploadBlob(hashStr, reader)
err = ctx.blobStorage.UploadBlob(hashStr, doc.DocumentID, reader)

if err != nil {
return err
Expand All @@ -322,19 +316,14 @@ func (ctx *ApiCtx) MoveEntry(src, dstDir *model.Node, name string) (*model.Node,
if err != nil {
return err
}
defer indexReader.Close()
return ctx.blobStorage.UploadBlob(doc.Hash, indexReader)
})
// defer indexReader.Close()
return ctx.blobStorage.UploadBlob(doc.Hash, addSchema(doc.DocumentID), indexReader)
}, true)

if err != nil {
return nil, err
}

err = ctx.SyncComplete()
if err != nil {
return nil, err
}

d, err := ctx.hashTree.FindDoc(src.Document.ID)
if err != nil {
return nil, err
Expand Down Expand Up @@ -388,7 +377,7 @@ func (ctx *ApiCtx) UploadDocument(parentId string, sourceDocPath string, notify
if err != nil {
return nil, err
}
err = ctx.blobStorage.UploadBlob(hashStr, reader)
err = ctx.blobStorage.UploadBlob(hashStr, fileEntry.DocumentID, reader)

if err != nil {
return nil, err
Expand All @@ -402,25 +391,19 @@ func (ctx *ApiCtx) UploadDocument(parentId string, sourceDocPath string, notify
if err != nil {
return nil, err
}
defer indexReader.Close()
err = ctx.blobStorage.UploadBlob(doc.Hash, indexReader)
// defer indexReader.Close()
err = ctx.blobStorage.UploadBlob(doc.Hash, addSchema(doc.DocumentID), indexReader)
if err != nil {
return nil, err
}

err = Sync(ctx.blobStorage, ctx.hashTree, func(t *HashTree) error {
return t.Add(doc)
})
}, notify)

if err != nil {
return nil, err
}
if notify {
err = ctx.SyncComplete()
if err != nil {
return nil, err
}
}

return doc.ToDocument(), nil
}
Expand All @@ -442,8 +425,10 @@ func DocumentsFileTree(tree *HashTree) *filetree.FileTreeCtx {
fileTree := filetree.CreateFileTreeCtx()

for _, d := range documents {
log.Trace.Println("adding doc: ", d.ID)
fileTree.AddDocument(d)
}
fileTree.FinishAdd()

return &fileTree
}
Expand Down
26 changes: 10 additions & 16 deletions api/sync15/blobdoc.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
package sync15

import (
"bufio"
"bytes"
"crypto/sha256"
"encoding/hex"
Expand Down Expand Up @@ -88,24 +87,19 @@ func (t *HashTree) Add(d *BlobDoc) error {
return t.Rehash()
}

func (d *BlobDoc) IndexReader() (io.ReadCloser, error) {
func (d *BlobDoc) IndexReader() (io.Reader, error) {
if len(d.Files) == 0 {
return nil, errors.New("no files")
}
pipeReader, pipeWriter := io.Pipe()
w := bufio.NewWriter(pipeWriter)
go func() {
defer pipeWriter.Close()
w.WriteString(SchemaVersion)
var w bytes.Buffer
w.WriteString(SchemaVersion)
w.WriteString("\n")
for _, d := range d.Files {
w.WriteString(d.Line())
w.WriteString("\n")
for _, d := range d.Files {
w.WriteString(d.Line())
w.WriteString("\n")
}
w.Flush()
}()
}

return pipeReader, nil
return bytes.NewReader(w.Bytes()), nil
}

// ReadMetadata the document metadata from remote blob
Expand All @@ -115,7 +109,7 @@ func (d *BlobDoc) ReadMetadata(fileEntry *Entry, r RemoteStorage) error {

metadata := archive.MetadataFile{}

meta, err := r.GetReader(fileEntry.Hash)
meta, err := r.GetReader(fileEntry.Hash, fileEntry.DocumentID)
if err != nil {
return err
}
Expand Down Expand Up @@ -157,7 +151,7 @@ func (d *BlobDoc) Line() string {
// Mirror updates the document to be the same as the remote
func (d *BlobDoc) Mirror(e *Entry, r RemoteStorage) error {
d.Entry = *e
entryIndex, err := r.GetReader(e.Hash)
entryIndex, err := r.GetReader(e.Hash, e.DocumentID)
if err != nil {
return err
}
Expand Down
Loading

0 comments on commit e3aa805

Please sign in to comment.