From 958e962d20864e73747610762fae2dcecb126994 Mon Sep 17 00:00:00 2001
From: Aleksandr Snopov
Date: Wed, 21 Feb 2024 14:27:07 +0300
Subject: [PATCH 1/2] set patch-append-part-size for small files

---
 internal/file.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/internal/file.go b/internal/file.go
index e87c576..81a7903 100644
--- a/internal/file.go
+++ b/internal/file.go
@@ -1170,7 +1170,7 @@ func (inode *Inode) patchSimpleObj(bufs []*FileBuffer) {
 
 	go func() {
 		inode.mu.Lock()
-		inode.patchFromBuffers(bufs, 0)
+		inode.patchFromBuffers(bufs, inode.fs.flags.SinglePartMB*1024*1024)
 		inode.UnlockRange(0, size, true)
 
 		inode.IsFlushing -= inode.fs.flags.MaxParallelParts

From 0aa1aa888f8ca595711ed6afe874fd5a1dfb3ae4 Mon Sep 17 00:00:00 2001
From: Aleksandr Snopov
Date: Wed, 21 Feb 2024 14:27:22 +0300
Subject: [PATCH 2/2] patch bugfixes

---
 internal/file.go | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/internal/file.go b/internal/file.go
index 81a7903..4d6e212 100644
--- a/internal/file.go
+++ b/internal/file.go
@@ -1098,18 +1098,17 @@ func (inode *Inode) patchObjectRanges() (initiated bool) {
 	smallFile := inode.Attributes.Size <= inode.fs.flags.SinglePartMB*1024*1024
 	wantFlush := inode.fileHandles == 0 || inode.forceFlush || atomic.LoadInt32(&inode.fs.wantFree) > 0
 
-	if smallFile && wantFlush {
-		if inode.flushLimitsExceeded() {
+	if smallFile {
+		if inode.flushLimitsExceeded() || !wantFlush {
 			return
 		}
 
-		flushBufs := inode.buffers.Select(0, inode.Attributes.Size, func(buf *FileBuffer) bool { return buf.state == BUF_DIRTY; })
+		flushBufs := inode.buffers.Select(0, inode.Attributes.Size, func(buf *FileBuffer) bool { return buf.state == BUF_DIRTY })
 		inode.patchSimpleObj(flushBufs)
 		return true
 	}
 
 	updatedPartID := inode.fs.partNum(inode.lastWriteEnd)
-	var prevSize uint64
 	inode.buffers.IterateDirtyParts(func(part uint64) bool {
 		if inode.flushLimitsExceeded() {
 			return false
@@ -1121,15 +1120,16 @@ func (inode *Inode) patchObjectRanges() (initiated bool) {
 			return false
 		}
 
+		_, prevSize := inode.fs.partRange(MaxUInt64(part-1, 0))
+
 		partEnd, rangeBorder := partStart+partSize, partSize != prevSize
 		appendPatch, newPart := partEnd > inode.knownSize, partStart == inode.knownSize
 
 		// When entering a new part range, we can't immediately switch to the new part size,
 		// because we need to init a new part first.
-		if newPart && rangeBorder && prevSize > 0 {
+		if newPart && rangeBorder {
 			partEnd, partSize = partStart+prevSize, prevSize
 		}
-		prevSize = partSize
 
 		smallTail := appendPatch && inode.Attributes.Size-partStart < partSize
 		if smallTail && !wantFlush {
@@ -1137,12 +1137,13 @@ func (inode *Inode) patchObjectRanges() (initiated bool) {
 		}
 
 		partLocked := inode.IsRangeLocked(partStart, partEnd, true)
-		if !wantFlush && part == updatedPartID || partLocked {
+		if partLocked || !wantFlush && part == updatedPartID {
 			return true
 		}
 
 		inode.buffers.SplitAt(partStart)
 		inode.buffers.SplitAt(partEnd)
+
 		flushBufs := inode.buffers.Select(partStart, partEnd, func(buf *FileBuffer) bool {
 			return buf.state == BUF_DIRTY && (!buf.zero || wantFlush || appendPatch)
 		})
@@ -1150,7 +1151,6 @@ func (inode *Inode) patchObjectRanges() (initiated bool) {
 			inode.patchPart(partStart, partSize, flushBufs)
 			initiated = true
 		}
-
 		return true
 	})
 	return