Fix/drop control call #2873
@@ -1,11 +1,8 @@
package engine

import (
    "errors"

    "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard"
    apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status"
    objectSDK "github.com/nspcc-dev/neofs-sdk-go/object"
    oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
    "go.uber.org/zap"
)

@@ -58,102 +55,25 @@
        defer elapsed(e.metrics.AddDeleteDuration)()
    }

    var locked struct {
        is  bool
        err apistatus.ObjectLocked
    }
    var splitInfo *objectSDK.SplitInfo

    // Removal of a big object is done in multiple stages:
    // 1. Remove the parent object. If it is locked or already removed, return immediately.
    // 2. Otherwise, search for all objects with a particular SplitID and delete them too.
    e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) {
        var existsPrm shard.ExistsPrm
        existsPrm.SetAddress(prm.addr)

        resExists, err := sh.Exists(existsPrm)
        if err != nil {
            if shard.IsErrRemoved(err) || shard.IsErrObjectExpired(err) {
                return true
            }

            var splitErr *objectSDK.SplitInfoError
            if !errors.As(err, &splitErr) {
                if !shard.IsErrNotFound(err) {
                    e.reportShardError(sh, "could not check object existence", err)
                }
                return false
            }
            splitInfo = splitErr.SplitInfo()
        } else if !resExists.Exists() {
            return false
        }

        var shPrm shard.InhumePrm
        shPrm.MarkAsGarbage(prm.addr)
        if prm.forceRemoval {
            shPrm.ForceRemoval()
        }

        _, err = sh.Inhume(shPrm)
        if !prm.forceRemoval {
            locked, err := e.isLocked(prm.addr)
            if err != nil {
                e.reportShardError(sh, "could not inhume object in shard", err)

                locked.is = errors.As(err, &locked.err)

                return locked.is
            }

        // If a parent object is removed we should set GC mark on each shard.
        return splitInfo == nil
    })

    if locked.is {
        return DeleteRes{}, locked.err
    }

    if splitInfo != nil {
        if splitID := splitInfo.SplitID(); splitID != nil {
            e.deleteChildren(prm.addr, prm.forceRemoval, *splitID)
            e.log.Warn("deleting an object without full locking check",
                zap.Error(err),
                zap.Stringer("addr", prm.addr))
        } else if locked {
            var lockedErr apistatus.ObjectLocked
            return DeleteRes{}, lockedErr
        }
    }

    return DeleteRes{}, nil
}

func (e *StorageEngine) deleteChildren(addr oid.Address, force bool, splitID objectSDK.SplitID) {
    var fs objectSDK.SearchFilters
    fs.AddSplitIDFilter(objectSDK.MatchStringEqual, splitID)

    var selectPrm shard.SelectPrm
    selectPrm.SetFilters(fs)
    selectPrm.SetContainerID(addr.Container())

    var inhumePrm shard.InhumePrm
    if force {
        inhumePrm.MarkAsGarbage(prm.addr)
        if prm.forceRemoval {
            inhumePrm.ForceRemoval()
        }
    }

    e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
        res, err := sh.Select(selectPrm)
        if err != nil {
            e.log.Warn("error during searching for object children",
                zap.Stringer("addr", addr),
                zap.String("error", err.Error()))
            return false
        }

        for _, addr := range res.AddressList() {
            inhumePrm.MarkAsGarbage(addr)
            _, err := e.inhumeAddr(prm.addr, inhumePrm)
Review comment: Looks like it was a bit more efficient previously with sorted shards.

Reply: why do you think it is unsorted now?

Reply: you mean this? It is root object handling; there is no information about the link and last objects that could be acquired via the root object's ID.

cthulhu-rider marked this conversation as resolved.

(A rough sketch of the sorted-shard probing idea appears after the code listing below.)

            _, err = sh.Inhume(inhumePrm)
            if err != nil {
                e.log.Debug("could not inhume object in shard",
                    zap.Stringer("addr", addr),
                    zap.String("err", err.Error()))
                continue
            }
        }
        return false
    })
    return DeleteRes{}, err
}
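For the sorted-shards thread above: a minimal, hypothetical sketch of what probing shards in an address-derived order means (rendezvous-style hashing, so the shard most likely to hold the object is tried first). The names shardInfo, weight and sortShardsForAddr are invented for this illustration and are not the engine's real API.

// Hypothetical sketch only: deterministic per-(shard, address) ordering so that
// lookups and deletes can probe the most likely shard first and stop early.
package main

import (
    "fmt"
    "hash/fnv"
    "sort"
)

type shardInfo struct {
    id string
}

// weight scores a (shard, address) pair; higher means "probe earlier".
func weight(shardID, addr string) uint64 {
    h := fnv.New64a()
    h.Write([]byte(shardID))
    h.Write([]byte(addr))
    return h.Sum64()
}

// sortShardsForAddr returns a copy of shards ordered by descending weight
// for the given object address.
func sortShardsForAddr(shards []shardInfo, addr string) []shardInfo {
    out := append([]shardInfo(nil), shards...)
    sort.Slice(out, func(i, j int) bool {
        return weight(out[i].id, addr) > weight(out[j].id, addr)
    })
    return out
}

func main() {
    shards := []shardInfo{{id: "shard-0"}, {id: "shard-1"}, {id: "shard-2"}}
    for _, sh := range sortShardsForAddr(shards, "container/object") {
        // An Exists/Inhume call would probe sh here and stop on the first hit.
        fmt.Println("probe", sh.id)
    }
}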
Review comment: This one differs from the two implementations.

Reply: yes, and that is the key of my suggestion: why does this logic differ? We do the same things when we delete or inhume objects (and every copy of that logic may carry more or fewer bugs), so why should some objects (when the object is a big one) be collected one way while the others are handled differently? About this PR: I improved and fixed Inhume a little some time ago and could not understand why I had to do it again when looking at Delete.
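To make the reviewer's point concrete, here is a rough sketch of a unified flow in which Delete performs only the lock check and then funnels removal through the same inhume path that Inhume uses. It reuses identifiers visible in the diff (isLocked, shard.InhumePrm, MarkAsGarbage, ForceRemoval, inhumeAddr); their exact signatures are inferred from the diff and may differ from the real code, so treat this as an illustration, not the PR's actual change.

// Sketch only; not the PR's literal code. Assumes the surrounding engine
// package types (StorageEngine, DeletePrm, DeleteRes) and the helpers shown
// in the diff above.
func (e *StorageEngine) deleteUnified(prm DeletePrm) (DeleteRes, error) {
    if !prm.forceRemoval {
        locked, err := e.isLocked(prm.addr)
        if err != nil {
            // Lock status could not be determined: log and proceed,
            // mirroring the warning added in the diff.
            e.log.Warn("deleting an object without full locking check",
                zap.Error(err),
                zap.Stringer("addr", prm.addr))
        } else if locked {
            var lockedErr apistatus.ObjectLocked
            return DeleteRes{}, lockedErr
        }
    }

    var inhumePrm shard.InhumePrm
    inhumePrm.MarkAsGarbage(prm.addr)
    if prm.forceRemoval {
        inhumePrm.ForceRemoval()
    }

    // Single shared removal path: the same helper the Inhume flow relies on.
    _, err := e.inhumeAddr(prm.addr, inhumePrm)
    return DeleteRes{}, err
}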