Add tracing and Log with usec timestamps #57

Open · wants to merge 3 commits into base: development
32 changes: 26 additions & 6 deletions fs/api_internal.go
@@ -170,13 +170,15 @@ func (vS *volumeStruct) inFlightFileInodeDataFlusher(inodeNumber inode.InodeNumb
}

func (inFlightFileInodeData *inFlightFileInodeDataStruct) inFlightFileInodeDataTracker() {
var (
flushFirst bool
)

logger.Tracef("fs.inFlightFileInodeDataTracker(): waiting to flush volume '%s' inode %d",
logger.Tracef("fs.inFlightFileInodeDataTracker() entry: waiting to flush volume '%s' inode %d",
Contributor:

Logger already adds the function name to the log message so you should not have to add the function name here.

Contributor:

For example, here is a log message that I see when I am using this PR:

time="2017-12-11T21:00:11.737919Z" level=info msg="vs.Write(): growing file size 999424 to 1000000 volume 'CommonVolume' inode 204 offset 999424 len 576" function=Write goroutine=981 package=inode
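The fields function=, goroutine= and package= in that line are logrus-style fields appended by the logger itself, which is why repeating the function name in the message text is redundant. A minimal sketch of how a logger can attach the caller's function name automatically, assuming a plain logrus setup rather than ProxyFS's actual logger package:

```go
package main

import (
	"runtime"
	"strings"

	"github.com/sirupsen/logrus"
)

// traceEntry is a hypothetical helper: it derives the caller's function name
// via runtime.Caller, so call sites never need to repeat "fs.Flush()" etc.
// inside the message text.
func traceEntry(format string, args ...interface{}) {
	fields := logrus.Fields{}
	if pc, _, _, ok := runtime.Caller(1); ok {
		if fn := runtime.FuncForPC(pc); fn != nil {
			// e.g. "github.com/org/proj/fs.(*mountStruct).Flush" -> "Flush"
			name := fn.Name()
			fields["function"] = name[strings.LastIndex(name, ".")+1:]
		}
	}
	logrus.WithFields(fields).Tracef(format, args...)
}

func main() {
	logrus.SetLevel(logrus.TraceLevel)
	traceEntry("entry: volume '%s' inode %d", "CommonVolume", 204)
	// prints something like:
	// level=trace msg="entry: volume 'CommonVolume' inode 204" function=main
}
```
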

inFlightFileInodeData.volStruct.volumeName, inFlightFileInodeData.InodeNumber)
defer logger.Tracef("fs.inFlightFileInodeDataTracker() return: flush volume '%s' inode %d",
inFlightFileInodeData.volStruct.volumeName, inFlightFileInodeData.InodeNumber)

var (
flushFirst bool
)
select {
case flushFirst = <-inFlightFileInodeData.control:
// All we needed was the value of flushFirst from control chan
@@ -310,6 +312,8 @@ func (mS *mountStruct) doInlineCheckpoint() {
var (
err error
)
logger.Tracef("fs.doInlineCheckpoint() entry: volume '%s'", mS.volStruct.volumeName)
defer logger.Tracef("fs.doInlineCheckpoint() return: volume '%s'", mS.volStruct.volumeName)

if nil == mS.headhunterVolumeHandle {
mS.headhunterVolumeHandle, err = headhunter.FetchVolumeHandle(mS.volStruct.volumeName)
@@ -325,6 +329,11 @@ }
}

func (mS *mountStruct) Flush(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber) (err error) {

logger.Tracef("fs.Flush() entry: volume '%s' inode %d", mS.volStruct.volumeName, inodeNumber)
defer logger.Tracef("fs.Flush() return: volume '%s' inode %d", mS.volStruct.volumeName, inodeNumber)

// checkpoint after the flush completes
defer mS.doInlineCheckpoint()

inodeLock, err := mS.volStruct.initInodeLock(inodeNumber, nil)
@@ -338,9 +347,11 @@ func (mS *mountStruct) Flush(userID inode.InodeUserID, groupID inode.InodeGroupI
defer inodeLock.Unlock()

if !mS.volStruct.VolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK) {
logger.Tracef("fs.Flush(): volume '%s' inode %d err ENOENT", mS.volStruct.volumeName, inodeNumber)
return blunder.NewError(blunder.NotFoundError, "ENOENT")
}
if !mS.volStruct.VolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK) {
logger.Tracef("fs.Flush(): volume '%s' inode %d err EACCES", mS.volStruct.volumeName, inodeNumber)
return blunder.NewError(blunder.PermDeniedError, "EACCES")
}

@@ -2934,7 +2945,9 @@ func (mS *mountStruct) VolumeName() (volumeName string) {

func (mS *mountStruct) Write(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, offset uint64, buf []byte, profiler *utils.Profiler) (size uint64, err error) {

logger.Tracef("fs.Write(): starting volume '%s' inode %d offset %d len %d",
logger.Tracef("fs.Write() entry: volume '%s' inode %d offset %d len %d",
mS.volStruct.volumeName, inodeNumber, offset, len(buf))
defer logger.Tracef("fs.Write() return: volume '%s' inode %d offset %d len %d",
mS.volStruct.volumeName, inodeNumber, offset, len(buf))

inodeLock, err := mS.volStruct.initInodeLock(inodeNumber, nil)
@@ -2949,10 +2962,14 @@ func (mS *mountStruct) Write(userID inode.InodeUserID, groupID inode.InodeGroupI

if !mS.volStruct.VolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK) {
err = blunder.NewError(blunder.NotFoundError, "ENOENT")
logger.Tracef("fs.Write() error: volume '%s' inode %d offset %d len %d err %v",
mS.volStruct.volumeName, inodeNumber, offset, len(buf), err)
return
}
if !mS.volStruct.VolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK) {
err = blunder.NewError(blunder.PermDeniedError, "EACCES")
logger.Tracef("fs.Write() error: volume '%s' inode %d offset %d len %d err %v",
mS.volStruct.volumeName, inodeNumber, offset, len(buf), err)
return
}

@@ -2961,13 +2978,16 @@ func (mS *mountStruct) Write(userID inode.InodeUserID, groupID inode.InodeGroupI
profiler.AddEventNow("after inode.Write()")
// write to Swift presumably succeeds or fails as a whole
if err != nil {
logger.Tracef("fs.Write() error: volume '%s' inode %d offset %d len %d err %v",
mS.volStruct.volumeName, inodeNumber, offset, len(buf), err)
return 0, err
}

logger.Tracef("fs.Write(): tracking write volume '%s' inode %d", mS.volStruct.volumeName, inodeNumber)
logger.Tracef("fs.Write(): tracking write to volume '%s' inode %d", mS.volStruct.volumeName, inodeNumber)
mS.volStruct.trackInFlightFileInodeData(inodeNumber)
size = uint64(len(buf))
stats.IncrementOperations(&stats.FsWriteOps)

return
}

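The additions throughout fs/api_internal.go follow one pattern: a Tracef at function entry paired with a deferred Tracef that fires on return. A stripped-down, self-contained sketch of that pattern (the tracef helper stands in for logger.Tracef; this is not code from the repository):

```go
package main

import "log"

// tracef stands in for logger.Tracef so the sketch runs without the real
// logger package.
func tracef(format string, args ...interface{}) { log.Printf(format, args...) }

// flush demonstrates the entry/return tracing pattern used in this diff: the
// first tracef runs immediately, and the deferred one runs whenever the
// function returns, including early returns, so each call leaves a matched
// entry/return pair in the trace log.
func flush(volumeName string, inodeNumber uint64) error {
	tracef("fs.Flush() entry: volume '%s' inode %d", volumeName, inodeNumber)
	defer tracef("fs.Flush() return: volume '%s' inode %d", volumeName, inodeNumber)

	// ... lock the inode, flush in-flight data, checkpoint ...
	return nil
}

func main() {
	_ = flush("CommonVolume", 204)
}
```

Note that the arguments of a deferred call are evaluated at the defer statement, which is fine here because the volume name and inode number do not change; logging a value such as err with its final state would require a closure (defer func() { ... }()).
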
16 changes: 14 additions & 2 deletions headhunter/api_internal.go
@@ -1083,6 +1083,10 @@ func (volume *volumeStruct) FetchNonce() (nonce uint64, err error) {
}

func (volume *volumeStruct) GetInodeRec(inodeNumber uint64) (value []byte, ok bool, err error) {

logger.Tracef("headhunger.GetInodeRec() entry: volume '%s' inode %d", volume.volumeName, inodeNumber)
defer logger.Tracef("headhunger.GetInodeRec() return: volume '%s' inode %d", volume.volumeName, inodeNumber)

volume.Lock()
valueAsValue, ok, err := volume.inodeRecWrapper.bPlusTree.GetByKey(inodeNumber)
if nil != err {
@@ -1104,7 +1108,8 @@ func (volume *volumeStruct) GetInodeRec(inodeNumber uint64) (value []byte, ok bo

func (volume *volumeStruct) PutInodeRec(inodeNumber uint64, value []byte) (err error) {

logger.Tracef("headhunger.PutInodeRec(): volume '%s' inode %d", volume.volumeName, inodeNumber)
logger.Tracef("headhunger.PutInodeRec() entry: volume '%s' inode %d", volume.volumeName, inodeNumber)
defer logger.Tracef("headhunger.PutInodeRec() return: volume '%s' inode %d", volume.volumeName, inodeNumber)

valueToTree := make([]byte, len(value))
copy(valueToTree, value)
@@ -1130,7 +1135,8 @@ func (volume *volumeStruct) PutInodeRec(inodeNumber uint64, value []byte) (err e

func (volume *volumeStruct) PutInodeRecs(inodeNumbers []uint64, values [][]byte) (err error) {

logger.Tracef("headhunter.PutInodeRecs(): volume '%s' inodes %v", volume.volumeName, inodeNumbers)
logger.Tracef("headhunter.PutInodeRecs() entry: volume '%s' inodes %v", volume.volumeName, inodeNumbers)
defer logger.Tracef("headhunter.PutInodeRecs() return: volume '%s' inodes %v", volume.volumeName, inodeNumbers)

if len(inodeNumbers) != len(values) {
err = fmt.Errorf("InodeNumber and Values array don't match")
@@ -1196,6 +1202,12 @@ func (volume *volumeStruct) GetLogSegmentRec(logSegmentNumber uint64) (value []b
}

func (volume *volumeStruct) PutLogSegmentRec(logSegmentNumber uint64, value []byte) (err error) {

logger.Tracef("headhunter.PutLogSegmentRec() entry: volume '%s' Segment %v",
volume.volumeName, logSegmentNumber)
defer logger.Tracef("headhunter.PutLogSegmentRec() return: volume '%s' Segment %v",
volume.volumeName, logSegmentNumber)

valueToTree := make([]byte, len(value))
copy(valueToTree, value)

50 changes: 48 additions & 2 deletions inode/file.go
@@ -404,6 +404,14 @@ func decrementLogSegmentMapFileData(fileInode *inMemoryInodeStruct, logSegmentNu
// `recordWrite` is called by `Write` and `Wrote` to update the file inode
// payload's record of the extents that compose the file.
func recordWrite(fileInode *inMemoryInodeStruct, fileOffset uint64, length uint64, logSegmentNumber uint64, logSegmentOffset uint64) (err error) {

logger.Tracef(
"recordWrite() entry: volume '%s' inode %d offset %d len %d stored in logSegment %d offset %d",
fileInode.volume.volumeName, fileInode.InodeNumber, fileOffset, length,
logSegmentNumber, logSegmentOffset)
defer logger.Tracef("recordWrite() return: volume '%s' inode %d offset %d len %d",
fileInode.volume.volumeName, fileInode.InodeNumber, fileOffset, length)

extents := fileInode.payload.(sortedmap.BPlusTree)

// First we need to eliminate extents or portions thereof that overlap the specified write
@@ -426,7 +434,14 @@ func recordWrite(fileInode *inMemoryInodeStruct, fileOffset uint64, length uint6
}
leftExtent := extentValue.(*fileExtentStruct)
if (leftExtent.FileOffset + leftExtent.Length) > fileOffset {

// Yes, we need to split the preceding extent
logger.Tracef("recordWrite(): splitting extent volume '%s' inode %d"+
" offset %d len %d logSegmentNumber %d logSegmentOffset %d",
fileInode.volume.volumeName, fileInode.InodeNumber,
leftExtent.FileOffset, leftExtent.Length,
leftExtent.LogSegmentNumber, leftExtent.LogSegmentOffset)

splitOutSize := (leftExtent.FileOffset + leftExtent.Length) - fileOffset
leftExtent.Length -= splitOutSize
ok, patchByIndexErr := extents.PatchByIndex(extentIndex, leftExtent)
@@ -472,7 +487,14 @@ func recordWrite(fileInode *inMemoryInodeStruct, fileOffset uint64, length uint6
break
}
if (fileOffset + length) >= (leftExtent.FileOffset + leftExtent.Length) {

// This extent entirely overwritten... just delete it
logger.Tracef("recordWrite(): deleting extent volume '%s' inode %d"+
" offset %d len %d logSegmentNumber %d logSegmentOffset %d",
fileInode.volume.volumeName, fileInode.InodeNumber,
leftExtent.FileOffset, leftExtent.Length,
leftExtent.LogSegmentNumber, leftExtent.LogSegmentOffset)

decrementLogSegmentMapFileData(fileInode, leftExtent.LogSegmentNumber, leftExtent.Length)
ok, deleteByIndexErr := extents.DeleteByIndex(extentIndex)
if nil != deleteByIndexErr {
@@ -492,6 +514,12 @@ func recordWrite(fileInode *inMemoryInodeStruct, fileOffset uint64, length uint6
unexpectedErr := fmt.Errorf("unexpected extents indexing problem")
panic(unexpectedErr)
}
logger.Tracef("recordWrite(): pruning extent volume '%s' inode %d"+
" offset %d len %d logSegmentNumber %d logSegmentOffset %d",
fileInode.volume.volumeName, fileInode.InodeNumber,
leftExtent.FileOffset, leftExtent.Length,
leftExtent.LogSegmentNumber, leftExtent.LogSegmentOffset)

overlapSize := (fileOffset + length) - leftExtent.FileOffset
rightExtent := &fileExtentStruct{
FileOffset: leftExtent.FileOffset + overlapSize,
@@ -541,7 +569,13 @@ func recordWrite(fileInode *inMemoryInodeStruct, fileOffset uint64, length uint6
}

if (nil != prevExtent) && (prevExtent.LogSegmentNumber == logSegmentNumber) && ((prevExtent.FileOffset + prevExtent.Length) == fileOffset) && ((prevExtent.LogSegmentOffset + prevExtent.Length) == logSegmentOffset) {

// APPEND Case: We are able to simply lengthen prevExtent
logger.Tracef("recordWrite(): growing extent volume '%s' inode %d"+
" offset %d len %d logSegmentNumber %d logSegmentOffset %d",
fileInode.volume.volumeName, fileInode.InodeNumber,
prevExtent.FileOffset, prevExtent.Length,
prevExtent.LogSegmentNumber, prevExtent.LogSegmentOffset)

prevExtent.Length += length
ok, patchByIndexErr := extents.PatchByIndex(prevIndex, prevExtent)
@@ -554,6 +588,10 @@ func recordWrite(fileInode *inMemoryInodeStruct, fileOffset uint64, length uint6
}
} else {
// Non-APPEND Case: We need to insert a new extent
logger.Tracef("recordWrite(): inserting extent volume '%s' inode %d"+
" offset %d len %d logSegmentNumber %d logSegmentOffset %d",
fileInode.volume.volumeName, fileInode.InodeNumber,
fileOffset, length, logSegmentNumber, logSegmentOffset)

newExtent := &fileExtentStruct{
FileOffset: fileOffset,
@@ -584,8 +622,6 @@ func (vS *volumeStruct) Write(fileInodeNumber InodeNumber, offset uint64, buf []
return
}

fileInode.dirty = true

logSegmentNumber, logSegmentOffset, err := vS.doSendChunk(fileInode, buf)
if nil != err {
logger.ErrorWithError(err)
@@ -605,6 +641,10 @@ func (vS *volumeStruct) Write(fileInodeNumber InodeNumber, offset uint64, buf []
offsetJustAfterWhereBufLogicallyWritten := offset + length

if offsetJustAfterWhereBufLogicallyWritten > startingSize {
logger.Tracef("vs.Write(): growing file size %d to %d volume '%s' inode %d offset %d len %d",
fileInode.Size, offsetJustAfterWhereBufLogicallyWritten,
vS.volumeName, fileInodeNumber, offset, len(buf))

fileInode.Size = offsetJustAfterWhereBufLogicallyWritten
}

@@ -617,6 +657,7 @@ func (vS *volumeStruct) Write(fileInodeNumber InodeNumber, offset uint64, buf []
fileInode.AttrChangeTime = updateTime
fileInode.ModificationTime = updateTime
fileInode.NumWrites++
fileInode.dirty = true
Contributor:

I am curious - did you have a specific test which was failing that caused you to find this bug?
Or, simply by reading the code did you notice this was not set correctly?

Contributor:

Oh - I see you just moved it down further with the rest of the changes.


return
}
@@ -673,13 +714,18 @@ func (vS *volumeStruct) Wrote(fileInodeNumber InodeNumber, fileOffset uint64, ob

if patchOnly {
if (fileOffset + length) > fileInode.Size {
logger.Tracef("vs.Wrote(): growing file size %d to %d volume '%s' inode %d",
fileInode.Size, fileOffset+length, vS.volumeName, fileInodeNumber)

fileInode.Size = fileOffset + length
}

updateTime := time.Now()
fileInode.ModificationTime = updateTime
fileInode.AttrChangeTime = updateTime
} else {
logger.Tracef("vs.Wrote(): setting file size to %d volume '%s' inode %d",
length, vS.volumeName, fileInodeNumber)
err = setSizeInMemory(fileInode, length)
if err != nil {
logger.ErrorWithError(err)
7 changes: 7 additions & 0 deletions inode/file_flusher.go
@@ -321,6 +321,13 @@ func (vS *volumeStruct) doSendChunk(fileInode *inMemoryInodeStruct, buf []byte)
}

func (vS *volumeStruct) doFileInodeDataFlush(fileInode *inMemoryInodeStruct) (err error) {

logger.Tracef("volumeStruct.doFileInodeDataFlush(): entry volume '%s' inode %d dirty %v openSegment %p",
vS.volumeName, fileInode.onDiskInodeV1Struct.InodeNumber,
fileInode.dirty, fileInode.openLogSegment)
defer logger.Tracef("volumeStruct.doFileInodeDataFlush(): return volume '%s' inode %d",
vS.volumeName, fileInode.onDiskInodeV1Struct.InodeNumber)

fileInode.Lock()

if nil != fileInode.openLogSegment {
28 changes: 24 additions & 4 deletions inode/inode.go
@@ -48,7 +48,7 @@ type onDiskInodeV1Struct struct { // Preceded "on disk" by CorruptionDetected th
UserID InodeUserID
GroupID InodeGroupID
StreamMap map[string][]byte
PayloadObjectNumber uint64 // DirInode: B+Tree Root with Key == dir_entry_name, Value = InodeNumber
PayloadObjectNumber uint64 // DirInode: B+Tree Root with Key == entry_name, Value = InodeNumber
PayloadObjectLength uint64 // FileInode: B+Tree Root with Key == fileOffset, Value = fileExtent
SymlinkTarget string // SymlinkInode: target path of symbolic link
LogSegmentMap map[uint64]uint64 // FileInode: Key == LogSegment#, Value = file user data byte count
@@ -86,7 +86,8 @@ func (vS *volumeStruct) fetchOnDiskInode(inodeNumber InodeNumber) (inMemoryInode
version Version
)

logger.Tracef("inode.fetchOnDiskInode(): volume '%s' inode %d", vS.volumeName, inodeNumber)
logger.Tracef("inode.fetchOnDiskInode() entry: volume '%s' inode %d", vS.volumeName, inodeNumber)
defer logger.Tracef("inode.fetchOnDiskInode() return: volume '%s' inode %d", vS.volumeName, inodeNumber)

inodeRec, ok, err = vS.headhunterVolumeHandle.GetInodeRec(uint64(inodeNumber))
if nil != err {
@@ -368,6 +369,9 @@ func (vS *volumeStruct) flushInodes(inodes []*inMemoryInodeStruct) (err error) {
emptyLogSegments = make([]uint64, 0)

for _, inode = range inodes {
logger.Tracef("flushInodes(): flushing volume '%s' inode %d dirty %v",
vS.volumeName, inode.InodeNumber, inode.dirty)

if FileType == inode.InodeType {
err = vS.doFileInodeDataFlush(inode)
if nil != err {
@@ -378,6 +382,12 @@ func (vS *volumeStruct) flushInodes(inodes []*inMemoryInodeStruct) (err error) {
emptyLogSegmentsThisInode = make([]uint64, 0)
for logSegmentNumber, logSegmentValidBytes = range inode.LogSegmentMap {
if 0 == logSegmentValidBytes {
if !inode.dirty {
err = fmt.Errorf(
"Logic error: empy log segment so inode.dirty should be true")
logger.ErrorWithError(err)
return
}
emptyLogSegmentsThisInode = append(emptyLogSegmentsThisInode, logSegmentNumber)
}
}
@@ -397,13 +407,16 @@ }
}
if payloadObjectNumber > inode.PayloadObjectNumber {
if !inode.dirty {
err = fmt.Errorf("Logic error: inode.dirty should have been true")
err = fmt.Errorf("Logic error: payload so inode.dirty should be true")
logger.ErrorWithError(err)
err = blunder.AddError(err, blunder.InodeFlushError)
return
}
inode.PayloadObjectNumber = payloadObjectNumber
inode.PayloadObjectLength = payloadObjectLength

logger.Tracef("flushInodes(): flushed payload for volume '%s' inode %d to object %d",
vS.volumeName, inode.InodeNumber, payloadObjectNumber)
}
}
if inode.dirty {
@@ -429,6 +442,9 @@ func (vS *volumeStruct) flushInodes(inodes []*inMemoryInodeStruct) (err error) {

// Go update HeadHunter (if necessary)
if 0 < len(dirtyInodeNumbers) {
logger.Tracef("flushInodes(): flushing %d dirty inodes to headhunter on volume '%s'",
len(dirtyInodeNumbers), vS.volumeName)

err = vS.headhunterVolumeHandle.PutInodeRecs(dirtyInodeNumbers, dirtyInodeRecs)
if nil != err {
logger.ErrorWithError(err)
@@ -446,6 +462,9 @@ func (vS *volumeStruct) flushInodes(inodes []*inMemoryInodeStruct) (err error) {
// Now do phase one of garbage collection
if 0 < len(emptyLogSegments) {
for _, logSegmentNumber = range emptyLogSegments {

logger.Tracef("flushInodes(): deleting log segment volume '%s' log segment %d",
vS.volumeName, logSegmentNumber)
err = vS.deleteLogSegmentAsync(logSegmentNumber, checkpointDoneWaitGroup)
if nil != err {
logger.WarnfWithError(err, "couldn't delete garbage log segment")
@@ -747,7 +766,8 @@ func (vS *volumeStruct) Purge(inodeNumber InodeNumber) (err error) {

func (vS *volumeStruct) Destroy(inodeNumber InodeNumber) (err error) {

logger.Tracef("inode.Destroy(): volume '%s' inode %d", vS.volumeName, inodeNumber)
logger.Tracef("inode.Destroy() entry: volume '%s' inode %d", vS.volumeName, inodeNumber)
defer logger.Tracef("inode.Destroy() return: volume '%s' inode %d", vS.volumeName, inodeNumber)

ourInode, ok, err := vS.fetchInode(inodeNumber)
if nil != err {
2 changes: 1 addition & 1 deletion logger/api_test.go
@@ -30,7 +30,7 @@ func TestAPI(t *testing.T) {
"Stats.UDPPort=52184",
"Stats.BufferLength=100",
"Stats.MaxLatency=1s",
"Logging.TraceLevelLogging=logger",
"Logging.TraceLevelLogging=logger fs headhunter",
}

confMap, err := conf.MakeConfMapFromStrings(confStrings)
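The updated test also documents the knob that turns these trace points on: Logging.TraceLevelLogging takes a space-separated list of package names. A sketch of exercising that setting outside TestAPI (the test name, package name, and import path are assumptions, not taken from the repository):

```go
package logger_test

import (
	"testing"

	// Import path is an assumption about the repository layout at the time
	// of this PR; adjust it to wherever the conf package actually lives.
	"github.com/swiftstack/ProxyFS/conf"
)

// TestTraceLevelLoggingConf only shows that Logging.TraceLevelLogging takes a
// space-separated list of package names, matching the updated confStrings in
// logger/api_test.go.
func TestTraceLevelLoggingConf(t *testing.T) {
	confStrings := []string{
		"Stats.UDPPort=52184",
		"Stats.BufferLength=100",
		"Stats.MaxLatency=1s",
		// Emit Tracef output for the logger, fs, and headhunter packages.
		"Logging.TraceLevelLogging=logger fs headhunter",
	}

	confMap, err := conf.MakeConfMapFromStrings(confStrings)
	if err != nil {
		t.Fatalf("MakeConfMapFromStrings failed: %v", err)
	}
	_ = confMap // in TestAPI this map is handed to the logger setup
}
```
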