diff --git a/fs/api_internal.go b/fs/api_internal.go index b47b971a1..9f67d3cce 100644 --- a/fs/api_internal.go +++ b/fs/api_internal.go @@ -170,13 +170,15 @@ func (vS *volumeStruct) inFlightFileInodeDataFlusher(inodeNumber inode.InodeNumb } func (inFlightFileInodeData *inFlightFileInodeDataStruct) inFlightFileInodeDataTracker() { - var ( - flushFirst bool - ) - logger.Tracef("fs.inFlightFileInodeDataTracker(): waiting to flush volume '%s' inode %d", + logger.Tracef("fs.inFlightFileInodeDataTracker() entry: waiting to flush volume '%s' inode %d", + inFlightFileInodeData.volStruct.volumeName, inFlightFileInodeData.InodeNumber) + defer logger.Tracef("fs.inFlightFileInodeDataTracker() return: flush volume '%s' inode %d", inFlightFileInodeData.volStruct.volumeName, inFlightFileInodeData.InodeNumber) + var ( + flushFirst bool + ) select { case flushFirst = <-inFlightFileInodeData.control: // All we needed was the value of flushFirst from control chan @@ -310,6 +312,8 @@ func (mS *mountStruct) doInlineCheckpoint() { var ( err error ) + logger.Tracef("fs.doInlineCheckpoint() entry: volume '%s'", mS.volStruct.volumeName) + defer logger.Tracef("fs.doInlineCheckpoint() return: volume '%s'", mS.volStruct.volumeName) if nil == mS.headhunterVolumeHandle { mS.headhunterVolumeHandle, err = headhunter.FetchVolumeHandle(mS.volStruct.volumeName) @@ -325,6 +329,11 @@ func (mS *mountStruct) doInlineCheckpoint() { } func (mS *mountStruct) Flush(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber) (err error) { + + logger.Tracef("fs.Flush() entry: volume '%s' inode %d", mS.volStruct.volumeName, inodeNumber) + defer logger.Tracef("fs.Flush() return: volume '%s' inode %d", mS.volStruct.volumeName, inodeNumber) + + // checkpoint after the flush completes defer mS.doInlineCheckpoint() inodeLock, err := mS.volStruct.initInodeLock(inodeNumber, nil) @@ -338,9 +347,11 @@ func (mS *mountStruct) Flush(userID inode.InodeUserID, 
groupID inode.InodeGroupI defer inodeLock.Unlock() if !mS.volStruct.VolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK) { + logger.Tracef("fs.Flush(): volume '%s' inode %d err ENOENT", mS.volStruct.volumeName, inodeNumber) return blunder.NewError(blunder.NotFoundError, "ENOENT") } if !mS.volStruct.VolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK) { + logger.Tracef("fs.Flush(): volume '%s' inode %d err EACCES", mS.volStruct.volumeName, inodeNumber) return blunder.NewError(blunder.PermDeniedError, "EACCES") } @@ -2934,7 +2945,9 @@ func (mS *mountStruct) VolumeName() (volumeName string) { func (mS *mountStruct) Write(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, offset uint64, buf []byte, profiler *utils.Profiler) (size uint64, err error) { - logger.Tracef("fs.Write(): starting volume '%s' inode %d offset %d len %d", + logger.Tracef("fs.Write() entry: volume '%s' inode %d offset %d len %d", + mS.volStruct.volumeName, inodeNumber, offset, len(buf)) + defer logger.Tracef("fs.Write() return: volume '%s' inode %d offset %d len %d", mS.volStruct.volumeName, inodeNumber, offset, len(buf)) inodeLock, err := mS.volStruct.initInodeLock(inodeNumber, nil) @@ -2949,10 +2962,14 @@ func (mS *mountStruct) Write(userID inode.InodeUserID, groupID inode.InodeGroupI if !mS.volStruct.VolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK) { err = blunder.NewError(blunder.NotFoundError, "ENOENT") + logger.Tracef("fs.Write() error: volume '%s' inode %d offset %d len %d err %v", + mS.volStruct.volumeName, inodeNumber, offset, len(buf), err) return } if !mS.volStruct.VolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK) { err = blunder.NewError(blunder.PermDeniedError, "EACCES") + logger.Tracef("fs.Write() error: volume '%s' inode %d offset %d len %d err %v", + mS.volStruct.volumeName, inodeNumber, offset, len(buf), err) 
return } @@ -2961,13 +2978,16 @@ func (mS *mountStruct) Write(userID inode.InodeUserID, groupID inode.InodeGroupI profiler.AddEventNow("after inode.Write()") // write to Swift presumably succeeds or fails as a whole if err != nil { + logger.Tracef("fs.Write() error: volume '%s' inode %d offset %d len %d err %v", + mS.volStruct.volumeName, inodeNumber, offset, len(buf), err) return 0, err } - logger.Tracef("fs.Write(): tracking write volume '%s' inode %d", mS.volStruct.volumeName, inodeNumber) + logger.Tracef("fs.Write(): tracking write to volume '%s' inode %d", mS.volStruct.volumeName, inodeNumber) mS.volStruct.trackInFlightFileInodeData(inodeNumber) size = uint64(len(buf)) stats.IncrementOperations(&stats.FsWriteOps) + return } diff --git a/headhunter/api_internal.go b/headhunter/api_internal.go index 284e7509c..b76b8ad33 100644 --- a/headhunter/api_internal.go +++ b/headhunter/api_internal.go @@ -1083,6 +1083,10 @@ func (volume *volumeStruct) FetchNonce() (nonce uint64, err error) { } func (volume *volumeStruct) GetInodeRec(inodeNumber uint64) (value []byte, ok bool, err error) { + + logger.Tracef("headhunter.GetInodeRec() entry: volume '%s' inode %d", volume.volumeName, inodeNumber) + defer logger.Tracef("headhunter.GetInodeRec() return: volume '%s' inode %d", volume.volumeName, inodeNumber) + volume.Lock() valueAsValue, ok, err := volume.inodeRecWrapper.bPlusTree.GetByKey(inodeNumber) if nil != err { @@ -1104,7 +1108,8 @@ func (volume *volumeStruct) GetInodeRec(inodeNumber uint64) (value []byte, ok bo func (volume *volumeStruct) PutInodeRec(inodeNumber uint64, value []byte) (err error) { - logger.Tracef("headhunger.PutInodeRec(): volume '%s' inode %d", volume.volumeName, inodeNumber) + logger.Tracef("headhunter.PutInodeRec() entry: volume '%s' inode %d", volume.volumeName, inodeNumber) + defer logger.Tracef("headhunter.PutInodeRec() return: volume '%s' inode %d", volume.volumeName, inodeNumber) valueToTree := make([]byte, len(value)) copy(valueToTree, value) @@ 
-1130,7 +1135,8 @@ func (volume *volumeStruct) PutInodeRec(inodeNumber uint64, value []byte) (err e func (volume *volumeStruct) PutInodeRecs(inodeNumbers []uint64, values [][]byte) (err error) { - logger.Tracef("headhunter.PutInodeRecs(): volume '%s' inodes %v", volume.volumeName, inodeNumbers) + logger.Tracef("headhunter.PutInodeRecs() entry: volume '%s' inodes %v", volume.volumeName, inodeNumbers) + defer logger.Tracef("headhunter.PutInodeRecs() return: volume '%s' inodes %v", volume.volumeName, inodeNumbers) if len(inodeNumbers) != len(values) { err = fmt.Errorf("InodeNumber and Values array don't match") @@ -1196,6 +1202,12 @@ func (volume *volumeStruct) GetLogSegmentRec(logSegmentNumber uint64) (value []b } func (volume *volumeStruct) PutLogSegmentRec(logSegmentNumber uint64, value []byte) (err error) { + + logger.Tracef("headhunter.PutLogSegmentRec() entry: volume '%s' Segment %v", + volume.volumeName, logSegmentNumber) + defer logger.Tracef("headhunter.PutLogSegmentRec() return: volume '%s' Segment %v", + volume.volumeName, logSegmentNumber) + valueToTree := make([]byte, len(value)) copy(valueToTree, value) diff --git a/inode/file.go b/inode/file.go index 4bca1733d..ac5bae133 100644 --- a/inode/file.go +++ b/inode/file.go @@ -404,6 +404,14 @@ func decrementLogSegmentMapFileData(fileInode *inMemoryInodeStruct, logSegmentNu // `recordWrite` is called by `Write` and `Wrote` to update the file inode // payload's record of the extents that compose the file. 
func recordWrite(fileInode *inMemoryInodeStruct, fileOffset uint64, length uint64, logSegmentNumber uint64, logSegmentOffset uint64) (err error) { + + logger.Tracef( + "recordWrite() entry: volume '%s' inode %d offset %d len %d stored in logSegment %d offset %d", + fileInode.volume.volumeName, fileInode.InodeNumber, fileOffset, length, + logSegmentNumber, logSegmentOffset) + defer logger.Tracef("recordWrite() return: volume '%s' inode %d offset %d len %d", + fileInode.volume.volumeName, fileInode.InodeNumber, fileOffset, length) + extents := fileInode.payload.(sortedmap.BPlusTree) // First we need to eliminate extents or portions thereof that overlap the specified write @@ -426,7 +434,14 @@ func recordWrite(fileInode *inMemoryInodeStruct, fileOffset uint64, length uint6 } leftExtent := extentValue.(*fileExtentStruct) if (leftExtent.FileOffset + leftExtent.Length) > fileOffset { + // Yes, we need to split the preceding extent + logger.Tracef("recordWrite(): splitting extent volume '%s' inode %d"+ + " offset %d len %d logSegmentNumber %d logSegmentOffset %d", + fileInode.volume.volumeName, fileInode.InodeNumber, + leftExtent.FileOffset, leftExtent.Length, + leftExtent.LogSegmentNumber, leftExtent.LogSegmentOffset) + splitOutSize := (leftExtent.FileOffset + leftExtent.Length) - fileOffset leftExtent.Length -= splitOutSize ok, patchByIndexErr := extents.PatchByIndex(extentIndex, leftExtent) @@ -472,7 +487,14 @@ func recordWrite(fileInode *inMemoryInodeStruct, fileOffset uint64, length uint6 break } if (fileOffset + length) >= (leftExtent.FileOffset + leftExtent.Length) { + // This extent entirely overwritten...
just delete it + logger.Tracef("recordWrite(): deleting extent volume '%s' inode %d"+ + " offset %d len %d logSegmentNumber %d logSegmentOffset %d", + fileInode.volume.volumeName, fileInode.InodeNumber, + leftExtent.FileOffset, leftExtent.Length, + leftExtent.LogSegmentNumber, leftExtent.LogSegmentOffset) + decrementLogSegmentMapFileData(fileInode, leftExtent.LogSegmentNumber, leftExtent.Length) ok, deleteByIndexErr := extents.DeleteByIndex(extentIndex) if nil != deleteByIndexErr { @@ -492,6 +514,12 @@ func recordWrite(fileInode *inMemoryInodeStruct, fileOffset uint64, length uint6 unexpectedErr := fmt.Errorf("unexpected extents indexing problem") panic(unexpectedErr) } + logger.Tracef("recordWrite(): pruning extent volume '%s' inode %d"+ + " offset %d len %d logSegmentNumber %d logSegmentOffset %d", + fileInode.volume.volumeName, fileInode.InodeNumber, + leftExtent.FileOffset, leftExtent.Length, + leftExtent.LogSegmentNumber, leftExtent.LogSegmentOffset) + overlapSize := (fileOffset + length) - leftExtent.FileOffset rightExtent := &fileExtentStruct{ FileOffset: leftExtent.FileOffset + overlapSize, @@ -541,7 +569,13 @@ func recordWrite(fileInode *inMemoryInodeStruct, fileOffset uint64, length uint6 } if (nil != prevExtent) && (prevExtent.LogSegmentNumber == logSegmentNumber) && ((prevExtent.FileOffset + prevExtent.Length) == fileOffset) && ((prevExtent.LogSegmentOffset + prevExtent.Length) == logSegmentOffset) { + // APPEND Case: We are able to simply lengthen prevExtent + logger.Tracef("recordWrite(): growing extent volume '%s' inode %d"+ + " offset %d len %d logSegmentNumber %d logSegmentOffset %d", + fileInode.volume.volumeName, fileInode.InodeNumber, + prevExtent.FileOffset, prevExtent.Length, + prevExtent.LogSegmentNumber, prevExtent.LogSegmentOffset) prevExtent.Length += length ok, patchByIndexErr := extents.PatchByIndex(prevIndex, prevExtent) @@ -554,6 +588,10 @@ func recordWrite(fileInode *inMemoryInodeStruct, fileOffset uint64, length uint6 } } else { // 
Non-APPEND Case: We need to insert a new extent + logger.Tracef("recordWrite(): inserting extent volume '%s' inode %d"+ + " offset %d len %d logSegmentNumber %d logSegmentOffset %d", + fileInode.volume.volumeName, fileInode.InodeNumber, + fileOffset, length, logSegmentNumber, logSegmentOffset) newExtent := &fileExtentStruct{ FileOffset: fileOffset, @@ -584,8 +622,6 @@ func (vS *volumeStruct) Write(fileInodeNumber InodeNumber, offset uint64, buf [] return } - fileInode.dirty = true - logSegmentNumber, logSegmentOffset, err := vS.doSendChunk(fileInode, buf) if nil != err { logger.ErrorWithError(err) @@ -605,6 +641,10 @@ func (vS *volumeStruct) Write(fileInodeNumber InodeNumber, offset uint64, buf [] offsetJustAfterWhereBufLogicallyWritten := offset + length if offsetJustAfterWhereBufLogicallyWritten > startingSize { + logger.Tracef("vs.Write(): growing file size %d to %d volume '%s' inode %d offset %d len %d", + fileInode.Size, offsetJustAfterWhereBufLogicallyWritten, + vS.volumeName, fileInodeNumber, offset, len(buf)) + fileInode.Size = offsetJustAfterWhereBufLogicallyWritten } @@ -617,6 +657,7 @@ func (vS *volumeStruct) Write(fileInodeNumber InodeNumber, offset uint64, buf [] fileInode.AttrChangeTime = updateTime fileInode.ModificationTime = updateTime fileInode.NumWrites++ + fileInode.dirty = true return } @@ -673,6 +714,9 @@ func (vS *volumeStruct) Wrote(fileInodeNumber InodeNumber, fileOffset uint64, ob if patchOnly { if (fileOffset + length) > fileInode.Size { + logger.Tracef("vs.Wrote(): growing file size %d to %d volume '%s' inode %d", + fileInode.Size, fileOffset+length, vS.volumeName, fileInodeNumber) + fileInode.Size = fileOffset + length } @@ -680,6 +724,8 @@ func (vS *volumeStruct) Wrote(fileInodeNumber InodeNumber, fileOffset uint64, ob fileInode.ModificationTime = updateTime fileInode.AttrChangeTime = updateTime } else { + logger.Tracef("vs.Wrote(): setting file size to %d volume '%s' inode %d", + length, vS.volumeName, fileInodeNumber) err = 
setSizeInMemory(fileInode, length) if err != nil { logger.ErrorWithError(err) diff --git a/inode/file_flusher.go b/inode/file_flusher.go index 64a275bd3..6760b1b26 100644 --- a/inode/file_flusher.go +++ b/inode/file_flusher.go @@ -321,6 +321,13 @@ func (vS *volumeStruct) doSendChunk(fileInode *inMemoryInodeStruct, buf []byte) } func (vS *volumeStruct) doFileInodeDataFlush(fileInode *inMemoryInodeStruct) (err error) { + + logger.Tracef("volumeStruct.doFileInodeDataFlush(): entry volume '%s' inode %d dirty %v openSegment %p", + vS.volumeName, fileInode.onDiskInodeV1Struct.InodeNumber, + fileInode.dirty, fileInode.openLogSegment) + defer logger.Tracef("volumeStruct.doFileInodeDataFlush(): return volume '%s' inode %d", + vS.volumeName, fileInode.onDiskInodeV1Struct.InodeNumber) + fileInode.Lock() if nil != fileInode.openLogSegment { diff --git a/inode/inode.go b/inode/inode.go index 188d64430..622f5efc4 100644 --- a/inode/inode.go +++ b/inode/inode.go @@ -48,7 +48,7 @@ type onDiskInodeV1Struct struct { // Preceded "on disk" by CorruptionDetected th UserID InodeUserID GroupID InodeGroupID StreamMap map[string][]byte - PayloadObjectNumber uint64 // DirInode: B+Tree Root with Key == dir_entry_name, Value = InodeNumber + PayloadObjectNumber uint64 // DirInode: B+Tree Root with Key == entry_name, Value = InodeNumber PayloadObjectLength uint64 // FileInode: B+Tree Root with Key == fileOffset, Value = fileExtent SymlinkTarget string // SymlinkInode: target path of symbolic link LogSegmentMap map[uint64]uint64 // FileInode: Key == LogSegment#, Value = file user data byte count @@ -86,7 +86,8 @@ func (vS *volumeStruct) fetchOnDiskInode(inodeNumber InodeNumber) (inMemoryInode version Version ) - logger.Tracef("inode.fetchOnDiskInode(): volume '%s' inode %d", vS.volumeName, inodeNumber) + logger.Tracef("inode.fetchOnDiskInode() entry: volume '%s' inode %d", vS.volumeName, inodeNumber) + defer logger.Tracef("inode.fetchOnDiskInode() return: volume '%s' inode %d", vS.volumeName, 
inodeNumber) inodeRec, ok, err = vS.headhunterVolumeHandle.GetInodeRec(uint64(inodeNumber)) if nil != err { @@ -368,6 +369,9 @@ func (vS *volumeStruct) flushInodes(inodes []*inMemoryInodeStruct) (err error) { emptyLogSegments = make([]uint64, 0) for _, inode = range inodes { + logger.Tracef("flushInodes(): flushing volume '%s' inode %d dirty %v", + vS.volumeName, inode.InodeNumber, inode.dirty) + if FileType == inode.InodeType { err = vS.doFileInodeDataFlush(inode) if nil != err { @@ -378,6 +382,12 @@ func (vS *volumeStruct) flushInodes(inodes []*inMemoryInodeStruct) (err error) { emptyLogSegmentsThisInode = make([]uint64, 0) for logSegmentNumber, logSegmentValidBytes = range inode.LogSegmentMap { if 0 == logSegmentValidBytes { + if !inode.dirty { + err = fmt.Errorf( + "Logic error: empty log segment so inode.dirty should be true") + logger.ErrorWithError(err) + return + } emptyLogSegmentsThisInode = append(emptyLogSegmentsThisInode, logSegmentNumber) } } @@ -397,13 +407,16 @@ func (vS *volumeStruct) flushInodes(inodes []*inMemoryInodeStruct) (err error) { } if payloadObjectNumber > inode.PayloadObjectNumber { if !inode.dirty { - err = fmt.Errorf("Logic error: inode.dirty should have been true") + err = fmt.Errorf("Logic error: payload so inode.dirty should be true") logger.ErrorWithError(err) err = blunder.AddError(err, blunder.InodeFlushError) return } inode.PayloadObjectNumber = payloadObjectNumber inode.PayloadObjectLength = payloadObjectLength + + logger.Tracef("flushInodes(): flushed payload for volume '%s' inode %d to object %d", + vS.volumeName, inode.InodeNumber, payloadObjectNumber) } } if inode.dirty { @@ -429,6 +442,9 @@ func (vS *volumeStruct) flushInodes(inodes []*inMemoryInodeStruct) (err error) { // Go update HeadHunter (if necessary) if 0 < len(dirtyInodeNumbers) { + logger.Tracef("flushInodes(): flushing %d dirty inodes to headhunter on volume '%s'", + len(dirtyInodeNumbers), vS.volumeName) + err = 
vS.headhunterVolumeHandle.PutInodeRecs(dirtyInodeNumbers, dirtyInodeRecs) if nil != err { logger.ErrorWithError(err) @@ -446,6 +462,9 @@ func (vS *volumeStruct) flushInodes(inodes []*inMemoryInodeStruct) (err error) { // Now do phase one of garbage collection if 0 < len(emptyLogSegments) { for _, logSegmentNumber = range emptyLogSegments { + + logger.Tracef("flushInodes(): deleting log segment volume '%s' log segment %d", + vS.volumeName, logSegmentNumber) err = vS.deleteLogSegmentAsync(logSegmentNumber, checkpointDoneWaitGroup) if nil != err { logger.WarnfWithError(err, "couldn't delete garbage log segment") @@ -747,7 +766,8 @@ func (vS *volumeStruct) Purge(inodeNumber InodeNumber) (err error) { func (vS *volumeStruct) Destroy(inodeNumber InodeNumber) (err error) { - logger.Tracef("inode.Destroy(): volume '%s' inode %d", vS.volumeName, inodeNumber) + logger.Tracef("inode.Destroy() entry: volume '%s' inode %d", vS.volumeName, inodeNumber) + defer logger.Tracef("inode.Destroy() return: volume '%s' inode %d", vS.volumeName, inodeNumber) ourInode, ok, err := vS.fetchInode(inodeNumber) if nil != err { diff --git a/logger/api_test.go b/logger/api_test.go index 0729b25bc..b4859174e 100644 --- a/logger/api_test.go +++ b/logger/api_test.go @@ -30,7 +30,7 @@ func TestAPI(t *testing.T) { "Stats.UDPPort=52184", "Stats.BufferLength=100", "Stats.MaxLatency=1s", - "Logging.TraceLevelLogging=logger", + "Logging.TraceLevelLogging=logger fs headhunter", } confMap, err := conf.MakeConfMapFromStrings(confStrings) diff --git a/logger/config.go b/logger/config.go index 66a7c55d5..80e8b74a9 100644 --- a/logger/config.go +++ b/logger/config.go @@ -10,6 +10,10 @@ import ( "github.com/swiftstack/ProxyFS/conf" ) +// RFC3339 format with microsecond precision +// +const timeFormat = "2006-01-02T15:04:05.000000Z07:00" + var logFile *os.File = nil // multiWriter groups multiple io.Writers into a single io.Writer. 
(Our @@ -56,7 +60,7 @@ func addLogTarget(writer io.Writer) { } func up(confMap conf.ConfMap) (err error) { - log.SetFormatter(&log.TextFormatter{DisableColors: true}) + log.SetFormatter(&log.TextFormatter{DisableColors: true, TimestampFormat: timeFormat}) // Fetch log file info, if provided logFilePath, _ := confMap.FetchOptionValueString("Logging", "LogFilePath") diff --git a/proxyfsd/default.conf b/proxyfsd/default.conf index 00a784545..397f7506f 100644 --- a/proxyfsd/default.conf +++ b/proxyfsd/default.conf @@ -97,9 +97,19 @@ Debug: false # Log reporting parameters [Logging] LogFilePath: proxyfsd.log -LogToConsole: false # when true, log to stderr even when LogFilePath is set -TraceLevelLogging: none # Enable trace logging on a per-package basis. Supported values: jrpcfs, inode, none (default) -DebugLevelLogging: none # Enable debug logging on a per-package basis. Supported values: ldlm, fs, jrpcfs, inode, none (default) + +# when true, log to stderr even when LogFilePath is set +LogToConsole: false + +# Enable trace logging on a per-package basis. Trace logs are disabled by default unless enabled here. +# Supported values: any combination of "dlm", "fs", "fuse", "headhunter", "inode", "jrpcfs", "logger", +# and "swiftclient" or "none" (default). +TraceLevelLogging: none + +# Enable debug logging on a per-package basis. Debug logs are disabled by default unless enabled here. +# Supported values: any combination of "ldlm", "fs", "jrpcfs" and "inode" or "none" (default). +DebugLevelLogging: none + # NOTE: Log levels other than Trace and Debug are always on. # Stats reporting parameters (must contain either a UDPPort or TCPPort) diff --git a/proxyfsd/logging.conf b/proxyfsd/logging.conf index 632412bcc..d9c621605 100644 --- a/proxyfsd/logging.conf +++ b/proxyfsd/logging.conf @@ -4,7 +4,8 @@ LogFilePath: proxyfsd.log # NOTE: Log levels other than Trace and Debug are always on. # Enable trace logging on a per-package basis. 
Trace logs are disabled by default unless enabled here. -# Supported values: jrpcfs, inode, none (default). +# Supported values: any combination of "dlm", "fs", "fuse", "headhunter", "inode", "jrpcfs", "logger", +# and "swiftclient" or "none" (default). TraceLevelLogging: none # Enable debug logging on a per-package basis. Debug logs are disabled by default unless enabled here. @@ -13,4 +14,3 @@ DebugLevelLogging: none # when true, log to stderr even when LogFilePath is set LogToConsole: false - diff --git a/proxyfsd/saio_logging.conf b/proxyfsd/saio_logging.conf index eec4df5f7..61fb9c606 100644 --- a/proxyfsd/saio_logging.conf +++ b/proxyfsd/saio_logging.conf @@ -4,7 +4,8 @@ LogFilePath: /var/log/proxyfsd/proxyfsd.log # NOTE: Log levels other than Trace and Debug are always on. # Enable trace logging on a per-package basis. Trace logs are disabled by default unless enabled here. -# Supported values: jrpcfs, inode, none (default). +# Supported values: any combination of "dlm", "fs", "fuse", "headhunter", "inode", "jrpcfs", "logger", +# and "swiftclient" or "none" (default). TraceLevelLogging: none # Enable debug logging on a per-package basis. Debug logs are disabled by default unless enabled here. diff --git a/swiftclient/api_test.go b/swiftclient/api_test.go index 257b9497d..06a9ba2c0 100644 --- a/swiftclient/api_test.go +++ b/swiftclient/api_test.go @@ -953,7 +953,7 @@ func parseRetryLogEntry(entry string) map[string]string { ) var fieldRE = regexp.MustCompile( - `^time="([-:0-9A-Z]+)" level=([a-zA-Z]+) msg="([^"]+)" (error="([^"]+)")? ?function=(\w+) (.*)`) + `^time="([-:0-9.A-Z]+)" level=([a-zA-Z]+) msg="([^"]+)" (error="([^"]+)")? 
?function=(\w+) (.*)`) matches = fieldRE.FindStringSubmatch(entry) if matches == nil { diff --git a/swiftclient/object.go b/swiftclient/object.go index 98363e784..e4860292d 100644 --- a/swiftclient/object.go +++ b/swiftclient/object.go @@ -283,14 +283,19 @@ func objectDeleteSync(accountName string, containerName string, objectName strin func objectGetWithRetry(accountName string, containerName string, objectName string, offset uint64, length uint64) ([]byte, error) { - // request is a function that, through the miracle of closure, calls - // objectGet() with the paramaters passed to this function, stashes the - // relevant return values into the local variables of this function, and - // then returns err and whether it is retriable to RequestWithRetry() var ( buf []byte err error ) + logger.Tracef("swiftclient.ObjectGet() entry: account '%s' container %s object %s off %d len %d", + accountName, containerName, objectName, offset, length) + defer logger.Tracef("swiftclient.ObjectGet() return: account '%s' container %s object %s off %d len %d", + accountName, containerName, objectName, offset, length) + + // request is a function that, through the miracle of closure, calls + // objectGet() with the paramaters passed to this function, stashes the + // relevant return values into the local variables of this function, and + // then returns err and whether it is retriable to RequestWithRetry() request := func() (bool, error) { var err error buf, err = objectGet(accountName, containerName, objectName, offset, length) @@ -725,14 +730,21 @@ func (chunkedPutContext *chunkedPutContextStruct) DumpValue(value sortedmap.Valu } func objectFetchChunkedPutContextWithRetry(accountName string, containerName string, objectName string) (*chunkedPutContextStruct, error) { + + var ( + chunkedPutContext *chunkedPutContextStruct + err error + ) + logger.Tracef("swiftclient.ObjectFetchChunkedPutContext() entry: account '%s' container %s object %s", + accountName, containerName, objectName) + 
defer logger.Tracef("swiftclient.ObjectFetchChunkedPutContext() return: account '%s' container %s object %s", + accountName, containerName, objectName) + // request is a function that, through the miracle of closure, calls // objectFetchChunkedPutContext() with the paramaters passed to this // function, stashes the relevant return values into the local variables of // this function, and then returns err and whether it is retriable to // RequestWithRetry() - var ( - chunkedPutContext *chunkedPutContextStruct - ) request := func() (bool, error) { var err error chunkedPutContext, err = objectFetchChunkedPutContext(accountName, containerName, objectName) @@ -746,7 +758,7 @@ func objectFetchChunkedPutContextWithRetry(accountName string, containerName str retryCnt: &stats.SwiftObjFetchPutCtxtRetryOps, retrySuccessCnt: &stats.SwiftObjFetchPutCtxtRetrySuccessOps} ) - err := retryObj.RequestWithRetry(request, &opname, &statnm) + err = retryObj.RequestWithRetry(request, &opname, &statnm) return chunkedPutContext, err } @@ -809,6 +821,11 @@ func (chunkedPutContext *chunkedPutContextStruct) BytesPut() (bytesPut uint64, e func (chunkedPutContext *chunkedPutContextStruct) Close() (err error) { + logger.Tracef("swiftclient.chunkedPutContext.Close() entry: account '%s' container %s object %s", + chunkedPutContext.accountName, chunkedPutContext.containerName, chunkedPutContext.objectName) + defer logger.Tracef("swiftclient.chunkedPutContext.Close() return: account '%s' container %s object %s", + chunkedPutContext.accountName, chunkedPutContext.containerName, chunkedPutContext.objectName) + err = chunkedPutContext.closeHelper() if nil == err { return @@ -933,6 +950,14 @@ func (chunkedPutContext *chunkedPutContextStruct) Read(offset uint64, length uin ok bool readLimitOffset uint64 ) + logger.Tracef("swiftclient.chunkedPutContext.Read() entry: account '%s' container %s object %s"+ + " off %d len %d", + chunkedPutContext.accountName, chunkedPutContext.containerName, 
chunkedPutContext.objectName, + offset, length) + defer logger.Tracef("swiftclient.chunkedPutContext.Read() return: account '%s' container %s object %s"+ + " off %d len %d", + chunkedPutContext.accountName, chunkedPutContext.containerName, chunkedPutContext.objectName, + offset, length) readLimitOffset = offset + length @@ -1164,6 +1189,15 @@ var ( // func (chunkedPutContext *chunkedPutContextStruct) SendChunk(buf []byte) (err error) { + logger.Tracef("swiftclient.chunkedPutContext.SendChunk() entry: account '%s' container %s object %s"+ + " off %d len %d", + chunkedPutContext.accountName, chunkedPutContext.containerName, chunkedPutContext.objectName, + chunkedPutContext.bytesPut, len(buf)) + defer logger.Tracef("swiftclient.chunkedPutContext.SendChunk() return: account '%s' container %s object %s"+ + " off %d len %d", + chunkedPutContext.accountName, chunkedPutContext.containerName, chunkedPutContext.objectName, + chunkedPutContext.bytesPut, len(buf)) + chunkedPutContext.Lock() sendChunkCnt += 1