diff --git a/client/handle_http.go b/client/handle_http.go
index 17540f5ab..87552f9fc 100644
--- a/client/handle_http.go
+++ b/client/handle_http.go
@@ -2397,7 +2397,7 @@ func createWebDavClient(collectionsUrl *url.URL, token string, project string) (
 	return
 }

-// Walk a remote directory in a WebDAV server, emitting the files discovered
+// Walk a remote collection in a WebDAV server, emitting the files discovered
 func (te *TransferEngine) walkDirDownload(job *clientTransferJob, transfers []transferAttemptDetails, files chan *clientTransferFile, url *url.URL) error {
 	// Create the client to walk the filesystem
 	collUrl := job.job.dirResp.XPelNsHdr.CollectionsUrl
@@ -2412,7 +2412,7 @@ func (te *TransferEngine) walkDirDownload(job *clientTransferJob, transfers []tr

 // Helper function for the `walkDirDownload`.
 //
-// Recursively walks through the remote server directory, emitting transfer files
+// Recursively walks through the remote server collection, emitting transfer files
 // for the engine to process.
 func (te *TransferEngine) walkDirDownloadHelper(job *clientTransferJob, transfers []transferAttemptDetails, files chan *clientTransferFile, remotePath string, client *gowebdav.Client) error {
 	// Check for cancelation since the client does not respect the context
@@ -2421,7 +2421,7 @@ func (te *TransferEngine) walkDirDownloadHelper(job *clientTransferJob, transfer
 	}
 	infos, err := client.ReadDir(remotePath)
 	if err != nil {
-		return errors.Wrap(err, "failed to read remote directory")
+		return errors.Wrap(err, "failed to read remote collection")
 	}
 	localBase := strings.TrimPrefix(remotePath, job.job.remoteURL.Path)
 	for _, info := range infos {
@@ -2507,9 +2507,9 @@ func (te *TransferEngine) walkDirUpload(job *clientTransferJob, transfers []tran
 	return err
 }

-// This function performs the ls command by walking through the specified directory and printing the contents of the files
+// This function performs the ls command by walking through the specified collection and printing the contents of the files
 func listHttp(remoteObjectUrl *url.URL, dirResp server_structs.DirectorResponse, token string) (fileInfos []FileInfo, err error) {
-	// Get our directory listing host
+	// Get our collection listing host
 	collectionsUrl := dirResp.XPelNsHdr.CollectionsUrl
 	log.Debugln("Collections URL: ", collectionsUrl.String())

@@ -2528,14 +2528,14 @@ func listHttp(remoteObjectUrl *url.URL, dirResp server_structs.DirectorResponse,
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to stat remote path")
 	}
-	// If the path leads to a file and not a directory, just add the filename
+	// If the path leads to a file and not a collection, just add the filename
 	if !info.IsDir() {
 		// NOTE: we implement our own FileInfo here because the one we get back from stat() does not have a .name field for some reason
 		file := FileInfo{
-			Name:    remotePath,
-			Size:    info.Size(),
-			ModTime: info.ModTime(),
-			IsDir:   false,
+			Name:         remotePath,
+			Size:         info.Size(),
+			ModTime:      info.ModTime(),
+			IsCollection: false,
 		}
 		fileInfos = append(fileInfos, file)
 		return fileInfos, nil
@@ -2545,17 +2545,17 @@ func listHttp(remoteObjectUrl *url.URL, dirResp server_structs.DirectorResponse,
 			return nil, errors.Errorf("405: object listings are not supported by the discovered origin")
 		}
 		// Otherwise, a different error occurred and we should return it
-		return nil, errors.Wrap(err, "failed to read remote directory")
+		return nil, errors.Wrap(err, "failed to read remote collection")
 	}

 	for _, info := range infos {
 		jPath, _ := url.JoinPath(remotePath, info.Name())
 		// Create a FileInfo for the file and append it to the slice
 		file := FileInfo{
-			Name:    jPath,
-			Size:    info.Size(),
-			ModTime: info.ModTime(),
-			IsDir:   info.IsDir(),
+			Name:         jPath,
+			Size:         info.Size(),
+			ModTime:      info.ModTime(),
+			IsCollection: info.IsDir(),
 		}
 		fileInfos = append(fileInfos, file)
 	}
@@ -2616,10 +2616,10 @@ func statHttp(dest *url.URL, dirResp server_structs.DirectorResponse, token stri
 			fsinfo, err := client.Stat(endpoint.Path)
 			if err == nil {
 				info = FileInfo{
-					Name:    endpoint.Path,
-					Size:    fsinfo.Size(),
-					IsDir:   fsinfo.IsDir(),
-					ModTime: fsinfo.ModTime(),
+					Name:         endpoint.Path,
+					Size:         fsinfo.Size(),
+					IsCollection: fsinfo.IsDir(),
+					ModTime:      fsinfo.ModTime(),
 				}
 				break
 			} else if gowebdav.IsErrCode(err, http.StatusMethodNotAllowed) {
@@ -2646,7 +2646,7 @@ func statHttp(dest *url.URL, dirResp server_structs.DirectorResponse, token stri
 			}

 			if info.Size == 0 {
-				if info.IsDir {
+				if info.IsCollection {
 					resultsChan <- statResults{info, nil}
 				}
 				err = errors.New("Stat response did not include a size")
@@ -2655,10 +2655,10 @@

 			resultsChan <- statResults{FileInfo{
-				Name:    endpoint.Path,
-				Size:    info.Size,
-				IsDir:   info.IsDir,
-				ModTime: info.ModTime,
+				Name:         endpoint.Path,
+				Size:         info.Size,
+				IsCollection: info.IsCollection,
+				ModTime:      info.ModTime,
 			}, nil}
 		}(&destCopy)
diff --git a/client/main.go b/client/main.go
index 8117988ad..19d64204e 100644
--- a/client/main.go
+++ b/client/main.go
@@ -49,10 +49,10 @@ var ObjectServersToTry int = 3
 // NOTE: this was created to provide more flexibility to information on a file. The fs.FileInfo interface was causing some issues like not always returning a Name attribute
 // ALSO NOTE: the fields are exported so they can be marshalled into JSON, it does not work otherwise
 type FileInfo struct {
-	Name    string
-	Size    int64
-	ModTime time.Time
-	IsDir   bool
+	Name         string
+	Size         int64
+	ModTime      time.Time
+	IsCollection bool
 }

 // Determine the token name if it is embedded in the scheme, Condor-style
diff --git a/cmd/object_copy.go b/cmd/object_copy.go
index c46564759..277d0cf32 100644
--- a/cmd/object_copy.go
+++ b/cmd/object_copy.go
@@ -53,7 +53,7 @@ func init() {
 	flagSet := copyCmd.Flags()
 	flagSet.StringP("cache", "c", "", "Cache to use")
 	flagSet.StringP("token", "t", "", "Token file to use for transfer")
-	flagSet.BoolP("recursive", "r", false, "Recursively copy a directory. Forces methods to only be http to get the freshest directory contents")
+	flagSet.BoolP("recursive", "r", false, "Recursively copy a collection. Forces methods to only be http to get the freshest collection contents")
 	flagSet.StringP("cache-list-name", "n", "xroot", "(Deprecated) Cache list to use, currently either xroot or xroots; may be ignored")
 	flagSet.Lookup("cache-list-name").Hidden = true
 	// All the deprecated or hidden flags that are only relevant if we are in historical "stashcp mode"
@@ -158,7 +158,7 @@ func copyMain(cmd *cobra.Command, args []string) {
 		log.Errorln("Destination does not exist")
 		os.Exit(1)
 	} else if !destStat.IsDir() {
-		log.Errorln("Destination is not a directory")
+		log.Errorln("Destination is not a collection")
 		os.Exit(1)
 	}
 }
diff --git a/cmd/object_get.go b/cmd/object_get.go
index fba902cb5..15cfbf3e5 100644
--- a/cmd/object_get.go
+++ b/cmd/object_get.go
@@ -45,7 +45,7 @@ func init() {
 	flagSet := getCmd.Flags()
 	flagSet.StringP("cache", "c", "", "Cache to use")
 	flagSet.StringP("token", "t", "", "Token file to use for transfer")
-	flagSet.BoolP("recursive", "r", false, "Recursively download a directory. Forces methods to only be http to get the freshest directory contents")
+	flagSet.BoolP("recursive", "r", false, "Recursively download a collection. Forces methods to only be http to get the freshest collection contents")
 	flagSet.StringP("cache-list-name", "n", "xroot", "(Deprecated) Cache list to use, currently either xroot or xroots; may be ignored")
 	flagSet.Lookup("cache-list-name").Hidden = true
 	flagSet.String("caches", "", "A JSON file containing the list of caches")
diff --git a/cmd/object_ls.go b/cmd/object_ls.go
index 221e7ede6..a8fca1bb0 100644
--- a/cmd/object_ls.go
+++ b/cmd/object_ls.go
@@ -117,12 +117,12 @@ func listMain(cmd *cobra.Command, args []string) error {

 	filteredInfos := []client.FileInfo{}

-	// Filter by object or directory
+	// Filter by object or collection
 	for _, info := range fileInfos {
-		if collectionOnly && !info.IsDir {
+		if collectionOnly && !info.IsCollection {
 			continue
 		}
-		if objectOnly && info.IsDir {
+		if objectOnly && info.IsCollection {
 			continue
 		}
 		filteredInfos = append(filteredInfos, info)
@@ -136,7 +136,7 @@ func listMain(cmd *cobra.Command, args []string) error {
 	if asJSON {
 		jsonData, err := json.Marshal(filteredInfos)
 		if err != nil {
-			return errors.Errorf("failed to marshal object/directory info to JSON format: %v", err)
+			return errors.Errorf("failed to marshal object/collection info to JSON format: %v", err)
 		}
 		fmt.Println(string(jsonData))
 		return nil
@@ -155,7 +155,7 @@ func listMain(cmd *cobra.Command, args []string) error {
 			// Convert the FileInfo to JSON and print it
 			jsonData, err := json.Marshal(jsonInfo)
 			if err != nil {
-				return errors.Errorf("failed to marshal object/directory info to JSON format: %v", err)
+				return errors.Errorf("failed to marshal object/collection info to JSON format: %v", err)
 			}
 			fmt.Println(string(jsonData))
 		} else {
diff --git a/cmd/object_put.go b/cmd/object_put.go
index 94eb1eae0..bd053edc8 100644
--- a/cmd/object_put.go
+++ b/cmd/object_put.go
@@ -41,7 +41,7 @@ var (

 func init() {
 	flagSet := putCmd.Flags()
 	flagSet.StringP("token", "t", "", "Token file to use for transfer")
-	flagSet.BoolP("recursive", "r", false, "Recursively upload a directory. Forces methods to only be http to get the freshest directory contents")
+	flagSet.BoolP("recursive", "r", false, "Recursively upload a collection. Forces methods to only be http to get the freshest collection contents")
 	objectCmd.AddCommand(putCmd)
 }
diff --git a/cmd/object_stat.go b/cmd/object_stat.go
index bbb210101..75bfd154b 100644
--- a/cmd/object_stat.go
+++ b/cmd/object_stat.go
@@ -97,7 +97,7 @@ func statMain(cmd *cobra.Command, args []string) {
 		// Print our stat info in JSON format:
 		jsonData, err := json.Marshal(statInfo)
 		if err != nil {
-			log.Errorf("Failed to parse object/directory stat info to JSON format: %v", err)
+			log.Errorf("Failed to parse object/collection stat info to JSON format: %v", err)
 			os.Exit(1)
 		}
 		fmt.Println(string(jsonData))
@@ -107,7 +107,7 @@ func statMain(cmd *cobra.Command, args []string) {
 		fmt.Println("Name:", statInfo.Name)
 		fmt.Println("Size:", statInfo.Size)
 		fmt.Println("ModTime:", statInfo.ModTime)
-		fmt.Println("IsDir:", statInfo.IsDir)
+		fmt.Println("IsCollection:", statInfo.IsCollection)
 		return
 	}
 }
diff --git a/cmd/plugin_test.go b/cmd/plugin_test.go
index 9dbc9087c..6e1cbd56b 100644
--- a/cmd/plugin_test.go
+++ b/cmd/plugin_test.go
@@ -755,7 +755,7 @@ func TestPluginRecursiveDownload(t *testing.T) {
 		results := make(chan *classads.ClassAd, 5)
 		err = runPluginWorker(fed.Ctx, false, workChan, results)
 		assert.Error(t, err)
-		assert.Contains(t, err.Error(), "failed to read remote directory: PROPFIND /test/test/test.txt/: 500")
+		assert.Contains(t, err.Error(), "failed to read remote collection: PROPFIND /test/test/test.txt/: 500")
 	})

 	t.Run("TestRecursiveFailureDirNotFound", func(t *testing.T) {
diff --git a/director/director.go b/director/director.go
index 9bc119d1e..b160fc361 100644
--- a/director/director.go
+++ b/director/director.go
@@ -744,7 +744,7 @@ func redirectToOrigin(ginCtx *gin.Context) {
 	}
 	ginCtx.JSON(http.StatusMethodNotAllowed, server_structs.SimpleApiResp{
 		Status: server_structs.RespFailed,
-		Msg:    "No origins on specified endpoint allow directory listings",
+		Msg:    "No origins on specified endpoint allow collection listings",
 	})
 }
diff --git a/docs/pages/getting-data-with-pelican/client.mdx b/docs/pages/getting-data-with-pelican/client.mdx
index 65115f0c4..f989afd61 100644
--- a/docs/pages/getting-data-with-pelican/client.mdx
+++ b/docs/pages/getting-data-with-pelican/client.mdx
@@ -180,7 +180,7 @@ pelican object get pelican://
 ```bash
-pelican object get pelican://?pack=tar.gz
+pelican object put pelican://?pack=tar.gz
 ```

 Pelican accepts the following values for the `pack` query:
@@ -192,16 +192,16 @@ Pelican accepts the following values for the `pack` query:
 - For uploading, create the object in the specified format (`tar`, `tar.gz`, `tar.xz`, `zip`, respectively).

 ### Recursive Downloads and Uploads with the `?recursive` Query
-The `?recursive` query can be utilized if the desired remote object is a directory. When this query is enabled, it indicates to Pelican that all sub paths at the level of the provided namespace should be copied recursively. To use this query, run:
+The `?recursive` query can be utilized if the desired remote object is a collection. When this query is enabled, it indicates to Pelican that all sub paths at the level of the provided namespace should be copied recursively. To use this query, run:

 ```bash
-pelican object get pelican://?recursive
+pelican object get pelican://?recursive
 ```

 To upload, you can run something similar but with an `object put`:

 ```bash
-pelican object put pelican://?recursive
+pelican object put pelican://?recursive
 ```

 >**Note:** This query functions the same as specifying the `-r` flag described below.
diff --git a/docs/parameters.yaml b/docs/parameters.yaml
index 0ae99e7db..a4ce75510 100644
--- a/docs/parameters.yaml
+++ b/docs/parameters.yaml
@@ -803,7 +803,7 @@ name: Origin.EnableDirListing
 description: |+
   [Deprecated] Origin.EnableDirListing is being deprecated and will be removed in a future release. It is replaced by Origin.EnableListings.
-  Allows the origin to enable directory listings. Needs to be enabled for recursive
+  Allows the origin to enable collection listings. Needs to be enabled for recursive
   downloads to work properly and for directories to be visible.
 type: bool
 default: false
diff --git a/server_utils/origin.go b/server_utils/origin.go
index a458d2742..f268bbd74 100644
--- a/server_utils/origin.go
+++ b/server_utils/origin.go
@@ -662,7 +662,7 @@ func CheckOriginSentinelLocations(exports []OriginExport) (ok bool, err error) {
 			fullPath := filepath.Join(export.StoragePrefix, sentinelPath)
 			_, err := os.Stat(fullPath)
 			if err != nil {
-				return false, errors.Wrapf(err, "fail to open SentinelLocation %s for StoragePrefix %s. Directory check failed", export.SentinelLocation, export.StoragePrefix)
+				return false, errors.Wrapf(err, "failed to open SentinelLocation %s for StoragePrefix %s. Collection check failed", export.SentinelLocation, export.StoragePrefix)
 			}
 		}
 	}
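Side note on the struct rename: per the "ALSO NOTE" comment in client/main.go above, FileInfo's fields stay exported so encoding/json can marshal them, which means JSON consumers (for example, of the JSON output mode of `pelican object ls`) now see an `IsCollection` key where they previously saw `IsDir`. Below is a minimal sketch of that behavior, separate from the patch itself; the struct definition is copied from the diff, while the surrounding program and its sample values are hypothetical, not Pelican code.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// FileInfo mirrors client/main.go after this patch.
type FileInfo struct {
	Name         string
	Size         int64
	ModTime      time.Time
	IsCollection bool
}

func main() {
	info := FileInfo{
		Name:         "/namespace/example", // hypothetical path
		Size:         0,
		ModTime:      time.Unix(0, 0).UTC(),
		IsCollection: true, // previously the IsDir field
	}
	// With no struct tags, json.Marshal uses the exported field
	// names as keys, so the output contains "IsCollection".
	out, err := json.Marshal(info)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// {"Name":"/namespace/example","Size":0,"ModTime":"1970-01-01T00:00:00Z","IsCollection":true}
}
```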