upload.go
////
//// This is the Skytap AZCopy Upload utility for AIX and IBM i
//// Created 12/02/2021
////
////
////
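//
// Example invocation (illustrative values only; the storage account name, key,
// and upload path below are placeholders, and the binary name follows the
// usage string printed by main):
//
//   export AZURE_STORAGE_ACCOUNT=mystorageaccount
//   export AZURE_STORAGE_ACCESS_KEY=<key from the Azure portal>
//   ./skyzcopy_upload /backups/mksysb_images mycontainer
//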
package main
import (
"context"
"fmt"
"log"
"math/rand"
"net/url"
"os"
"path/filepath"
"strconv"
"time"

"github.com/Azure/azure-storage-blob-go/azblob"
)
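// randomString returns a pseudo-random non-negative integer rendered as a
// string; it is used below to give an automatically created container a
// unique name.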
func randomString() string {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
return strconv.Itoa(r.Int())
}
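// uploadSingleFile uploads one local file to the supplied container as a
// block blob, using the given file name (including any path) as the blob name.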
func uploadSingleFile(fileName string, containerURL azblob.ContainerURL) {
ctx := context.Background() // This example uses a never-expiring context
// Here's how to upload a blob.
blobURL := containerURL.NewBlockBlobURL(fileName)
file, err := os.Open(fileName)
handleErrors(err)
defer file.Close() // release the local file handle when the function returns
// You can use the low-level PutBlob API to upload files. Low-level APIs are simple wrappers for the Azure Storage REST APIs.
// Note that PutBlob can upload up to 256MB data in one shot. Details: https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob
// Following is commented out intentionally because we will instead use UploadFileToBlockBlob API to upload the blob
// _, err = blobURL.PutBlob(ctx, file, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
// handleErrors(err)
// The high-level API UploadFileToBlockBlob function uploads blocks in parallel for optimal performance, and can handle large files as well.
// This function calls PutBlock/PutBlockList for files larger than 256 MB, and calls PutBlob for smaller files
fmt.Printf("Uploading the file with blob name: %s\n", fileName)
_, err = azblob.UploadFileToBlockBlob(ctx, file, blobURL, azblob.UploadToBlockBlobOptions{
BlockSize: 4 * 1024 * 1024,
Parallelism: 16})
handleErrors(err)
}
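// handleErrors treats the storage service's "container already exists"
// (HTTP 409) response as benign and terminates the program on any other
// non-nil error.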
func handleErrors(err error) {
if err != nil {
if serr, ok := err.(azblob.StorageError); ok { // This error is a service-specific StorageError
switch serr.ServiceCode() { // Compare serviceCode to ServiceCodeXxx constants
case azblob.ServiceCodeContainerAlreadyExists:
fmt.Println("Received 409. Container already exists")
return
}
}
log.Fatal(err)
}
}
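// main validates the command-line arguments, builds a ContainerURL (reusing an
// existing container when a name is supplied, otherwise creating a new one),
// uploads either the single file or every regular file under the directory,
// and finally lists the blobs in the container.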
func main() {
// The first element in os.Args is always the program name,
// so we need at least 2 arguments to have a file name argument.
if len(os.Args) < 2 {
fmt.Println("Missing parameter, usage as follows: \n ./skyzcopy_upload <filename_or_directory> [container_name]")
return
}
fileNameorDirectory := os.Args[1]
fmt.Printf("Azure Blob storage Skytap upload\n")
// From the Azure portal, get your storage account name and key and set environment variables.
accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT"), os.Getenv("AZURE_STORAGE_ACCESS_KEY")
if len(accountName) == 0 || len(accountKey) == 0 {
log.Fatal("Either the AZURE_STORAGE_ACCOUNT or AZURE_STORAGE_ACCESS_KEY environment variable is not set")
}
// Create a default request pipeline using your storage account name and account key.
credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
if err != nil {
log.Fatal("Invalid credentials with error: " + err.Error())
}
p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
var containerURL azblob.ContainerURL
// If a container name was supplied as the second argument, reuse that existing container; otherwise create a new container with a random name.
if len(os.Args) == 3 {
containerName := os.Args[2]
// From the Azure portal, get your storage account blob service URL endpoint.
URL, _ := url.Parse(
fmt.Sprintf("https://%s.blob.core.windows.net/%s", accountName, containerName))
containerURL = azblob.NewContainerURL(*URL, p)
} else {
// Create a random string for the new container
containerName := fmt.Sprintf("ibmupload%s", randomString())
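// Container names must be 3-63 characters of lowercase letters, digits, and
// hyphens, so a numeric suffix on "ibmupload" stays within the naming rules.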
// From the Azure portal, get your storage account blob service URL endpoint.
URL, _ := url.Parse(
fmt.Sprintf("https://%s.blob.core.windows.net/%s", accountName, containerName))
// Create a ContainerURL object that wraps the container URL and a request
// pipeline to make requests.
containerURL = azblob.NewContainerURL(*URL, p)
// Create the container
fmt.Printf("Creating a container named %s\n", containerName)
ctx := context.Background() // This example uses a never-expiring context
_, err = containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
handleErrors(err)
}
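// Determine whether the argument names a single file or a directory tree to walk.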
fileInfo, err := os.Stat(fileNameorDirectory)
handleErrors(err)
if fileInfo.IsDir() {
fileList := make([]string, 0)
err := filepath.Walk(fileNameorDirectory, func(path string, f os.FileInfo, err error) error {
if err != nil {
// Abort the walk on the first error instead of queueing an unreadable path
return err
}
fileList = append(fileList, path)
return nil
})
if err != nil {
fmt.Println(err)
}
for _, file := range fileList {
fileInfo, err := os.Stat(file)
if err != nil {
if os.IsNotExist(err) {
fmt.Printf("Skipping file: %s as this file does not exist.\n", file)
} else {
fmt.Printf("Skipping file: %s due to error opening.\n", file)
}
continue
}
// Only regular, non-empty files are uploaded; directories and empty files are skipped.
if fileInfo.Size() == 0 || fileInfo.IsDir() {
continue
}
uploadSingleFile(file, containerURL)
}
} else {
uploadSingleFile(fileNameorDirectory, containerURL)
}
// List the container that we have created above
fmt.Println("Finished uploading! \n\n Listing the blobs in the container:")
for marker := (azblob.Marker{}); marker.NotDone(); {
ctx := context.Background() // This example uses a never-expiring context
// Get a result segment starting with the blob indicated by the current Marker.
listBlob, err := containerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{})
handleErrors(err)
// ListBlobs returns the start of the next segment; you MUST use this to get
// the next segment (after processing the current result segment).
marker = listBlob.NextMarker
// Process the blobs returned in this result segment (if the segment is empty, the loop body won't execute)
for _, blobInfo := range listBlob.Segment.BlobItems {
fmt.Print(" Blob name: " + blobInfo.Name + "\n")
}
}
}