This repository has been archived by the owner on Aug 14, 2021. It is now read-only.

Merge pull request #48 from kmova/sync-master-jul18
Sync with master by resolving conflicts. Also pass mountOptions from the StorageClass to the PV.
kmova authored Jul 18, 2018
2 parents afd4d17 + 314d894 commit bcb41cf
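The mountOptions change called out in the commit message means that options declared on a StorageClass are now copied into the spec.mountOptions of each PersistentVolume the provisioner creates. A minimal sketch of such a class — the name, provisioner string, and options below are illustrative, not taken from this commit:

```yaml
# Illustrative StorageClass: metadata.name, the provisioner string, and the
# mount options are example values, not ones defined by this commit.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: example-nfs-class
provisioner: example.com/nfs
mountOptions:
  - vers=4.1   # now copied verbatim into each provisioned PV's spec.mountOptions
  - noatime
```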
Showing 23,671 changed files with 397,410 additions and 7,813,601 deletions. Only the first 3,000 changed files are shown below.
3 changes: 0 additions & 3 deletions .gitignore
@@ -66,9 +66,6 @@ flycheck_*.el
 /snapshot/deploy/docker/controller/snapshot-controller
 /snapshot/deploy/docker/provisioner/snapshot-provisioner
 
-# cinder binary
-/openstack/standalone-cinder/cinder-provisioner
-
 # digitalocean binaries
 /digitalocean/digitalocean-flexplugin
 /digitalocean/deploy/docker/digitalocean-flexplugin
1 change: 1 addition & 0 deletions .golintignore
@@ -1 +1,2 @@
 snapshot
+nfs/test/e2e
2 changes: 1 addition & 1 deletion .travis.yml
@@ -2,7 +2,7 @@ language: go
 env:
 - CHANGE_MINIKUBE_NONE_USER=true
 go:
-- 1.8.3
+- 1.10.3
 services: docker
 
 install: true
36 changes: 15 additions & 21 deletions Makefile
@@ -12,14 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-all: aws/efs ceph/cephfs ceph/rbd flex gluster/block gluster/glusterfs gluster/file iscsi/targetd local-volume/provisioner nfs-client nfs snapshot openstack/standalone-cinder
+all: aws/efs ceph/cephfs ceph/rbd flex gluster/block gluster/glusterfs gluster/file iscsi/targetd local-volume/provisioner nfs-client nfs snapshot
 .PHONY: all
 
-clean: clean-aws/efs clean-ceph/cephfs clean-ceph/rbd clean-flex clean-gluster/block clean-gluster/glusterfs clean-iscsi/targetd clean-local-volume/provisioner clean-nfs-client clean-nfs clean-openebs clean-snapshot clean-openstack/standalone-cinder
+clean: clean-aws/efs clean-ceph/cephfs clean-ceph/rbd clean-flex clean-gluster/block clean-gluster/glusterfs clean-iscsi/targetd clean-local-volume/provisioner clean-nfs-client clean-nfs clean-openebs clean-snapshot
 .PHONY: clean
 
-
-test: test-aws/efs test-local-volume/provisioner test-nfs test-snapshot test-openstack/standalone-cinder
+test: test-aws/efs test-local-volume/provisioner test-nfs test-snapshot
 .PHONY: test
 
 verify:
@@ -127,6 +126,11 @@ test-local-volume/provisioner:
 	go test ./...
 .PHONY: test-local-volume/provisioner
 
+test-local-volume/helm:
+	cd local-volume/helm; \
+	./test/run.sh
+.PHONY: test-local-volume/helm
+
 clean-local-volume/provisioner:
 	cd local-volume/provisioner; \
 	make clean
@@ -142,12 +146,12 @@ clean-nfs-client:
 	rm -f nfs-client-provisioner
 .PHONY: clean-nfs-client
 
-nfs: 
+nfs:
 	cd nfs; \
 	make container
 .PHONY: nfs
 
-test-nfs: 
+test-nfs:
 	cd nfs; \
 	make test
 .PHONY: test-nfs
@@ -182,21 +186,6 @@ clean-snapshot:
 	make clean
 .PHONY: clean-snapshot
 
-openstack/standalone-cinder:
-	cd openstack/standalone-cinder; \
-	make
-.PHONY: openstack/standalone-cinder
-
-test-openstack/standalone-cinder:
-	cd openstack/standalone-cinder; \
-	make test
-.PHONY: test-openstack/standalone-cinder
-
-clean-openstack/standalone-cinder:
-	cd openstack/standalone-cinder; \
-	make clean
-.PHONY: clean-openstack/standalone-cinder
-
 test-snapshot:
 	cd snapshot; \
 	make test
@@ -252,6 +241,11 @@ push-nfs-provisioner:
 	make push
 .PHONY: push-nfs-provisioner
 
+push-flex-provisioner:
+	cd flex; \
+	make push
+.PHONY: push-flex-provisioner
+
 push-openebs-provisioner:
 	cd openebs; \
 	make push
2 changes: 1 addition & 1 deletion README.md
@@ -7,7 +7,7 @@
 This repository houses community-maintained external provisioners plus a helper library for building them. Each provisioner is contained in its own directory so for information on how to use one, enter its directory and read its documentation. The library is contained in the `lib` directory.
 
 ### What is an 'external provisioner'?
-An external provisioner is a dynamic PV provisioner whose code lives out-of-tree/external to Kubernetes. Unlike [in-tree dynamic provisioners](https://kubernetes.io/docs/user-guide/persistent-volumes/#aws) that run as part of the Kubernetes controller manager, external ones can be deployed & updated independently.
+An external provisioner is a dynamic PV provisioner whose code lives out-of-tree/external to Kubernetes. Unlike [in-tree dynamic provisioners](https://kubernetes.io/docs/concepts/storage/storage-classes/#provisioner) that run as part of the Kubernetes controller manager, external ones can be deployed & updated independently.
 
 External provisioners work just like in-tree dynamic PV provisioners. A `StorageClass` object can specify an external provisioner instance to be its `provisioner` like it can in-tree provisioners. The instance will then watch for `PersistentVolumeClaims` that ask for the `StorageClass` and automatically create `PersistentVolumes` for them. For more information on how dynamic provisioning works, see [the docs](http://kubernetes.io/docs/user-guide/persistent-volumes/) or [this blog post](http://blog.kubernetes.io/2016/10/dynamic-provisioning-and-storage-in-kubernetes.html).
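To make the README's description concrete, here is a sketch of the two objects involved: a StorageClass naming an external provisioner, and a claim that triggers it. All names and the provisioner string are illustrative:

```yaml
# Illustrative objects only; the provisioner string must match the identity
# the external provisioner instance was started with.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: example-class
provisioner: example.com/external-provisioner
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-claim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: example-class   # asks for this class, so the external provisioner acts
  resources:
    requests:
      storage: 1Gi
```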
14 changes: 14 additions & 0 deletions SECURITY_CONTACTS
@@ -0,0 +1,14 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Team to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+#
+# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
+# INSTRUCTIONS AT https://kubernetes.io/security/
+
+wongma7
+jsafrane
2 changes: 1 addition & 1 deletion aws/efs/Makefile
@@ -36,7 +36,7 @@ all build:
 	-v $$(pwd):/go/bin \
 	-v $$(pwd)/.go/stdlib:/usr/local/go/pkg/linux_amd64_asdf \
 	-w /go/src/github.com/kubernetes-incubator/external-storage/aws/efs \
-	golang:1.8.3-alpine \
+	golang:1.10.3-alpine \
 	go install -installsuffix "asdf" ./cmd/efs-provisioner
 .PHONY: all build
 
1 change: 1 addition & 0 deletions aws/efs/cmd/efs-provisioner/efs-provisioner.go
@@ -171,6 +171,7 @@ func (p *efsProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error) {
 					ReadOnly: false,
 				},
 			},
+			MountOptions: []string{"vers=4.1"},
 		},
 	}
 	if gidAllocate {
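With the one-line MountOptions change above, every PV the efs-provisioner creates should now pin NFSv4.1. A sketch of the shape of the resulting object, with a placeholder volume name and EFS endpoint (neither appears in this commit):

```yaml
# Approximate shape of a PV as the efs-provisioner would now emit it;
# the name, server, and path are placeholder values.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pvc-example
spec:
  accessModes:
    - ReadWriteMany
  capacity:
    storage: 1Gi
  mountOptions:
    - vers=4.1          # added by the MountOptions field in the diff above
  nfs:
    server: fs-12345678.efs.us-east-1.amazonaws.com
    path: /persistentvolumes/pvc-example
    readOnly: false
```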
2 changes: 1 addition & 1 deletion ceph/cephfs/Dockerfile
@@ -14,7 +14,7 @@
 
 FROM centos:7
 
-ENV CEPH_VERSION "luminous"
+ENV CEPH_VERSION "mimic"
 RUN rpm -Uvh https://download.ceph.com/rpm-$CEPH_VERSION/el7/noarch/ceph-release-1-1.el7.noarch.rpm && \
     yum install -y epel-release && \
     yum install -y --nogpgcheck ceph-common python-cephfs && \
1 change: 1 addition & 0 deletions ceph/cephfs/OWNERS
@@ -1,2 +1,3 @@
 approvers:
 - rootfs
+- cofyc
80 changes: 57 additions & 23 deletions ceph/cephfs/cephfs-provisioner.go
@@ -23,6 +23,7 @@ import (
 	"fmt"
 	"os"
 	"os/exec"
+	"strconv"
 	"strings"
 
 	"github.com/golang/glog"
@@ -60,13 +61,16 @@ type cephFSProvisioner struct {
 	identity string
 	// Namespace secrets will be created in. If empty, secrets will be created in each PVC's namespace.
 	secretNamespace string
+	// enable PVC quota
+	enableQuota bool
 }
 
-func newCephFSProvisioner(client kubernetes.Interface, id string, secretNamespace string) controller.Provisioner {
+func newCephFSProvisioner(client kubernetes.Interface, id string, secretNamespace string, enableQuota bool) controller.Provisioner {
 	return &cephFSProvisioner{
 		client:          client,
 		identity:        id,
 		secretNamespace: secretNamespace,
+		enableQuota:     enableQuota,
 	}
 }
 
@@ -112,23 +116,39 @@ func (p *cephFSProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error) {
 	if options.PVC.Spec.Selector != nil {
 		return nil, fmt.Errorf("claim Selector is not supported")
 	}
-	cluster, adminID, adminSecret, mon, err := p.parseParameters(options.Parameters)
+	cluster, adminID, adminSecret, pvcRoot, mon, deterministicNames, err := p.parseParameters(options.Parameters)
 	if err != nil {
 		return nil, err
 	}
-	// create random share name
-	share := fmt.Sprintf("kubernetes-dynamic-pvc-%s", uuid.NewUUID())
-	// create random user id
-	user := fmt.Sprintf("kubernetes-dynamic-user-%s", uuid.NewUUID())
+	var share, user string
+	if deterministicNames {
+		share = fmt.Sprintf(options.PVC.Name)
+		user = fmt.Sprintf("k8s.%s.%s", options.PVC.Namespace, options.PVC.Name)
+	} else {
+		// create random share name
+		share = fmt.Sprintf("kubernetes-dynamic-pvc-%s", uuid.NewUUID())
+		// create random user id
+		user = fmt.Sprintf("kubernetes-dynamic-user-%s", uuid.NewUUID())
+	}
 	// provision share
 	// create cmd
-	cmd := exec.Command(provisionCmd, "-n", share, "-u", user)
+	args := []string{"-n", share, "-u", user}
+	if p.enableQuota {
+		capacity := options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
+		requestBytes := strconv.FormatInt(capacity.Value(), 10)
+		args = append(args, "-s", requestBytes)
+	}
+	cmd := exec.Command(provisionCmd, args...)
 	// set env
 	cmd.Env = []string{
 		"CEPH_CLUSTER_NAME=" + cluster,
 		"CEPH_MON=" + strings.Join(mon[:], ","),
 		"CEPH_AUTH_ID=" + adminID,
-		"CEPH_AUTH_KEY=" + adminSecret}
+		"CEPH_AUTH_KEY=" + adminSecret,
+		"CEPH_VOLUME_ROOT=" + pvcRoot}
+	if deterministicNames {
+		cmd.Env = append(cmd.Env, "CEPH_VOLUME_GROUP="+options.PVC.Namespace)
+	}
 
 	output, cmdErr := cmd.CombinedOutput()
 	if cmdErr != nil {
@@ -175,16 +195,18 @@ func (p *cephFSProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error) {
 		Spec: v1.PersistentVolumeSpec{
 			PersistentVolumeReclaimPolicy: options.PersistentVolumeReclaimPolicy,
 			AccessModes:                   options.PVC.Spec.AccessModes,
-			Capacity: v1.ResourceList{ //FIXME: kernel cephfs doesn't enforce quota, capacity is not meaningless here.
+			Capacity: v1.ResourceList{
+				// Quotas are supported by the userspace client(ceph-fuse, libcephfs), or kernel client >= 4.17 but only on mimic clusters.
+				// In other cases capacity is meaningless here.
+				// If quota is enabled, provisioner will set ceph.quota.max_bytes on volume path.
 				v1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)],
 			},
 			PersistentVolumeSource: v1.PersistentVolumeSource{
 				CephFS: &v1.CephFSPersistentVolumeSource{
 					Monitors: mon,
 					Path:     res.Path[strings.Index(res.Path, "/"):],
 					SecretRef: &v1.SecretReference{
-						Name: secretName,
-						// TODO https://github.com/kubernetes-incubator/external-storage/issues/309
+						Name:      secretName,
 						Namespace: nameSpace,
 					},
 					User: user,
@@ -219,7 +241,7 @@ func (p *cephFSProvisioner) Delete(volume *v1.PersistentVolume) error {
 	if err != nil {
 		return err
 	}
-	cluster, adminID, adminSecret, mon, err := p.parseParameters(class.Parameters)
+	cluster, adminID, adminSecret, pvcRoot, mon, _, err := p.parseParameters(class.Parameters)
 	if err != nil {
 		return err
 	}
@@ -231,7 +253,8 @@
 		"CEPH_CLUSTER_NAME=" + cluster,
 		"CEPH_MON=" + strings.Join(mon[:], ","),
 		"CEPH_AUTH_ID=" + adminID,
-		"CEPH_AUTH_KEY=" + adminSecret}
+		"CEPH_AUTH_KEY=" + adminSecret,
+		"CEPH_VOLUME_ROOT=" + pvcRoot}
 
 	output, cmdErr := cmd.CombinedOutput()
 	if cmdErr != nil {
@@ -254,16 +277,19 @@
 	return nil
 }
 
-func (p *cephFSProvisioner) parseParameters(parameters map[string]string) (string, string, string, []string, error) {
+func (p *cephFSProvisioner) parseParameters(parameters map[string]string) (string, string, string, string, []string, bool, error) {
 	var (
-		err error
-		mon []string
-		cluster, adminID, adminSecretName, adminSecretNamespace, adminSecret string
+		err                                                                           error
+		mon                                                                           []string
+		cluster, adminID, adminSecretName, adminSecretNamespace, adminSecret, pvcRoot string
+		deterministicNames                                                            bool
 	)
 
 	adminSecretNamespace = "default"
 	adminID = "admin"
 	cluster = "ceph"
+	pvcRoot = "/volumes/kubernetes"
+	deterministicNames = false
 
 	for k, v := range parameters {
 		switch strings.ToLower(k) {
@@ -280,21 +306,26 @@ func (p *cephFSProvisioner) parseParameters(parameters map[string]string) (strin
 			adminSecretName = v
 		case "adminsecretnamespace":
 			adminSecretNamespace = v
+		case "claimroot":
+			pvcRoot = v
+		case "deterministicnames":
+			// On error, strconv.ParseBool() returns false; leave that, as it is a perfectly fine default
+			deterministicNames, _ = strconv.ParseBool(v)
 		default:
-			return "", "", "", nil, fmt.Errorf("invalid option %q", k)
+			return "", "", "", "", nil, false, fmt.Errorf("invalid option %q", k)
 		}
 	}
 	// sanity check
 	if adminSecretName == "" {
-		return "", "", "", nil, fmt.Errorf("missing Ceph admin secret name")
+		return "", "", "", "", nil, false, fmt.Errorf("missing Ceph admin secret name")
 	}
 	if adminSecret, err = p.parsePVSecret(adminSecretNamespace, adminSecretName); err != nil {
-		return "", "", "", nil, fmt.Errorf("failed to get admin secret from [%q/%q]: %v", adminSecretNamespace, adminSecretName, err)
+		return "", "", "", "", nil, false, fmt.Errorf("failed to get admin secret from [%q/%q]: %v", adminSecretNamespace, adminSecretName, err)
 	}
 	if len(mon) < 1 {
-		return "", "", "", nil, fmt.Errorf("missing Ceph monitors")
+		return "", "", "", "", nil, false, fmt.Errorf("missing Ceph monitors")
 	}
-	return cluster, adminID, adminSecret, mon, nil
+	return cluster, adminID, adminSecret, pvcRoot, mon, deterministicNames, nil
 }
 
 func (p *cephFSProvisioner) parsePVSecret(namespace, secretName string) (string, error) {
@@ -318,6 +349,8 @@ var (
 	kubeconfig      = flag.String("kubeconfig", "", "Absolute path to the kubeconfig")
 	id              = flag.String("id", "", "Unique provisioner identity")
 	secretNamespace = flag.String("secret-namespace", "", "Namespace secrets will be created in (default: '', created in each PVC's namespace)")
+	enableQuota     = flag.Bool("enable-quota", false, "Enable PVC quota")
+	metricsPort     = flag.Int("metrics-port", 0, "The port of the metrics server (set to non-zero to enable)")
 )
 
 func main() {
@@ -368,7 +401,7 @@ func main() {
 	// Create the provisioner: it implements the Provisioner interface expected by
 	// the controller
 	glog.Infof("Creating CephFS provisioner %s with identity: %s, secret namespace: %s", prName, prID, *secretNamespace)
-	cephFSProvisioner := newCephFSProvisioner(clientset, prID, *secretNamespace)
+	cephFSProvisioner := newCephFSProvisioner(clientset, prID, *secretNamespace, *enableQuota)
 
 	// Start the provision controller which will dynamically provision cephFS
 	// PVs
@@ -377,6 +410,7 @@
 		prName,
 		cephFSProvisioner,
 		serverVersion.GitVersion,
+		controller.MetricsPort(int32(*metricsPort)),
 	)
 
 	pc.Run(wait.NeverStop)
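Pulling the cephfs changes together: claimRoot and deterministicNames are new StorageClass parameters (keys are matched case-insensitively), quota enforcement is opted into with the new -enable-quota flag (the provisioner then passes the PVC's requested bytes to the provision command as -s and sets ceph.quota.max_bytes on the volume path), and -metrics-port exposes the controller's metrics server. A hedged sketch of a class and claim exercising these knobs — monitor addresses and secret names are illustrative, and the provisioner string should be checked against your deployment:

```yaml
# Example values throughout; only the parameter keys and the deterministic
# naming behaviour come from the diff above.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: cephfs
provisioner: ceph.com/cephfs
parameters:
  monitors: "mon1.example.com:6789,mon2.example.com:6789"
  adminId: admin
  adminSecretName: ceph-admin-secret
  adminSecretNamespace: ceph
  claimRoot: /volumes/kubernetes      # exported to the provision command as CEPH_VOLUME_ROOT
  deterministicNames: "true"          # share = PVC name, user = k8s.<namespace>.<pvc-name>
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: cephfs
  resources:
    requests:
      storage: 5Gi   # with -enable-quota, passed to the provisioner as -s 5368709120
```

Deterministic naming trades the old random share names for predictable, per-namespace ones, which is also why the diff exports CEPH_VOLUME_GROUP with the PVC's namespace.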