diff --git a/go.mod b/go.mod index 35292d901..66e722352 100644 --- a/go.mod +++ b/go.mod @@ -1,17 +1,18 @@ module github.com/containernetworking/plugins -go 1.20 +go 1.21 + +toolchain go1.22.1 require ( github.com/Microsoft/hcsshim v0.12.0 github.com/alexflint/go-filemutex v1.3.0 github.com/buger/jsonparser v1.1.1 - github.com/containernetworking/cni v1.1.2 + github.com/containernetworking/cni v1.2.0-rc1 github.com/coreos/go-iptables v0.7.0 github.com/coreos/go-systemd/v22 v22.5.0 github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c github.com/d2g/dhcp4client v1.0.0 - github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5 github.com/godbus/dbus/v5 v5.1.0 github.com/mattn/go-shellwords v1.0.12 github.com/networkplumbing/go-nft v0.4.0 @@ -19,15 +20,18 @@ require ( github.com/onsi/gomega v1.31.1 github.com/opencontainers/selinux v1.11.0 github.com/safchain/ethtool v0.3.0 + github.com/tidwall/gjson v1.17.1 + github.com/tidwall/sjson v1.2.5 github.com/vishvananda/netlink v1.2.1-beta.2 + golang.org/x/net v0.20.0 golang.org/x/sys v0.17.0 ) require ( + github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/containerd/cgroups/v3 v3.0.2 // indirect github.com/containerd/errdefs v0.1.0 // indirect - github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -37,10 +41,11 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/stretchr/testify v1.8.2 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.0 // indirect github.com/vishvananda/netns v0.0.4 // indirect go.opencensus.io v0.24.0 // indirect golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.20.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.17.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect diff --git a/go.sum b/go.sum index da0a7d429..2abe559cb 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,7 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/hcsshim v0.12.0 h1:rbICA+XZFwrBef2Odk++0LjFvClNCJGRK+fsrP254Ts= @@ -9,17 +11,14 @@ github.com/alexflint/go-filemutex v1.3.0/go.mod h1:U0+VA/i30mGBlLCrFPGtTe9y6wGQf github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod 
h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0= github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE= github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM= github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0= -github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= -github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= +github.com/containernetworking/cni v1.2.0-rc1 h1:AKI3+pXtgY4PDLN9+50o9IaywWVuey0Jkw3Lvzp0HCY= +github.com/containernetworking/cni v1.2.0-rc1/go.mod h1:Lt0TQcZQVDju64fYxUhDziTgXCDe3Olzi9I4zZJLWHg= github.com/coreos/go-iptables v0.7.0 h1:XWM3V+MPRr5/q51NuWSgU0fqMad64Zyxs8ZUoMsamr8= github.com/coreos/go-iptables v0.7.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= @@ -28,10 +27,6 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c h1:Xo2rK1pzOm0jO6abTPIQw github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0 h1:suYBsYZIkSlUMEz4TAYCczKf62IA2UWC+O8+KtdOhCo= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= -github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5 h1:+CpLbZIeUn94m02LdEKPcgErLJ347NUwxPKs5u8ieiY= -github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= -github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4 h1:itqmmf1PFpC4n5JW+j4BU7X4MTfVurhYRTjODoPb2Y8= -github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -39,11 +34,8 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/godbus/dbus/v5 v5.0.4/go.mod 
h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -62,10 +54,8 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -77,27 +67,15 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBxH5Pj7KeFK5l+Y3FsgT8keqKtk= github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/networkplumbing/go-nft v0.4.0 h1:kExVMwXW48DOAukkBwyI16h4uhE5lN9iMvQd52lpTyU= github.com/networkplumbing/go-nft v0.4.0/go.mod h1:HnnM+tYvlGAsMU7yoYwXEVLLiDW9gdMmb5HoGcwpuQs= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.16.0 h1:7q1w9frJDzninhXxjZd+Y/x54XNjG/UlRLIYPZafsPM= github.com/onsi/ginkgo/v2 v2.16.0/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo= github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0= 
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= @@ -115,7 +93,6 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -123,12 +100,20 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.17.1 h1:wlYEnwqAHgzmhNUFfw7Xalt2JzQvsMx2Se4PcoFCT/U= +github.com/tidwall/gjson v1.17.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= @@ -139,47 +124,34 @@ golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -189,7 +161,6 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -198,7 +169,6 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= @@ -235,12 +205,7 @@ google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7 google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/ip/ipmasq_linux.go b/pkg/ip/ipmasq_linux.go index aa59a8db5..b03c935f3 100644 --- a/pkg/ip/ipmasq_linux.go +++ b/pkg/ip/ipmasq_linux.go @@ -114,6 +114,46 @@ func 
TeardownIPMasq(ipn *net.IPNet, chain string, comment string) error { return nil } +func CheckIPMasq(ipn *net.IPNet, chain, comment string) error { + isV6 := ipn.IP.To4() == nil + + var ipt *iptables.IPTables + var err error + var multicastNet string + var ip string // the ip and its full-length prefix + + if isV6 { + ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv6) + multicastNet = "ff00::/8" + ip = ipn.IP.String() + "/128" + } else { + ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv4) + multicastNet = "224.0.0.0/4" + ip = ipn.IP.String() + "/32" + } + if err != nil { + return fmt.Errorf("failed to locate iptables: %v", err) + } + + ok, err := ipt.Exists("nat", chain, "!", "-d", multicastNet, "-j", "MASQUERADE", "-m", "comment", "--comment", comment) + if err != nil { + return fmt.Errorf("could not check for expected rule: %w", err) + } + if !ok { + return fmt.Errorf("expected rule did not exist in chain %s", chain) + } + + ok, err = ipt.Exists("nat", "POSTROUTING", "-s", ip, "-j", chain, "-m", "comment", "--comment", comment) + if err != nil { + return fmt.Errorf("could not check for expected rule [-A POSTROUTING ]: %w", err) + } + if !ok { + want := []string{"-A", "POSTROUTING", "-s", ip, "-j", chain, "-m", "comment", "--comment", comment} + return fmt.Errorf("expected rule %v did not exist in chain POSTROUTING", want) + } + return nil +} + // isNotExist returnst true if the error is from iptables indicating // that the target does not exist. func isNotExist(err error) bool { diff --git a/pkg/ipam/ipam.go b/pkg/ipam/ipam.go index e39d36b08..d53f7a73b 100644 --- a/pkg/ipam/ipam.go +++ b/pkg/ipam/ipam.go @@ -32,3 +32,11 @@ func ExecCheck(plugin string, netconf []byte) error { func ExecDel(plugin string, netconf []byte) error { return invoke.DelegateDel(context.TODO(), plugin, netconf, nil) } + +func ExecStatus(plugin string, netconf []byte) error { + return invoke.DelegateStatus(context.TODO(), plugin, netconf, nil) +} + +func ExecGC(plugin string, netconf []byte) error { + return invoke.DelegateGC(context.TODO(), plugin, netconf, nil) +} diff --git a/pkg/ipam/ipam_linux.go b/pkg/ipam/ipam_linux.go index 6c2bfe72f..f902f408d 100644 --- a/pkg/ipam/ipam_linux.go +++ b/pkg/ipam/ipam_linux.go @@ -117,6 +117,15 @@ func ConfigureIface(ifName string, res *current.Result) error { Dst: &r.Dst, LinkIndex: link.Attrs().Index, Gw: gw, + MTU: r.MTU, + AdvMSS: r.AdvMSS, + Priority: r.Priority, + } + if r.Scope != nil { + route.Scope = netlink.Scope(*r.Scope) + } + if r.Table != nil { + route.Table = *r.Table } if err = netlink.RouteAddEcmp(&route); err != nil { diff --git a/pkg/testutils/cmd.go b/pkg/testutils/cmd.go index 6f65d6ddd..76a6cd9a6 100644 --- a/pkg/testutils/cmd.go +++ b/pkg/testutils/cmd.go @@ -114,3 +114,21 @@ func CmdDel(cniNetns, cniContainerID, cniIfname string, f func() error) error { func CmdDelWithArgs(args *skel.CmdArgs, f func() error) error { return CmdDel(args.Netns, args.ContainerID, args.IfName, f) } + +func CmdGC(f func() error) error { + os.Setenv("CNI_COMMAND", "GC") + os.Setenv("CNI_PATH", os.Getenv("PATH")) + os.Setenv("CNI_NETNS_OVERRIDE", "1") + defer envCleanup() + + return f() +} + +func CmdStatus(f func() error) error { + os.Setenv("CNI_COMMAND", "STATUS") + os.Setenv("CNI_PATH", os.Getenv("PATH")) + os.Setenv("CNI_NETNS_OVERRIDE", "1") + defer envCleanup() + + return f() +} diff --git a/pkg/testutils/dhcp4server/leasepool/lease.go b/pkg/testutils/dhcp4server/leasepool/lease.go new file mode 100644 index 000000000..a4381c6e0 --- /dev/null +++ 
b/pkg/testutils/dhcp4server/leasepool/lease.go @@ -0,0 +1,102 @@ +package leasepool + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "fmt" + "net" + "time" +) + +type LeaseStatus int + +const ( + Free LeaseStatus = 0 + Reserved LeaseStatus = 1 + Active LeaseStatus = 2 +) + +type Lease struct { + IP net.IP // The IP of the Lease + Status LeaseStatus // Are Reserved, Active or Free + MACAddress net.HardwareAddr // Mac Address of the Device + ClientID []byte // ClientID of the request + Hostname string // Hostname From option 12 + Expiry time.Time // Expiry Time +} + +// leaseMarshal is a mirror of Lease used for marshalling, since +// net.HardwareAddr has no native marshalling capability. +type leaseMarshal struct { + IP string + Status int + MACAddress string + ClientID string + Hostname string + Expiry time.Time +} + +func (l Lease) MarshalJSON() ([]byte, error) { + return json.Marshal(leaseMarshal{ + IP: l.IP.String(), + Status: int(l.Status), + MACAddress: l.MACAddress.String(), + ClientID: hex.EncodeToString(l.ClientID), + Hostname: l.Hostname, + Expiry: l.Expiry, + }) +} + +func (l *Lease) UnmarshalJSON(data []byte) error { + stringUnMarshal := leaseMarshal{} + err := json.Unmarshal(data, &stringUnMarshal) + if err != nil { + return err + } + + l.IP = net.ParseIP(stringUnMarshal.IP) + l.Status = LeaseStatus(stringUnMarshal.Status) + if stringUnMarshal.MACAddress != "" { + l.MACAddress, err = net.ParseMAC(stringUnMarshal.MACAddress) + if err != nil { + return fmt.Errorf("error parsing MAC address: %v", err) + } + } + l.ClientID, err = hex.DecodeString(stringUnMarshal.ClientID) + if err != nil { + return fmt.Errorf("error decoding clientID: %v", err) + } + l.Hostname = stringUnMarshal.Hostname + l.Expiry = stringUnMarshal.Expiry + + return nil +} + +func (l Lease) Equal(other Lease) bool { + if !l.IP.Equal(other.IP) { + return false + } + + if int(l.Status) != int(other.Status) { + return false + } + + if l.MACAddress.String() != other.MACAddress.String() { + return false + } + + if !bytes.Equal(l.ClientID, other.ClientID) { + return false + } + + if l.Hostname != other.Hostname { + return false + } + + if !l.Expiry.Equal(other.Expiry) { + return false + } + + return true +} diff --git a/vendor/github.com/d2g/dhcp4server/leasepool/leasepool.go b/pkg/testutils/dhcp4server/leasepool/leasepool.go similarity index 75% rename from vendor/github.com/d2g/dhcp4server/leasepool/leasepool.go rename to pkg/testutils/dhcp4server/leasepool/leasepool.go index a620b4b92..f967f5e93 100644 --- a/vendor/github.com/d2g/dhcp4server/leasepool/leasepool.go +++ b/pkg/testutils/dhcp4server/leasepool/leasepool.go @@ -8,13 +8,13 @@ import ( * Lease.IP is the Key. */ type LeasePool interface { - //Add A Lease To The Pool + // Add A Lease To The Pool AddLease(Lease) error - //Remove + // Remove RemoveLease(net.IP) error - //Remove All Leases from the Pool (Required for Persistant LeaseManagers) + // Remove All Leases from the Pool (Required for Persistent LeaseManagers) PurgeLeases() error /* @@ -25,7 +25,7 @@ type LeasePool interface { */ GetLease(net.IP) (bool, Lease, error) - //Get the lease already in use by that hardware address and/or client identifier. + // Get the lease already in use by that hardware address and/or client identifier. 
GetLeaseForClient(net.HardwareAddr, []byte) (bool, Lease, error) /* diff --git a/vendor/github.com/d2g/dhcp4server/leasepool/memorypool/memorypool.go b/pkg/testutils/dhcp4server/leasepool/memorypool/memorypool.go similarity index 83% rename from vendor/github.com/d2g/dhcp4server/leasepool/memorypool/memorypool.go rename to pkg/testutils/dhcp4server/leasepool/memorypool/memorypool.go index 7cd8d2d5a..fc6edc3a2 100644 --- a/vendor/github.com/d2g/dhcp4server/leasepool/memorypool/memorypool.go +++ b/pkg/testutils/dhcp4server/leasepool/memorypool/memorypool.go @@ -3,9 +3,10 @@ package memorypool import ( "bytes" "errors" - "github.com/d2g/dhcp4server/leasepool" "net" "sync" + + "github.com/containernetworking/plugins/pkg/testutils/dhcp4server/leasepool" ) type MemoryPool struct { @@ -13,7 +14,7 @@ type MemoryPool struct { poolLock sync.Mutex } -//Add A Lease To The Pool +// Add A Lease To The Pool func (t *MemoryPool) AddLease(newLease leasepool.Lease) error { t.poolLock.Lock() defer t.poolLock.Unlock() @@ -24,7 +25,7 @@ func (t *MemoryPool) AddLease(newLease leasepool.Lease) error { for i := range t.pool { if t.pool[i].IP.Equal(newLease.IP) { - //Lease Already Exists In Pool + // Lease Already Exists In Pool return errors.New("Error: Lease IP \"" + newLease.IP.String() + "\" alreay exists in Pool") } } @@ -33,7 +34,7 @@ func (t *MemoryPool) AddLease(newLease leasepool.Lease) error { return nil } -//Remove a Lease From The Pool +// Remove a Lease From The Pool func (t *MemoryPool) RemoveLease(leaseIP net.IP) error { t.poolLock.Lock() defer t.poolLock.Unlock() @@ -41,10 +42,10 @@ func (t *MemoryPool) RemoveLease(leaseIP net.IP) error { for i := range t.pool { if t.pool[i].IP.Equal(leaseIP) { - //Move the Last Element to This Position. + // Move the Last Element to This Position. t.pool[i] = t.pool[len(t.pool)-1] - //Shortern the Pool By One. + // Shortern the Pool By One. t.pool = t.pool[0:(len(t.pool) - 1)] return nil } @@ -53,7 +54,7 @@ func (t *MemoryPool) RemoveLease(leaseIP net.IP) error { return errors.New("Error: Lease IP \"" + leaseIP.String() + "\" Is Not In The Pool") } -//Remove All Leases from the Pool (Required for Persistant LeaseManagers) +// Remove All Leases from the Pool (Required for Persistent LeaseManagers) func (t *MemoryPool) PurgeLeases() error { t.poolLock.Lock() defer t.poolLock.Unlock() @@ -89,7 +90,7 @@ func makeKey(macAddress net.HardwareAddr, clientID []byte) []byte { return key } -//Get the lease already in use by that hardware address and/or client identifier. +// Get the lease already in use by that hardware address and/or client identifier. func (t *MemoryPool) GetLeaseForClient(macAddress net.HardwareAddr, clientID []byte) (bool, leasepool.Lease, error) { t.poolLock.Lock() defer t.poolLock.Unlock() @@ -113,15 +114,15 @@ func (t *MemoryPool) GetNextFreeLease() (bool, leasepool.Lease, error) { t.poolLock.Lock() defer t.poolLock.Unlock() - //Loop Through the elements backwards. + // Loop Through the elements backwards. for i := (len(t.pool) - 1); i >= 0; i-- { - //If the Lease Is Free + // If the Lease Is Free if t.pool[i].Status == leasepool.Free { - //Take the Element + // Take the Element iLease := t.pool[i] - //Shrink the Pool By 1 + // Shrink the Pool By 1 t.pool = t.pool[:(len(t.pool) - 1)] - //Place the Lease At the Begining (This saves us having some sort of counter...) + // Place the Lease At the Beginning (This saves us having some sort of counter...) t.pool = append([]leasepool.Lease{iLease}, t.pool...) 
return true, iLease, nil } diff --git a/vendor/github.com/d2g/dhcp4server/server.go b/pkg/testutils/dhcp4server/server.go similarity index 75% rename from vendor/github.com/d2g/dhcp4server/server.go rename to pkg/testutils/dhcp4server/server.go index 7b1d374d9..39683fef1 100644 --- a/vendor/github.com/d2g/dhcp4server/server.go +++ b/pkg/testutils/dhcp4server/server.go @@ -1,5 +1,7 @@ package dhcp4server +// Based off of MPL https://github.com/d2g/dhcp4server + import ( "bytes" "errors" @@ -9,36 +11,36 @@ import ( "time" "github.com/d2g/dhcp4" - "github.com/d2g/dhcp4server/leasepool" - "golang.org/x/net/ipv4" + + "github.com/containernetworking/plugins/pkg/testutils/dhcp4server/leasepool" ) /* * The DHCP Server Structure */ type Server struct { - //Configuration Options - ip net.IP //The IP Address We Tell Clients The Server Is On. - defaultGateway net.IP //The Default Gateway Address - dnsServers []net.IP //DNS Servers - subnetMask net.IP //ie. 255.255.255.0 - leaseDuration time.Duration //Number of Seconds - ignoreIPs []net.IP //Slice of IP's that should be ignored by the Server. - ignoreHardwareAddress []net.HardwareAddr //Slice of Hardware Addresses we should ignore. - - //Local Address + // Configuration Options + ip net.IP // The IP Address We Tell Clients The Server Is On. + defaultGateway net.IP // The Default Gateway Address + dnsServers []net.IP // DNS Servers + subnetMask net.IP // ie. 255.255.255.0 + leaseDuration time.Duration // Number of Seconds + ignoreIPs []net.IP // Slice of IP's that should be ignored by the Server. + ignoreHardwareAddress []net.HardwareAddr // Slice of Hardware Addresses we should ignore. + + // Local Address laddr net.UDPAddr - //Remote address + // Remote address raddr net.UDPAddr - //LeasePool - leasePool leasepool.LeasePool //Lease Pool Manager + // LeasePool + leasePool leasepool.LeasePool // Lease Pool Manager - //Used to Gracefully Close the Server + // Used to Gracefully Close the Server shutdown uint32 - //Listeners & Response Connection. + // Listeners & Response Connection. connection *ipv4.PacketConn } @@ -47,7 +49,7 @@ func New(ip net.IP, l leasepool.LeasePool, options ...func(*Server) error) (*Ser s := Server{ ip: ip, defaultGateway: ip, - dnsServers: []net.IP{net.IPv4(208, 67, 222, 222), net.IPv4(208, 67, 220, 220)}, //OPENDNS + dnsServers: []net.IP{net.IPv4(208, 67, 222, 222), net.IPv4(208, 67, 220, 220)}, // OPENDNS subnetMask: net.IPv4(255, 255, 255, 0), leaseDuration: 24 * time.Hour, leasePool: l, @@ -72,39 +74,6 @@ func (s *Server) setOptions(options ...func(*Server) error) error { return nil } -// Set the Server IP -func IP(i net.IP) func(*Server) error { - return func(s *Server) error { - s.ip = i - return nil - } - return nil -} - -// Set the Default Gateway Address. -func DefaultGateway(r net.IP) func(*Server) error { - return func(s *Server) error { - s.defaultGateway = r - return nil - } -} - -// Set the DNS servers. 
-func DNSServers(dnss []net.IP) func(*Server) error { - return func(s *Server) error { - s.dnsServers = dnss - return nil - } -} - -// Set the Subnet Mask -func SubnetMask(m net.IP) func(*Server) error { - return func(s *Server) error { - s.subnetMask = m - return nil - } -} - // Set Lease Duration func LeaseDuration(d time.Duration) func(*Server) error { return func(s *Server) error { @@ -113,30 +82,6 @@ func LeaseDuration(d time.Duration) func(*Server) error { } } -// Set Ignore IPs -func IgnoreIPs(ips []net.IP) func(*Server) error { - return func(s *Server) error { - s.ignoreIPs = ips - return nil - } -} - -// Set Ignore Hardware Addresses -func IgnoreHardwareAddresses(h []net.HardwareAddr) func(*Server) error { - return func(s *Server) error { - s.ignoreHardwareAddress = h - return nil - } -} - -// Set LeasePool -func LeasePool(p leasepool.LeasePool) func(*Server) error { - return func(s *Server) error { - s.leasePool = p - return nil - } -} - // Set The Local Address func SetLocalAddr(a net.UDPAddr) func(*Server) error { return func(s *Server) error { @@ -167,11 +112,6 @@ func (s *Server) ListenAndServe() error { s.connection = ipv4.NewPacketConn(connection) defer s.connection.Close() - //We Currently Don't Use this Feature Which is the only bit that is Linux Only. - //if err := s.connection.SetControlMessage(ipv4.FlagInterface, true); err != nil { - // return err - //} - log.Println("Trace: DHCP Server Listening.") for { @@ -180,16 +120,14 @@ func (s *Server) ListenAndServe() error { return nil } - //Make Our Buffer (Max Buffer is 574) "I believe this 576 size comes from RFC 791" - Random Mailing list quote of the day. + // Make Our Buffer (Max Buffer is 574) "I believe this 576 size comes from RFC 791" - Random Mailing list quote of the day. buffer := make([]byte, 576) - //Set Read Deadline + // Set Read Deadline s.connection.SetReadDeadline(time.Now().Add(time.Second)) // Read Packet - n, control_message, source, err := s.connection.ReadFrom(buffer) - + n, controlMessage, source, err := s.connection.ReadFrom(buffer) if err != nil { - switch v := err.(type) { case *net.OpError: // If we've been signaled to shut down, ignore @@ -216,15 +154,15 @@ func (s *Server) ListenAndServe() error { return err } - //We seem to have an issue with undersized packets? + // We seem to have an issue with undersized packets? if n < 240 { log.Printf("Error: Invalid Packet Size \"%d\" Received:%v\n", n, buffer[:n]) continue } - //We should ignore some requests - //It shouldn't be possible to ignore IP's because they shouldn't have them as we're the DHCP server. - //However, they can have i.e. if you're the client & server :S. + // We should ignore some requests + // It shouldn't be possible to ignore IP's because they shouldn't have them as we're the DHCP server. + // However, they can have i.e. if you're the client & server :S. for _, ipToIgnore := range s.ignoreIPs { if ipToIgnore.Equal(source.(*net.UDPAddr).IP) { log.Println("Debug: Ignoring DHCP Request From IP:" + ipToIgnore.String()) @@ -234,8 +172,8 @@ func (s *Server) ListenAndServe() error { packet := dhcp4.Packet(buffer[:n]) - //We can ignore hardware addresses. - //Usefull for ignoring a range of hardware addresses + // We can ignore hardware addresses. 
+ // Useful for ignoring a range of hardware addresses for _, hardwareAddressToIgnore := range s.ignoreHardwareAddress { if bytes.Equal(hardwareAddressToIgnore, packet.CHAddr()) { log.Println("Debug: Ignoring DHCP Request From Hardware Address:" + hardwareAddressToIgnore.String()) @@ -251,7 +189,7 @@ func (s *Server) ListenAndServe() error { log.Printf("Trace: Packet Gateway IP: %v\n", packet.GIAddr().String()) log.Printf("Trace: Packet Client Mac: %v\n", packet.CHAddr().String()) - //We need to stop butting in with other servers. + // We need to stop butting in with other servers. if packet.SIAddr().Equal(net.IPv4(0, 0, 0, 0)) || packet.SIAddr().Equal(net.IP{}) || packet.SIAddr().Equal(s.ip) { returnPacket, err := s.ServeDHCP(packet) @@ -269,7 +207,7 @@ func (s *Server) ListenAndServe() error { log.Printf("Trace: Packet Gateway IP: %v\n", returnPacket.GIAddr().String()) log.Printf("Trace: Packet Client Mac: %v\n", returnPacket.CHAddr().String()) - _, err = s.connection.WriteTo(returnPacket, control_message, &s.raddr) + _, err = s.connection.WriteTo(returnPacket, controlMessage, &s.raddr) if err != nil { log.Println("Debug: Error Writing:" + err.Error()) return err @@ -293,8 +231,8 @@ func (s *Server) ServeDHCP(packet dhcp4.Packet) (dhcp4.Packet, error) { switch dhcp4.MessageType(packetOptions[dhcp4.OptionDHCPMessageType][0]) { case dhcp4.Discover: - //Discover Received from client - //Lets get the lease we're going to send them + // Discover Received from client + // Lets get the lease we're going to send them found, lease, err := s.GetLease(packet) if err != nil { return dhcp4.Packet{}, err @@ -308,14 +246,14 @@ func (s *Server) ServeDHCP(packet dhcp4.Packet) (dhcp4.Packet, error) { offerPacket := s.OfferPacket(packet) offerPacket.SetYIAddr(lease.IP) - //Sort out the packet options + // Sort out the packet options offerPacket.PadToMinSize() lease.Status = leasepool.Reserved lease.MACAddress = packet.CHAddr() lease.ClientID = getClientID(packetOptions) - //If the lease expires within the next 5 Mins increase the lease expiary (Giving the Client 5 mins to complete) + // If the lease expires within the next 5 Mins increase the lease expiary (Giving the Client 5 mins to complete) if lease.Expiry.Before(time.Now().Add(time.Minute * 5)) { lease.Expiry = time.Now().Add(time.Minute * 5) } @@ -330,14 +268,14 @@ func (s *Server) ServeDHCP(packet dhcp4.Packet) (dhcp4.Packet, error) { } if !updated { - //Unable to reserve lease (It's now active else where maybe?) + // Unable to reserve lease (It's now active else where maybe?) return dhcp4.Packet{}, errors.New("Unable to Reserve Lease:" + lease.IP.String()) } return offerPacket, nil case dhcp4.Request: - //Request Received from client - //Lets get the lease we're going to send them + // Request Received from client + // Lets get the lease we're going to send them found, lease, err := s.GetLease(packet) if err != nil { return dhcp4.Packet{}, err @@ -348,9 +286,9 @@ func (s *Server) ServeDHCP(packet dhcp4.Packet) (dhcp4.Packet, error) { return dhcp4.Packet{}, nil } - //If the lease is not the one requested We should send a NAK.. + // If the lease is not the one requested We should send a NAK.. 
if len(packetOptions) > 0 && !net.IP(packetOptions[dhcp4.OptionRequestedIPAddress]).Equal(lease.IP) { - //NAK + // NAK declinePacket := s.DeclinePacket(packet) declinePacket.PadToMinSize() @@ -372,30 +310,45 @@ func (s *Server) ServeDHCP(packet dhcp4.Packet) (dhcp4.Packet, error) { } if updated { - //ACK + // ACK acknowledgementPacket := s.AcknowledgementPacket(packet) acknowledgementPacket.SetYIAddr(lease.IP) - //Lease time. - acknowledgementPacket.AddOption(dhcp4.OptionIPAddressLeaseTime, dhcp4.OptionsLeaseTime(lease.Expiry.Sub(time.Now()))) + // Lease time. + acknowledgementPacket.AddOption(dhcp4.OptionIPAddressLeaseTime, dhcp4.OptionsLeaseTime(time.Until(lease.Expiry))) acknowledgementPacket.PadToMinSize() return acknowledgementPacket, nil - } else { - //NAK - declinePacket := s.DeclinePacket(packet) - declinePacket.PadToMinSize() - - return declinePacket, nil } + // NAK + declinePacket := s.DeclinePacket(packet) + declinePacket.PadToMinSize() + + return declinePacket, nil + } case dhcp4.Decline: - //Decline from the client: + // Decline from the client: log.Printf("Debug: Decline Message:%v\n", packet) case dhcp4.Release: - //Decline from the client: log.Printf("Debug: Release Message:%v\n", packet) + // Release Received from client + // Lets get the lease we're going to send them + found, lease, err := s.GetLease(packet) + if err != nil { + return dhcp4.Packet{}, err + } + + ack := s.AcknowledgementPacket(packet) + ack.PadToMinSize() + if !found { + log.Println("Warning: Release of unknown lease") + return ack, nil + } + s.leasePool.RemoveLease(lease.IP) + + return ack, nil default: log.Printf("Debug: Unexpected Packet Type:%v\n", dhcp4.MessageType(packetOptions[dhcp4.OptionDHCPMessageType][0])) @@ -408,7 +361,6 @@ func (s *Server) ServeDHCP(packet dhcp4.Packet) (dhcp4.Packet, error) { * Create DHCP Offer Packet */ func (s *Server) OfferPacket(discoverPacket dhcp4.Packet) dhcp4.Packet { - offerPacket := dhcp4.NewPacket(dhcp4.BootReply) offerPacket.SetXId(discoverPacket.XId()) offerPacket.SetFlags(discoverPacket.Flags()) @@ -417,27 +369,27 @@ func (s *Server) OfferPacket(discoverPacket dhcp4.Packet) dhcp4.Packet { offerPacket.SetGIAddr(discoverPacket.GIAddr()) offerPacket.SetSecs(discoverPacket.Secs()) - //53 + // 53 offerPacket.AddOption(dhcp4.OptionDHCPMessageType, []byte{byte(dhcp4.Offer)}) - //54 + // 54 offerPacket.AddOption(dhcp4.OptionServerIdentifier, s.ip.To4()) - //51 + // 51 offerPacket.AddOption(dhcp4.OptionIPAddressLeaseTime, dhcp4.OptionsLeaseTime(s.leaseDuration)) - //Other options go in requested order... + // Other options go in requested order... discoverPacketOptions := discoverPacket.ParseOptions() ourOptions := make(dhcp4.Options) - //1 + // 1 ourOptions[dhcp4.OptionSubnetMask] = s.subnetMask.To4() - //3 + // 3 ourOptions[dhcp4.OptionRouter] = s.defaultGateway.To4() - //6 + // 6 ourOptions[dhcp4.OptionDomainNameServer] = dhcp4.JoinIPs(s.dnsServers) if discoverPacketOptions[dhcp4.OptionParameterRequestList] != nil { - //Loop through the requested options and if we have them add them. + // Loop through the requested options and if we have them add them. for _, optionCode := range discoverPacketOptions[dhcp4.OptionParameterRequestList] { if !bytes.Equal(ourOptions[dhcp4.OptionCode(optionCode)], []byte{}) { offerPacket.AddOption(dhcp4.OptionCode(optionCode), ourOptions[dhcp4.OptionCode(optionCode)]) @@ -446,20 +398,18 @@ func (s *Server) OfferPacket(discoverPacket dhcp4.Packet) dhcp4.Packet { } } - //Add all the options not requested. 
+ // Add all the options not requested. for optionCode, optionValue := range ourOptions { offerPacket.AddOption(optionCode, optionValue) } return offerPacket - } /* * Create DHCP Acknowledgement */ func (s *Server) AcknowledgementPacket(requestPacket dhcp4.Packet) dhcp4.Packet { - acknowledgementPacket := dhcp4.NewPacket(dhcp4.BootReply) acknowledgementPacket.SetXId(requestPacket.XId()) acknowledgementPacket.SetFlags(requestPacket.Flags()) @@ -481,7 +431,6 @@ func (s *Server) AcknowledgementPacket(requestPacket dhcp4.Packet) dhcp4.Packet * Create DHCP Decline */ func (s *Server) DeclinePacket(requestPacket dhcp4.Packet) dhcp4.Packet { - declinePacket := dhcp4.NewPacket(dhcp4.BootReply) declinePacket.SetXId(requestPacket.XId()) declinePacket.SetFlags(requestPacket.Flags()) @@ -501,18 +450,18 @@ func (s *Server) DeclinePacket(requestPacket dhcp4.Packet) dhcp4.Packet { /* * Get Lease tries to work out the best lease for the packet supplied. - * Taking into account all Requested IP, Exisitng MACAddresses and Free leases. + * Taking into account all Requested IP, existing MACAddresses and Free leases. */ -func (s *Server) GetLease(packet dhcp4.Packet) (found bool, lease leasepool.Lease, err error) { +func (s *Server) GetLease(packet dhcp4.Packet) (found bool, lease leasepool.Lease, err error) { // nolint:nonamedreturns packetOptions := packet.ParseOptions() clientID := getClientID(packetOptions) - //Requested an IP + // Requested an IP if (len(packetOptions) > 0) && packetOptions[dhcp4.OptionRequestedIPAddress] != nil && !net.IP(packetOptions[dhcp4.OptionRequestedIPAddress]).Equal(net.IP{}) { - //An IP Has Been Requested Let's Try and Get that One. + // An IP Has Been Requested Let's Try and Get that One. found, lease, err = s.leasePool.GetLease(net.IP(packetOptions[dhcp4.OptionRequestedIPAddress])) if err != nil { @@ -520,28 +469,28 @@ func (s *Server) GetLease(packet dhcp4.Packet) (found bool, lease leasepool.Leas } if found { - //If lease is free, return it to client. If it is not - //free match against the MAC address and client - //identifier. + // If lease is free, return it to client. If it is not + // free match against the MAC address and client + // identifier. if lease.Status == leasepool.Free { - //Lease Is Free you Can Have it. + // Lease Is Free you Can Have it. return } if bytes.Equal(lease.MACAddress, packet.CHAddr()) && bytes.Equal(lease.ClientID, clientID) { - //Lease isn't free but it's yours + // Lease isn't free but it's yours return } } } - //Ok Even if you requested an IP you can't have it. + // Ok Even if you requested an IP you can't have it. found, lease, err = s.leasePool.GetLeaseForClient(packet.CHAddr(), clientID) if found || err != nil { return } - //Just get the next free lease if you can. + // Just get the next free lease if you can. found, lease, err = s.leasePool.GetNextFreeLease() return } @@ -570,10 +519,10 @@ func (s *Server) GC() error { for i := range leases { if leases[i].Status != leasepool.Free { - //Lease Is Not Free + // Lease Is Not Free if time.Now().After(leases[i].Expiry) { - //Lease has expired. + // Lease has expired. 
leases[i].Status = leasepool.Free updated, err := s.leasePool.UpdateLease(leases[i]) if err != nil { diff --git a/pkg/testutils/testing.go b/pkg/testutils/testing.go index 9444a8b2d..a539091b6 100644 --- a/pkg/testutils/testing.go +++ b/pkg/testutils/testing.go @@ -19,7 +19,7 @@ import ( ) // AllSpecVersions contains all CNI spec version numbers -var AllSpecVersions = [...]string{"0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0"} +var AllSpecVersions = [...]string{"0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0", "1.1.0"} // SpecVersionHasIPVersion returns true if the given CNI specification version // includes the "version" field in the IP address elements @@ -39,6 +39,20 @@ func SpecVersionHasCHECK(ver string) bool { return ok } +// SpecVersionHasGC returns true if the given CNI specification version +// supports the GC command +func SpecVersionHasGC(ver string) bool { + ok, _ := version.GreaterThanOrEqualTo(ver, "1.1.0") + return ok +} + +// SpecVersionHasSTATUS returns true if the given CNI specification version +// supports the STATUS command +func SpecVersionHasSTATUS(ver string) bool { + ok, _ := version.GreaterThanOrEqualTo(ver, "1.1.0") + return ok +} + // SpecVersionHasChaining returns true if the given CNI specification version // supports plugin chaining func SpecVersionHasChaining(ver string) bool { diff --git a/plugins/ipam/dhcp/daemon.go b/plugins/ipam/dhcp/daemon.go index 7fc949c30..b796efd33 100644 --- a/plugins/ipam/dhcp/daemon.go +++ b/plugins/ipam/dhcp/daemon.go @@ -19,6 +19,7 @@ import ( "encoding/json" "errors" "fmt" + "log" "net" "net/http" "net/rpc" @@ -130,6 +131,36 @@ func (d *DHCP) Release(args *skel.CmdArgs, _ *struct{}) error { return nil } +func (d *DHCP) Ping(_ *skel.CmdArgs, _ *struct{}) error { + return nil +} + +func (d *DHCP) GC(args *skel.CmdArgs, _ *struct{}) error { + conf := NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("error parsing netconf: %v", err) + } + + keepClientIDs := make(map[string]struct{}, len(conf.ValidAttachments)) + for _, keep := range conf.ValidAttachments { + keepClientIDs[generateClientID(keep.ContainerID, conf.Name, keep.IfName)] = struct{}{} + } + log.Printf("GC, keeping containers %v", keepClientIDs) + + d.mux.Lock() + defer d.mux.Unlock() + + for clientID, l := range d.leases { + if _, ok := keepClientIDs[clientID]; ok { + continue + } + l.Stop() + delete(d.leases, clientID) + } + + return nil +} + func (d *DHCP) getLease(clientID string) *DHCPLease { d.mux.Lock() defer d.mux.Unlock() diff --git a/plugins/ipam/dhcp/dhcp2_test.go b/plugins/ipam/dhcp/dhcp2_test.go index 9edb4fcd2..1bb1638e3 100644 --- a/plugins/ipam/dhcp/dhcp2_test.go +++ b/plugins/ipam/dhcp/dhcp2_test.go @@ -29,6 +29,7 @@ import ( current "github.com/containernetworking/cni/pkg/types/100" "github.com/containernetworking/plugins/pkg/ns" "github.com/containernetworking/plugins/pkg/testutils" + "github.com/containernetworking/plugins/pkg/testutils/dhcp4server/leasepool/memorypool" ) var _ = Describe("DHCP Multiple Lease Operations", func() { @@ -39,6 +40,7 @@ var _ = Describe("DHCP Multiple Lease Operations", func() { var socketPath string var tmpDir string var err error + var leasepool *memorypool.MemoryPool BeforeEach(func() { dhcpServerStopCh, socketPath, originalNS, targetNS, err = dhcpSetupOriginalNS() @@ -61,13 +63,15 @@ var _ = Describe("DHCP Multiple Lease Operations", func() { }) // Start the DHCP server - dhcpServerDone, err = dhcpServerStart(originalNS, 2, dhcpServerStopCh) + dhcpServerDone, 
leasepool, err = dhcpServerStart(originalNS, 2, dhcpServerStopCh) Expect(err).NotTo(HaveOccurred()) // Start the DHCP client daemon dhcpPluginPath, err := exec.LookPath("dhcp") Expect(err).NotTo(HaveOccurred()) clientCmd = exec.Command(dhcpPluginPath, "daemon", "-socketpath", socketPath) + clientCmd.Stderr = GinkgoWriter + clientCmd.Stdout = GinkgoWriter err = clientCmd.Start() Expect(err).NotTo(HaveOccurred()) Expect(clientCmd.Process).NotTo(BeNil()) @@ -177,4 +181,67 @@ var _ = Describe("DHCP Multiple Lease Operations", func() { }) Expect(err).NotTo(HaveOccurred()) }) + + It("[1.1.0] clears stale leases with GC", func() { + conf := fmt.Sprintf(`{ + "cniVersion": "1.1.0", + "name": "mynet", + "type": "ipvlan", + "ipam": { + "type": "dhcp", + "daemonSocketPath": "%s" + } + }`, socketPath) + + args1 := &skel.CmdArgs{ + ContainerID: "dummy1", + Netns: targetNS.Path(), + IfName: contVethName, + StdinData: []byte(conf), + } + args2 := &skel.CmdArgs{ + ContainerID: "dummy2", + Netns: targetNS.Path(), + IfName: contVethName, + StdinData: []byte(conf), + } + + gcConf := fmt.Sprintf(`{ + "cniVersion": "1.1.0", + "name": "mynet", + "type": "ipvlan", + "ipam": { + "type": "dhcp", + "daemonSocketPath": "%s" + }, + "cni.dev/valid-attachments": [{"containerID": "dummy1", "ifname": "eth0"}] + }`, socketPath) + + gcArgs := &skel.CmdArgs{ + StdinData: []byte(gcConf), + } + + err := originalNS.Do(func(ns.NetNS) error { + defer GinkgoRecover() + _, _, err := testutils.CmdAddWithArgs(args1, func() error { + return cmdAdd(args1) + }) + Expect(err).NotTo(HaveOccurred()) + + _, _, err = testutils.CmdAddWithArgs(args2, func() error { + return cmdAdd(args2) + }) + Expect(err).NotTo(HaveOccurred()) + Expect(leasepool.GetLeases()).To(HaveLen(2)) + + err = testutils.CmdGC(func() error { + return cmdGC(gcArgs) + }) + Expect(err).NotTo(HaveOccurred()) + Expect(leasepool.GetLeases()).To(HaveLen(1)) + + return nil + }) + Expect(err).NotTo(HaveOccurred()) + }) }) diff --git a/plugins/ipam/dhcp/dhcp_test.go b/plugins/ipam/dhcp/dhcp_test.go index 9c77a744c..c9c6d2de3 100644 --- a/plugins/ipam/dhcp/dhcp_test.go +++ b/plugins/ipam/dhcp/dhcp_test.go @@ -26,9 +26,6 @@ import ( "time" "github.com/d2g/dhcp4" - "github.com/d2g/dhcp4server" - "github.com/d2g/dhcp4server/leasepool" - "github.com/d2g/dhcp4server/leasepool/memorypool" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/vishvananda/netlink" @@ -37,6 +34,9 @@ import ( types100 "github.com/containernetworking/cni/pkg/types/100" "github.com/containernetworking/plugins/pkg/ns" "github.com/containernetworking/plugins/pkg/testutils" + "github.com/containernetworking/plugins/pkg/testutils/dhcp4server" + "github.com/containernetworking/plugins/pkg/testutils/dhcp4server/leasepool" + "github.com/containernetworking/plugins/pkg/testutils/dhcp4server/leasepool/memorypool" ) func getTmpDir() (string, error) { @@ -48,7 +48,7 @@ func getTmpDir() (string, error) { return tmpDir, err } -func dhcpServerStart(netns ns.NetNS, numLeases int, stopCh <-chan bool) (*sync.WaitGroup, error) { +func dhcpServerStart(netns ns.NetNS, numLeases int, stopCh <-chan bool) (*sync.WaitGroup, *memorypool.MemoryPool, error) { // Add the expected IP to the pool lp := memorypool.MemoryPool{} @@ -60,7 +60,7 @@ func dhcpServerStart(netns ns.NetNS, numLeases int, stopCh <-chan bool) (*sync.W for i := 5; i < numLeases+5; i++ { err := lp.AddLease(leasepool.Lease{IP: dhcp4.IPAdd(net.IPv4(192, 168, 1, byte(i)), 0)}) if err != nil { - return nil, fmt.Errorf("error adding IP to DHCP pool: %v", err) + return nil, nil, fmt.Errorf("error adding IP to DHCP pool: %v", err) } } @@ -72,7 +72,7 @@ func dhcpServerStart(netns ns.NetNS, numLeases int, stopCh <-chan bool) (*sync.W dhcp4server.LeaseDuration(time.Minute*15), ) if err != nil { - return nil, fmt.Errorf("failed to create DHCP server: %v", err) + return nil, nil, fmt.Errorf("failed to create DHCP server: %v", err) } stopWg := sync.WaitGroup{} @@ -108,7 +108,7 @@ func dhcpServerStart(netns ns.NetNS, numLeases int, stopCh <-chan bool) (*sync.W }() startWg.Wait() - return &stopWg, nil + return &stopWg, &lp, nil } const ( @@ -200,7 +200,7 @@ var _ = Describe("DHCP Operations", func() { }) // Start the DHCP server - dhcpServerDone, err = dhcpServerStart(originalNS, 1, dhcpServerStopCh) + dhcpServerDone, _, err = dhcpServerStart(originalNS, 1, dhcpServerStopCh) Expect(err).NotTo(HaveOccurred()) // Start the DHCP client daemon @@ -209,8 +209,8 @@ var _ = Describe("DHCP Operations", func() { clientCmd = exec.Command(dhcpPluginPath, "daemon", "-socketpath", socketPath) // copy dhcp client's stdout/stderr to test stdout - clientCmd.Stdout = os.Stdout - clientCmd.Stderr = os.Stderr + clientCmd.Stdout = GinkgoWriter + clientCmd.Stderr = GinkgoWriter err = clientCmd.Start() Expect(err).NotTo(HaveOccurred()) @@ -264,6 +264,13 @@ var _ = Describe("DHCP Operations", func() { err := originalNS.Do(func(ns.NetNS) error { defer GinkgoRecover() + if testutils.SpecVersionHasSTATUS(ver) { + err = testutils.CmdStatus(func() error { + return cmdStatus(args) + }) + Expect(err).NotTo(HaveOccurred()) + } + r, _, err := testutils.CmdAddWithArgs(args, func() error { return cmdAdd(args) }) @@ -517,7 +524,7 @@ var _ = Describe("DHCP Lease Unavailable Operations", func() { }) // Start the DHCP server - dhcpServerDone, err = dhcpServerStart(originalNS, 1, dhcpServerStopCh) + dhcpServerDone, _, err = dhcpServerStart(originalNS, 1, dhcpServerStopCh) Expect(err).NotTo(HaveOccurred()) // Start the DHCP client daemon @@ -528,7 +535,7 @@ var _ = Describe("DHCP Lease Unavailable Operations", func() { // `go test` timeout with default delays. Since our DHCP server // and client daemon are local processes anyway, we can depend on // them to respond very quickly. 
- clientCmd = exec.Command(dhcpPluginPath, "daemon", "-socketpath", socketPath, "-timeout", "2s", "-resendmax", "8s") + clientCmd = exec.Command(dhcpPluginPath, "daemon", "-socketpath", socketPath, "-timeout", "1s", "-resendmax", "2s") // copy dhcp client's stdout/stderr to test stdout var b bytes.Buffer @@ -652,3 +659,23 @@ var _ = Describe("DHCP Lease Unavailable Operations", func() { }) } }) + +var _ = Describe("Status returns an error when the daemon is not running", func() { + conf := `{ + "cniVersion": "1.1.0", + "name": "mynet", + "type": "ipvlan", + "ipam": { + "type": "dhcp", + "daemonSocketPath": "/does/not/exist" + } + }` + + args := &skel.CmdArgs{ + StdinData: []byte(conf), + } + err := testutils.CmdStatus(func() error { + return cmdStatus(args) + }) + Expect(err).To(HaveOccurred()) +}) diff --git a/plugins/ipam/dhcp/main.go b/plugins/ipam/dhcp/main.go index d0a94e26c..5517d4e13 100644 --- a/plugins/ipam/dhcp/main.go +++ b/plugins/ipam/dhcp/main.go @@ -39,7 +39,6 @@ type NetConf struct { types.NetConf IPAM *IPAMConfig `json:"ipam"` } - type IPAMConfig struct { types.IPAM DaemonSocketPath string `json:"daemonSocketPath"` @@ -96,7 +95,13 @@ func main() { os.Exit(1) } } else { - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.All, bv.BuildString("dhcp")) + skel.PluginMainFuncs(skel.CNIFuncs{ + Add: cmdAdd, + Check: cmdCheck, + Del: cmdDel, + Status: cmdStatus, + GC: cmdGC, + }, version.All, bv.BuildString("dhcp")) } } @@ -134,6 +139,16 @@ func cmdCheck(args *skel.CmdArgs) error { return rpcCall("DHCP.Allocate", args, result) } +func cmdStatus(args *skel.CmdArgs) error { + result := struct{}{} + return rpcCall("DHCP.Ping", args, &result) +} + +func cmdGC(args *skel.CmdArgs) error { + result := struct{}{} + return rpcCall("DHCP.GC", args, &result) +} + func getSocketPath(stdinData []byte) (string, error) { conf := NetConf{} if err := json.Unmarshal(stdinData, &conf); err != nil { diff --git a/plugins/ipam/host-local/backend/allocator/config.go b/plugins/ipam/host-local/backend/allocator/config.go index f62aa551f..660635525 100644 --- a/plugins/ipam/host-local/backend/allocator/config.go +++ b/plugins/ipam/host-local/backend/allocator/config.go @@ -38,6 +38,16 @@ type Net struct { Args *struct { A *IPAMArgs `json:"cni"` } `json:"args"` + + ValidAttachments []GCAttachment `json:"cni.dev/valid-attachments,omitempty"` +} + +// GCAttachment is the parameters to a GC call -- namely, +// the container ID and ifname pair that represents a +// still-valid attachment. +type GCAttachment struct { + ContainerID string `json:"containerID"` + IfName string `json:"ifname"` } // IPAMConfig represents the IP related network configuration. @@ -72,15 +82,23 @@ type Range struct { Gateway net.IP `json:"gateway,omitempty"` } -// NewIPAMConfig creates a NetworkConfig from the given network name. -func LoadIPAMConfig(bytes []byte, envArgs string) (*IPAMConfig, string, error) { +func ParseConfig(bytes []byte) (*Net, error) { n := Net{} if err := json.Unmarshal(bytes, &n); err != nil { - return nil, "", err + return nil, err } if n.IPAM == nil { - return nil, "", fmt.Errorf("IPAM config missing 'ipam' key") + return nil, fmt.Errorf("IPAM config missing 'ipam' key") + } + return &n, nil +} + +// NewIPAMConfig creates a NetworkConfig from the given network name. 
+func LoadIPAMConfig(bytes []byte, envArgs string) (*IPAMConfig, string, error) { + n, err := ParseConfig(bytes) + if err != nil { + return nil, "", err } // parse custom IP from env args diff --git a/plugins/ipam/host-local/backend/disk/backend.go b/plugins/ipam/host-local/backend/disk/backend.go index 3ad19d99d..80ced518e 100644 --- a/plugins/ipam/host-local/backend/disk/backend.go +++ b/plugins/ipam/host-local/backend/disk/backend.go @@ -15,7 +15,10 @@ package disk import ( + "errors" + "fmt" "net" + "net/netip" "os" "path/filepath" "runtime" @@ -114,6 +117,47 @@ func (s *Store) FindByKey(match string) (bool, error) { return found, err } +// ReleaseExceptKeys keeps only the entries in the store that correspond to +// The set of matches we wish to keep +func (s *Store) ReleaseExceptKeys(keys []string) error { + errs := []error{} + + keep := map[string]struct{}{} + for _, k := range keys { + keep[k] = struct{}{} + } + + err := filepath.WalkDir(s.dataDir, func(path string, d os.DirEntry, err error) error { + if err != nil { + return err + } + // Only consider IP filenames + if !d.Type().IsRegular() || !ISIPFilename(d.Name()) { + return nil + } + + data, err := os.ReadFile(path) + if err != nil { + errs = append(errs, err) + return nil + } + if _, ok := keep[strings.TrimSpace(string(data))]; !ok { + err := os.Remove(path) + if err != nil { + errs = append(errs, err) + } + } + return nil + }) + if err != nil { + return fmt.Errorf("failed to GC store directory %s: %w", s.dataDir, err) + } + if len(errs) != 0 { + return errors.Join(errs...) + } + return nil +} + func (s *Store) FindByID(id string, ifname string) bool { s.Lock() defer s.Unlock() @@ -130,6 +174,19 @@ func (s *Store) FindByID(id string, ifname string) bool { return found } +// ReleaseExcept cleans the store, preserving only the (id, ifname) pairs +// provided in keeps +func (s *Store) ReleaseExcept(keeps [][2]string) error { + s.Lock() + defer s.Unlock() + + matches := make([]string, 0, len(keeps)) + for _, keep := range keeps { + matches = append(matches, strings.TrimSpace(keep[0])+LineBreak+keep[1]) + } + return s.ReleaseExceptKeys(matches) +} + func (s *Store) ReleaseByKey(match string) (bool, error) { found := false err := filepath.Walk(s.dataDir, func(path string, info os.FileInfo, err error) error { @@ -200,3 +257,14 @@ func GetEscapedPath(dataDir string, fname string) string { } return filepath.Join(dataDir, fname) } + +func ISIPFilename(fname string) bool { + if strings.HasPrefix(fname, lastIPFilePrefix) || fname == "lock" { + return false + } + if runtime.GOOS == "windows" { + fname = strings.ReplaceAll(fname, "_", ":") + } + _, err := netip.ParseAddr(fname) + return err == nil +} diff --git a/plugins/ipam/host-local/backend/store.go b/plugins/ipam/host-local/backend/store.go index afd2af6e8..a03d30f19 100644 --- a/plugins/ipam/host-local/backend/store.go +++ b/plugins/ipam/host-local/backend/store.go @@ -24,4 +24,5 @@ type Store interface { LastReservedIP(rangeID string) (net.IP, error) ReleaseByID(id string, ifname string) error GetByID(id string, ifname string) []net.IP + ReleaseExcept(keeps [][2]string) error } diff --git a/plugins/ipam/host-local/backend/testing/fake_store.go b/plugins/ipam/host-local/backend/testing/fake_store.go index 954044351..6b4f42e40 100644 --- a/plugins/ipam/host-local/backend/testing/fake_store.go +++ b/plugins/ipam/host-local/backend/testing/fake_store.go @@ -89,3 +89,18 @@ func (s *FakeStore) GetByID(id string, _ string) []net.IP { func (s *FakeStore) SetIPMap(m map[string]string) { 
 	s.ipMap = m
 }
+
+func (s *FakeStore) ReleaseExcept(keeps [][2]string) error {
+	keepKeys := map[string]struct{}{}
+	for _, keep := range keeps {
+		id := keep[0]
+		keepKeys[id] = struct{}{}
+	}
+
+	for k := range s.ipMap {
+		if _, ok := keepKeys[k]; !ok {
+			delete(s.ipMap, k)
+		}
+	}
+	return nil
+}
diff --git a/plugins/ipam/host-local/host_local_test.go b/plugins/ipam/host-local/host_local_test.go
index 1ab15cba2..0400499bc 100644
--- a/plugins/ipam/host-local/host_local_test.go
+++ b/plugins/ipam/host-local/host_local_test.go
@@ -18,11 +18,13 @@ import (
 	"fmt"
 	"net"
 	"os"
+	"path"
 	"path/filepath"
 	"strings"
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+	"github.com/tidwall/sjson"
 
 	"github.com/containernetworking/cni/pkg/skel"
 	"github.com/containernetworking/cni/pkg/types"
@@ -678,6 +680,128 @@ var _ = Describe("host-local Operations", func() {
 			}
 		})
 	}
+
+	It("Verifies that GC works as expected", func() {
+		conf := fmt.Sprintf(`{
+			"cniVersion": "1.1.0",
+			"name": "mynet0",
+			"type": "ipvlan",
+			"ipam": {
+				"type": "host-local",
+				"dataDir": "%s",
+				"ranges": [
+					[{ "subnet": "10.1.2.0/24" }]
+				]
+			}
+		}`, tmpDir)
+
+		args := &skel.CmdArgs{
+			ContainerID: "dummy",
+			Netns:       nspath,
+			IfName:      ifname,
+			StdinData:   []byte(conf),
+		}
+
+		args1 := &skel.CmdArgs{
+			ContainerID: "dummy1",
+			Netns:       nspath,
+			IfName:      ifname,
+			StdinData:   []byte(conf),
+		}
+
+		// Allocate the IP
+		res, _, err := testutils.CmdAddWithArgs(args, func() error {
+			return cmdAdd(args)
+		})
+		Expect(err).NotTo(HaveOccurred())
+
+		result, err := types100.GetResult(res)
+		Expect(err).NotTo(HaveOccurred())
+		ip1 := result.IPs[0].Address.IP
+
+		// Allocate the IP with another container ID
+		res, _, err = testutils.CmdAddWithArgs(args1, func() error {
+			return cmdAdd(args1)
+		})
+		Expect(err).NotTo(HaveOccurred())
+		result, err = types100.GetResult(res)
+		Expect(err).NotTo(HaveOccurred())
+		ip2 := result.IPs[0].Address.IP
+
+		ipReserved := func(ip net.IP, expected bool) {
+			GinkgoHelper()
+
+			fname := disk.GetEscapedPath(path.Join(tmpDir, "mynet0"), ip.String())
+			_, err := os.Stat(fname)
+			if os.IsNotExist(err) {
+				if expected {
+					Fail(fmt.Sprintf("IP %s should have existed, but it did not", ip.String()))
+				}
+				return
+			}
+			Expect(err).NotTo(HaveOccurred())
+			if !expected {
+				Fail(fmt.Sprintf("IP %s should not have existed, but it did", ip.String()))
+			}
+		}
+
+		ipReserved(ip1, true)
+		ipReserved(ip2, true)
+
+		// doGC takes alternating containerID, ifname pairs
+		doGC := func(valid ...string) {
+			GinkgoHelper()
+			if len(valid)%2 != 0 {
+				Fail("doGC needs id, ifname pairs")
+			}
+			a := []types.GCAttachment{}
+			for i := 0; i < len(valid); {
+				a = append(a, types.GCAttachment{
+					ContainerID: valid[i],
+					IfName:      valid[i+1],
+				})
+				i += 2
+			}
+
+			confGC := fmt.Sprintf(`{
+				"cniVersion": "1.1.0",
+				"name": "mynet0",
+				"type": "ipvlan",
+				"ipam": {
+					"type": "host-local",
+					"dataDir": "%s",
+					"ranges": [
+						[{ "subnet": "10.1.2.0/24" }]
+					]
+				}
+			}`, tmpDir)
+			confGC, err := sjson.Set(confGC, "cni\\.dev/valid-attachments", a)
+			Expect(err).NotTo(HaveOccurred())
+
+			args := &skel.CmdArgs{
+				StdinData: []byte(confGC),
+			}
+			err = testutils.CmdGC(func() error {
+				return cmdGC(args)
+			})
+			Expect(err).NotTo(HaveOccurred())
+		}
+
+		// Keep all attachments
+		doGC("dummy", ifname, "dummy1", ifname)
+		ipReserved(ip1, true)
+		ipReserved(ip2, true)
+
+		// Keep one attachment, keep another with wrong ifname
+		doGC("dummy", ifname, "dummy1", "eth42")
+		ipReserved(ip1, true)
+		ipReserved(ip2, false)
+
+		// Keep no attachments
+		doGC("different",
"eth11") + ipReserved(ip1, false) + ipReserved(ip2, false) + }) }) func mustCIDR(s string) net.IPNet { diff --git a/plugins/ipam/host-local/main.go b/plugins/ipam/host-local/main.go index 0f53574ea..e63e40676 100644 --- a/plugins/ipam/host-local/main.go +++ b/plugins/ipam/host-local/main.go @@ -29,7 +29,12 @@ import ( ) func main() { - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.All, bv.BuildString("host-local")) + skel.PluginMainFuncs(skel.CNIFuncs{ + Add: cmdAdd, + Check: cmdCheck, + Del: cmdDel, + GC: cmdGC, + }, version.All, bv.BuildString("host-local")) } func cmdCheck(args *skel.CmdArgs) error { @@ -160,3 +165,23 @@ func cmdDel(args *skel.CmdArgs) error { } return nil } + +func cmdGC(args *skel.CmdArgs) error { + netConf, err := allocator.ParseConfig(args.StdinData) + if err != nil { + return err + } + store, err := disk.New(netConf.Name, netConf.IPAM.DataDir) + if err != nil { + return err + } + defer store.Close() + + keepKeys := make([][2]string, 0, len(netConf.ValidAttachments)) + + for _, a := range netConf.ValidAttachments { + keepKeys = append(keepKeys, [2]string{a.ContainerID, a.IfName}) + } + + return store.ReleaseExcept(keepKeys) +} diff --git a/plugins/ipam/static/main.go b/plugins/ipam/static/main.go index bf541d2d6..25731c9d0 100644 --- a/plugins/ipam/static/main.go +++ b/plugins/ipam/static/main.go @@ -68,7 +68,12 @@ type Address struct { } func main() { - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.All, bv.BuildString("static")) + skel.PluginMainFuncs( + skel.CNIFuncs{ + Add: cmdAdd, + Check: cmdCheck, + Del: cmdDel, + }, version.All, bv.BuildString("static")) } func loadNetConf(bytes []byte) (*types.NetConf, string, error) { diff --git a/plugins/main/bridge/bridge.go b/plugins/main/bridge/bridge.go index 4054f6176..c40a0aa45 100644 --- a/plugins/main/bridge/bridge.go +++ b/plugins/main/bridge/bridge.go @@ -367,6 +367,15 @@ func ensureBridge(brName string, mtu int, promiscMode, vlanFiltering bool) (*net if err != nil { return nil, err } + if mtu > 0 && br.MTU != mtu { + if err := netlink.LinkSetMTU(br, mtu); err != nil { + return nil, fmt.Errorf("could not update MTU on %q: %w", brName, err) + } + br, err = bridgeByName(brName) + if err != nil { + return nil, err + } + } // we want to own the routes for this interface _, _ = sysctl.Sysctl(fmt.Sprintf("net/ipv6/conf/%s/accept_ra", brName), "0") @@ -424,6 +433,7 @@ func setupVeth(netns ns.NetNS, br *netlink.Bridge, ifName string, mtu int, hairp contIface.Name = containerVeth.Name contIface.Mac = containerVeth.HardwareAddr.String() contIface.Sandbox = netns.Path() + contIface.Mtu = containerVeth.MTU hostIface.Name = hostVeth.Name return nil }) @@ -437,6 +447,7 @@ func setupVeth(netns ns.NetNS, br *netlink.Bridge, ifName string, mtu int, hairp return nil, nil, fmt.Errorf("failed to lookup %q: %v", hostIface.Name, err) } hostIface.Mac = hostVeth.Attrs().HardwareAddr.String() + hostIface.Mtu = hostVeth.Attrs().MTU // connect host veth end to the bridge if err := netlink.LinkSetMaster(hostVeth, br); err != nil { @@ -511,6 +522,7 @@ func setupBridge(n *NetConf) (*netlink.Bridge, *current.Interface, error) { return br, ¤t.Interface{ Name: br.Attrs().Name, Mac: br.Attrs().HardwareAddr.String(), + Mtu: br.Attrs().MTU, }, nil } @@ -827,7 +839,13 @@ func cmdDel(args *skel.CmdArgs) error { } func main() { - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.All, bv.BuildString("bridge")) + skel.PluginMainFuncs(skel.CNIFuncs{ + Add: cmdAdd, + Check: cmdCheck, + Del: cmdDel, + Status: cmdStatus, + GC: cmdGC, + }, 
version.All, bv.BuildString("bridge")) } type cniBridgeIf struct { @@ -1088,3 +1106,37 @@ func cmdCheck(args *skel.CmdArgs) error { func uniqueID(containerID, cniIface string) string { return containerID + "-" + cniIface } + +func cmdStatus(args *skel.CmdArgs) error { + conf := NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %w", err) + } + + if conf.IPAM.Type != "" { + if err := ipam.ExecStatus(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + } + + return nil +} + +func cmdGC(args *skel.CmdArgs) error { + conf := NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %w", err) + } + + if conf.IPAM.Type != "" { + if err := ipam.ExecGC(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + } + + // We do not clean up any stale bridge ports here, as there are use-cases where + // additional bridge ports are created by external users. + + // TODO: Clean up any stale masquerading rules + return nil +} diff --git a/plugins/main/bridge/bridge_test.go b/plugins/main/bridge/bridge_test.go index 42c70ae29..3e5c62265 100644 --- a/plugins/main/bridge/bridge_test.go +++ b/plugins/main/bridge/bridge_test.go @@ -43,6 +43,7 @@ const ( BRNAMEVLAN = "bridge0.100" IFNAME = "eth0" NAMESERVER = "192.0.2.0" + MTU = 5000 ) type Net struct { @@ -118,7 +119,7 @@ func (tc testCase) netConf() *NetConf { BrName: BRNAME, IsGW: tc.isGW, IPMasq: false, - MTU: 5000, + MTU: MTU, } } @@ -128,6 +129,7 @@ const ( "cniVersion": "%s", "name": "testConfig", "type": "bridge", + "mtu": %d, "bridge": "%s"` vlan = `, @@ -215,7 +217,7 @@ const ( // netConfJSON() generates a JSON network configuration string // for a test case. func (tc testCase) netConfJSON(dataDir string) string { - conf := fmt.Sprintf(netConfStr, tc.cniVersion, BRNAME) + conf := fmt.Sprintf(netConfStr, tc.cniVersion, tc.netConf().MTU, BRNAME) if tc.vlan != 0 { conf += fmt.Sprintf(vlan, tc.vlan) @@ -494,6 +496,11 @@ type ( func newTesterByVersion(version string, testNS, targetNS ns.NetNS) cmdAddDelTester { switch { + case strings.HasPrefix(version, "1.1."): + return &testerV10x{ + testNS: testNS, + targetNS: targetNS, + } case strings.HasPrefix(version, "1.0."): return &testerV10x{ testNS: testNS, @@ -526,6 +533,14 @@ func (tester *testerV10x) cmdAddTest(tc testCase, dataDir string) (types.Result, err := tester.testNS.Do(func(ns.NetNS) error { defer GinkgoRecover() + // check that STATUS is + if testutils.SpecVersionHasSTATUS(tc.cniVersion) { + err := testutils.CmdStatus(func() error { + return cmdStatus(&skel.CmdArgs{StdinData: []byte(tc.netConfJSON(dataDir))}) + }) + Expect(err).NotTo(HaveOccurred()) + } + r, raw, err := testutils.CmdAddWithArgs(tester.args, func() error { return cmdAdd(tester.args) }) @@ -555,6 +570,10 @@ func (tester *testerV10x) cmdAddTest(tc testCase, dataDir string) (types.Result, } Expect(result.Interfaces[2].Sandbox).To(Equal(tester.targetNS.Path())) + Expect(result.Interfaces[0].Mtu).To(Equal(MTU)) + Expect(result.Interfaces[1].Mtu).To(Equal(MTU)) + Expect(result.Interfaces[2].Mtu).To(Equal(MTU)) + // Make sure bridge link exists link, err := netlink.LinkByName(result.Interfaces[0].Name) Expect(err).NotTo(HaveOccurred()) @@ -687,6 +706,7 @@ func (tester *testerV10x) cmdAddTest(tc testCase, dataDir string) (types.Result, Expect(link.Attrs().Name).To(Equal(IFNAME)) Expect(link).To(BeAssignableToTypeOf(&netlink.Veth{})) assertContainerInterfaceLinkState(&tc, link) + 
Expect(link.Attrs().MTU).To(Equal(MTU)) expCIDRsV4, expCIDRsV6 := tc.expectedCIDRs() addrs, err := netlink.AddrList(link, netlink.FAMILY_V4) @@ -856,6 +876,17 @@ func (tester *testerV10x) cmdDelTest(tc testCase, dataDir string) { return nil }) Expect(err).NotTo(HaveOccurred()) + + // Make sure that GC does not reuturn an error + if testutils.SpecVersionHasGC(tc.cniVersion) { + err = tester.testNS.Do(func(ns.NetNS) error { + defer GinkgoRecover() + return testutils.CmdGC(func() error { + return cmdGC(tester.args) + }) + }) + Expect(err).NotTo(HaveOccurred()) + } } func (tester *testerV04x) cmdAddTest(tc testCase, dataDir string) (types.Result, error) { @@ -1477,7 +1508,7 @@ func (tester *testerV01xOr02x) cmdAddTest(tc testCase, dataDir string) (types.Re return err } Expect(err).NotTo(HaveOccurred()) - Expect(strings.Index(string(raw), "\"ip\":")).Should(BeNumerically(">", 0)) + Expect(string(raw)).Should(ContainSubstring("\"ip\":")) // We expect a version 0.1.0 or 0.2.0 result _, err = r.GetAsVersion(tc.cniVersion) @@ -1861,6 +1892,7 @@ var _ = Describe("bridge Operations", func() { bridge, _, err := setupBridge(conf) Expect(err).NotTo(HaveOccurred()) Expect(bridge.Attrs().Name).To(Equal(BRNAME)) + Expect(bridge.Attrs().MTU).To(Equal(MTU)) // Double check that the link was added link, err := netlink.LinkByName(BRNAME) @@ -1879,6 +1911,7 @@ var _ = Describe("bridge Operations", func() { err := netlink.LinkAdd(&netlink.Bridge{ LinkAttrs: netlink.LinkAttrs{ Name: BRNAME, + MTU: MTU, }, }) Expect(err).NotTo(HaveOccurred()) diff --git a/plugins/main/dummy/dummy.go b/plugins/main/dummy/dummy.go index 8b3fd2dc3..493dcd885 100644 --- a/plugins/main/dummy/dummy.go +++ b/plugins/main/dummy/dummy.go @@ -64,6 +64,7 @@ func createDummy(ifName string, netns ns.NetNS) (*current.Interface, error) { dummy.Mac = contDummy.Attrs().HardwareAddr.String() dummy.Sandbox = netns.Path() + dummy.Mtu = contDummy.Attrs().MTU return nil }) @@ -179,7 +180,13 @@ func cmdDel(args *skel.CmdArgs) error { } func main() { - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.All, bv.BuildString("dummy")) + skel.PluginMainFuncs(skel.CNIFuncs{ + Add: cmdAdd, + Check: cmdCheck, + Del: cmdDel, + Status: cmdStatus, + GC: cmdGC, + }, version.All, bv.BuildString("dummy")) } func cmdCheck(args *skel.CmdArgs) error { @@ -288,3 +295,29 @@ func validateCniContainerInterface(intf current.Interface) error { return nil } + +func cmdStatus(args *skel.CmdArgs) error { + conf := types.NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %w", err) + } + + if err := ipam.ExecStatus(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + + return nil +} + +func cmdGC(args *skel.CmdArgs) error { + conf := types.NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %w", err) + } + + if err := ipam.ExecGC(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + + return nil +} diff --git a/plugins/main/dummy/dummy_test.go b/plugins/main/dummy/dummy_test.go index 6d7c0d0a2..3588b01a0 100644 --- a/plugins/main/dummy/dummy_test.go +++ b/plugins/main/dummy/dummy_test.go @@ -106,7 +106,7 @@ type ( func newTesterByVersion(version string) tester { switch { - case strings.HasPrefix(version, "1.0."): + case strings.HasPrefix(version, "1."): return &testerV10x{} case strings.HasPrefix(version, "0.4."): return &testerV04x{} @@ -124,6 +124,7 @@ func (t *testerV10x) verifyResult(result types.Result, name string) 
string { Expect(r.Interfaces).To(HaveLen(1)) Expect(r.Interfaces[0].Name).To(Equal(name)) + Expect(r.Interfaces[0].Mtu).To(BeNumerically(">", 0)) Expect(r.IPs).To(HaveLen(1)) return r.Interfaces[0].Mac @@ -261,6 +262,13 @@ var _ = Describe("dummy Operations", func() { defer GinkgoRecover() var err error + if testutils.SpecVersionHasSTATUS(ver) { + err = testutils.CmdStatus(func() error { + return cmdStatus(args) + }) + Expect(err).NotTo(HaveOccurred()) + } + result, _, err = testutils.CmdAddWithArgs(args, func() error { return cmdAdd(args) }) @@ -350,6 +358,14 @@ var _ = Describe("dummy Operations", func() { return cmdDel(args) }) Expect(err).NotTo(HaveOccurred()) + + if testutils.SpecVersionHasGC(ver) { + err = testutils.CmdGC(func() error { + return cmdGC(args) + }) + Expect(err).NotTo(HaveOccurred()) + } + return nil }) Expect(err).NotTo(HaveOccurred()) diff --git a/plugins/main/host-device/host-device.go b/plugins/main/host-device/host-device.go index 33f91414e..7cffc337f 100644 --- a/plugins/main/host-device/host-device.go +++ b/plugins/main/host-device/host-device.go @@ -147,6 +147,7 @@ func cmdAdd(args *skel.CmdArgs) error { Name: contDev.Attrs().Name, Mac: contDev.Attrs().HardwareAddr.String(), Sandbox: containerNs.Path(), + Mtu: contDev.Attrs().MTU, }} } @@ -453,6 +454,7 @@ func printLink(dev netlink.Link, cniVersion string, containerNs ns.NetNS) error Name: dev.Attrs().Name, Mac: dev.Attrs().HardwareAddr.String(), Sandbox: containerNs.Path(), + Mtu: dev.Attrs().MTU, }, }, } @@ -518,7 +520,13 @@ func getLink(devname, hwaddr, kernelpath, pciaddr string, auxDev string) (netlin } func main() { - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.All, bv.BuildString("host-device")) + skel.PluginMainFuncs(skel.CNIFuncs{ + Add: cmdAdd, + Check: cmdCheck, + Del: cmdDel, + Status: cmdStatus, + GC: cmdGC, + }, version.All, bv.BuildString("host-device")) } func cmdCheck(args *skel.CmdArgs) error { @@ -625,3 +633,33 @@ func validateCniContainerInterface(intf current.Interface) error { return nil } + +func cmdStatus(args *skel.CmdArgs) error { + conf := NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %w", err) + } + + if conf.IPAM.Type != "" { + if err := ipam.ExecStatus(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + } + + return nil +} + +func cmdGC(args *skel.CmdArgs) error { + conf := NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %w", err) + } + + if conf.IPAM.Type != "" { + if err := ipam.ExecGC(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + } + + return nil +} diff --git a/plugins/main/host-device/host-device_test.go b/plugins/main/host-device/host-device_test.go index 2c26d03cb..20b7e30dc 100644 --- a/plugins/main/host-device/host-device_test.go +++ b/plugins/main/host-device/host-device_test.go @@ -231,7 +231,7 @@ type ( func newTesterByVersion(version string) tester { switch { - case strings.HasPrefix(version, "1.0."): + case strings.HasPrefix(version, "1."): return &testerV10x{} case strings.HasPrefix(version, "0.4."): return &testerV04x{} @@ -252,6 +252,7 @@ func (t *testerV10x) expectInterfaces(result types.Result, name, mac, sandbox st Name: name, Mac: mac, Sandbox: sandbox, + Mtu: 1500, }, })) } @@ -330,7 +331,7 @@ var _ = Describe("base functionality", func() { Expect(testutils.UnmountNS(targetNS)).To(Succeed()) }) - for _, ver := range []string{"0.3.0", "0.3.1", "0.4.0", "1.0.0"} { + for _, ver := 
range testutils.AllSpecVersions[2:] { // from v0.3 onwards // Redefine ver inside for scope so real value is picked up by each dynamically defined It() // See Gingkgo's "Patterns for dynamically generating tests" documentation. ver := ver @@ -354,7 +355,6 @@ var _ = Describe("base functionality", func() { return nil }) - // call CmdAdd cniName := "eth0" conf := fmt.Sprintf(`{ "cniVersion": "%s", @@ -362,6 +362,16 @@ var _ = Describe("base functionality", func() { "type": "host-device", "device": %q }`, ver, ifname) + + // if v1.1 or greater, call CmdStatus + if testutils.SpecVersionHasSTATUS(ver) { + err := testutils.CmdStatus(func() error { + return cmdStatus(&skel.CmdArgs{StdinData: []byte(conf)}) + }) + Expect(err).NotTo(HaveOccurred()) + } + + // call CmdAdd args := &skel.CmdArgs{ ContainerID: "dummy", Netns: targetNS.Path(), @@ -411,6 +421,14 @@ var _ = Describe("base functionality", func() { Expect(err).NotTo(HaveOccurred()) return nil }) + + // call GC, ensure it does not fail (it should be a noop) + if testutils.SpecVersionHasGC(ver) { + err := testutils.CmdGC(func() error { + return cmdGC(&skel.CmdArgs{StdinData: []byte(conf)}) + }) + Expect(err).NotTo(HaveOccurred()) + } }) It(fmt.Sprintf("[%s] ensures CmdDel is idempotent", ver), func() { @@ -621,7 +639,6 @@ var _ = Describe("base functionality", func() { return nil }) - // call CmdAdd targetIP := "10.10.0.1/24" cniName := "eth0" conf := fmt.Sprintf(`{ @@ -638,6 +655,16 @@ var _ = Describe("base functionality", func() { }, "device": %q }`, ver, ifname) + + // if v1.1 or greater, call CmdStatus + if gt, _ := version.GreaterThanOrEqualTo(ver, "1.1.0"); gt { + err := testutils.CmdStatus(func() error { + return cmdStatus(&skel.CmdArgs{StdinData: []byte(conf)}) + }) + Expect(err).NotTo(HaveOccurred()) + } + + // call CmdAdd args := &skel.CmdArgs{ ContainerID: "dummy", Netns: targetNS.Path(), @@ -695,6 +722,14 @@ var _ = Describe("base functionality", func() { Expect(err).NotTo(HaveOccurred()) return nil }) + + // call GC, ensure it does not fail + if gt, _ := version.GreaterThanOrEqualTo(ver, "1.1.0"); gt { + err := testutils.CmdGC(func() error { + return cmdGC(&skel.CmdArgs{StdinData: []byte(conf)}) + }) + Expect(err).NotTo(HaveOccurred()) + } }) It(fmt.Sprintf("[%s] fails an invalid config", ver), func() { diff --git a/plugins/main/ipvlan/ipvlan.go b/plugins/main/ipvlan/ipvlan.go index d3645ad13..d9e684c2d 100644 --- a/plugins/main/ipvlan/ipvlan.go +++ b/plugins/main/ipvlan/ipvlan.go @@ -177,6 +177,7 @@ func createIpvlan(conf *NetConf, ifName string, netns ns.NetNS) (*current.Interf return fmt.Errorf("failed to refetch ipvlan %q: %v", ipvlan.Name, err) } ipvlan.Mac = contIpvlan.Attrs().HardwareAddr.String() + ipvlan.Mtu = contIpvlan.Attrs().MTU ipvlan.Sandbox = netns.Path() return nil @@ -349,7 +350,13 @@ func cmdDel(args *skel.CmdArgs) error { } func main() { - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.All, bv.BuildString("ipvlan")) + skel.PluginMainFuncs(skel.CNIFuncs{ + Add: cmdAdd, + Check: cmdCheck, + Del: cmdDel, + Status: cmdStatus, + GC: cmdGC, + }, version.All, bv.BuildString("ipvlan")) } func cmdCheck(args *skel.CmdArgs) error { @@ -484,3 +491,32 @@ func validateCniContainerInterface(intf current.Interface, modeExpected string) return nil } + +func cmdStatus(args *skel.CmdArgs) error { + conf := NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %w", err) + } + if conf.IPAM.Type != "" { + if err := ipam.ExecStatus(conf.IPAM.Type, args.StdinData); 
err != nil { + return err + } + } + + return nil +} + +func cmdGC(args *skel.CmdArgs) error { + conf := NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %w", err) + } + + if conf.IPAM.Type != "" { + if err := ipam.ExecGC(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + } + + return nil +} diff --git a/plugins/main/ipvlan/ipvlan_test.go b/plugins/main/ipvlan/ipvlan_test.go index 3c787fd7a..97dfd1024 100644 --- a/plugins/main/ipvlan/ipvlan_test.go +++ b/plugins/main/ipvlan/ipvlan_test.go @@ -114,6 +114,13 @@ func ipvlanAddCheckDelTest(conf, masterName string, originalNS, targetNS ns.NetN err = originalNS.Do(func(ns.NetNS) error { defer GinkgoRecover() + if testutils.SpecVersionHasSTATUS(cniVersion) { + err = testutils.CmdStatus(func() error { + return cmdStatus(args) + }) + Expect(err).NotTo(HaveOccurred()) + } + result, _, err = testutils.CmdAddWithArgs(args, func() error { return cmdAdd(args) }) @@ -197,6 +204,17 @@ func ipvlanAddCheckDelTest(conf, masterName string, originalNS, targetNS ns.NetN return nil }) Expect(err).NotTo(HaveOccurred()) + + // See that GC succeeds + if testutils.SpecVersionHasGC(cniVersion) { + err = originalNS.Do(func(_ ns.NetNS) error { + defer GinkgoRecover() + return testutils.CmdGC(func() error { + return cmdGC(args) + }) + }) + Expect(err).NotTo(HaveOccurred()) + } } type tester interface { diff --git a/plugins/main/loopback/loopback.go b/plugins/main/loopback/loopback.go index 640b6dd0c..be6c360ef 100644 --- a/plugins/main/loopback/loopback.go +++ b/plugins/main/loopback/loopback.go @@ -55,6 +55,7 @@ func cmdAdd(args *skel.CmdArgs) error { } var v4Addr, v6Addr *net.IPNet + var mtu int args.IfName = "lo" // ignore config, this only works for loopback err = ns.WithNetNSPath(args.Netns, func(_ ns.NetNS) error { @@ -62,6 +63,7 @@ func cmdAdd(args *skel.CmdArgs) error { if err != nil { return err // not tested } + mtu = link.Attrs().MTU err = netlink.LinkSetUp(link) if err != nil { @@ -115,6 +117,7 @@ func cmdAdd(args *skel.CmdArgs) error { Name: args.IfName, Mac: "00:00:00:00:00:00", Sandbox: args.Netns, + Mtu: mtu, }, }, } @@ -172,7 +175,12 @@ func cmdDel(args *skel.CmdArgs) error { } func main() { - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.All, bv.BuildString("loopback")) + skel.PluginMainFuncs( + skel.CNIFuncs{ + Add: cmdAdd, + Check: cmdCheck, + Del: cmdDel, + }, version.All, bv.BuildString("loopback")) } func cmdCheck(args *skel.CmdArgs) error { diff --git a/plugins/main/loopback/loopback_test.go b/plugins/main/loopback/loopback_test.go index 0697b80a0..d07da32e8 100644 --- a/plugins/main/loopback/loopback_test.go +++ b/plugins/main/loopback/loopback_test.go @@ -74,7 +74,7 @@ var _ = Describe("Loopback", func() { session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter) Expect(err).NotTo(HaveOccurred()) - Eventually(session).Should(gbytes.Say(`{.*}`)) + Eventually(session).Should(gbytes.Say(`.+`)) Eventually(session).Should(gexec.Exit(0)) var lo *net.Interface diff --git a/plugins/main/macvlan/macvlan.go b/plugins/main/macvlan/macvlan.go index 2c6e9204b..4817b5175 100644 --- a/plugins/main/macvlan/macvlan.go +++ b/plugins/main/macvlan/macvlan.go @@ -273,6 +273,7 @@ func createMacvlan(conf *NetConf, ifName string, netns ns.NetNS) (*current.Inter } macvlan.Mac = contMacvlan.Attrs().HardwareAddr.String() macvlan.Sandbox = netns.Path() + macvlan.Mtu = contMacvlan.Attrs().MTU return nil }) @@ -426,7 +427,13 @@ func cmdDel(args *skel.CmdArgs) error { } func main() 
{ - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.All, bv.BuildString("macvlan")) + skel.PluginMainFuncs(skel.CNIFuncs{ + Add: cmdAdd, + Check: cmdCheck, + Del: cmdDel, + Status: cmdStatus, + GC: cmdGC, + }, version.All, bv.BuildString("macvlan")) } func cmdCheck(args *skel.CmdArgs) error { @@ -562,3 +569,33 @@ func validateCniContainerInterface(intf current.Interface, modeExpected string) return nil } + +func cmdStatus(args *skel.CmdArgs) error { + conf := NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %w", err) + } + + if conf.IPAM.Type != "" { + if err := ipam.ExecStatus(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + } + + return nil +} + +func cmdGC(args *skel.CmdArgs) error { + conf := NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %w", err) + } + + if conf.IPAM.Type != "" { + if err := ipam.ExecGC(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + } + + return nil +} diff --git a/plugins/main/macvlan/macvlan_test.go b/plugins/main/macvlan/macvlan_test.go index ae73f99f2..d788b869f 100644 --- a/plugins/main/macvlan/macvlan_test.go +++ b/plugins/main/macvlan/macvlan_test.go @@ -117,7 +117,7 @@ type ( func newTesterByVersion(version string) tester { switch { - case strings.HasPrefix(version, "1.0."): + case strings.HasPrefix(version, "1."): return &testerV10x{} case strings.HasPrefix(version, "0.4."): return &testerV04x{} @@ -138,6 +138,7 @@ func (t *testerV10x) verifyResult(result types.Result, err error, name string, n Expect(r.Interfaces).To(HaveLen(1)) Expect(r.Interfaces[0].Name).To(Equal(name)) + Expect(r.Interfaces[0].Mtu).To(Equal(1500)) Expect(r.IPs).To(HaveLen(numAddrs)) return r.Interfaces[0].Mac @@ -322,6 +323,13 @@ var _ = Describe("macvlan Operations", func() { err := originalNS.Do(func(ns.NetNS) error { defer GinkgoRecover() + if testutils.SpecVersionHasSTATUS(ver) { + err := testutils.CmdStatus(func() error { + return cmdStatus(args) + }) + Expect(err).NotTo(HaveOccurred()) + } + result, _, err := testutils.CmdAddWithArgs(args, func() error { return cmdAdd(args) }) @@ -360,6 +368,13 @@ var _ = Describe("macvlan Operations", func() { return cmdDel(args) }) Expect(err).NotTo(HaveOccurred()) + + if testutils.SpecVersionHasGC(ver) { + err := testutils.CmdGC(func() error { + return cmdGC(args) + }) + Expect(err).NotTo(HaveOccurred()) + } return nil }) Expect(err).NotTo(HaveOccurred()) @@ -434,6 +449,13 @@ var _ = Describe("macvlan Operations", func() { err := originalNS.Do(func(ns.NetNS) error { defer GinkgoRecover() + if testutils.SpecVersionHasSTATUS(ver) { + err := testutils.CmdStatus(func() error { + return cmdStatus(args) + }) + Expect(err).NotTo(HaveOccurred()) + } + result, _, err := testutils.CmdAddWithArgs(args, func() error { return cmdAdd(args) }) @@ -472,6 +494,14 @@ var _ = Describe("macvlan Operations", func() { return cmdDel(args) }) Expect(err).NotTo(HaveOccurred()) + + if testutils.SpecVersionHasGC(ver) { + err := testutils.CmdGC(func() error { + return cmdGC(args) + }) + Expect(err).NotTo(HaveOccurred()) + } + return nil }) Expect(err).NotTo(HaveOccurred()) @@ -520,6 +550,13 @@ var _ = Describe("macvlan Operations", func() { defer GinkgoRecover() var err error + if testutils.SpecVersionHasSTATUS(ver) { + err := testutils.CmdStatus(func() error { + return cmdStatus(args) + }) + Expect(err).NotTo(HaveOccurred()) + } + result, _, err = testutils.CmdAddWithArgs(args, func() error { 
return cmdAdd(args) }) @@ -587,6 +624,13 @@ var _ = Describe("macvlan Operations", func() { return cmdDel(args) }) Expect(err).NotTo(HaveOccurred()) + + if testutils.SpecVersionHasGC(ver) { + err := testutils.CmdGC(func() error { + return cmdGC(args) + }) + Expect(err).NotTo(HaveOccurred()) + } return nil }) Expect(err).NotTo(HaveOccurred()) @@ -660,6 +704,13 @@ var _ = Describe("macvlan Operations", func() { err = originalNS.Do(func(ns.NetNS) error { defer GinkgoRecover() + if testutils.SpecVersionHasSTATUS(ver) { + err := testutils.CmdStatus(func() error { + return cmdStatus(args) + }) + Expect(err).NotTo(HaveOccurred()) + } + result, _, err := testutils.CmdAddWithArgs(args, func() error { return cmdAdd(args) }) @@ -697,6 +748,13 @@ var _ = Describe("macvlan Operations", func() { err := testutils.CmdDelWithArgs(args, func() error { return cmdDel(args) }) + + if testutils.SpecVersionHasGC(ver) { + err := testutils.CmdGC(func() error { + return cmdGC(args) + }) + Expect(err).NotTo(HaveOccurred()) + } Expect(err).NotTo(HaveOccurred()) return nil }) diff --git a/plugins/main/ptp/ptp.go b/plugins/main/ptp/ptp.go index 841e57d12..b94721679 100644 --- a/plugins/main/ptp/ptp.go +++ b/plugins/main/ptp/ptp.go @@ -70,9 +70,11 @@ func setupContainerVeth(netns ns.NetNS, ifName string, mtu int, pr *current.Resu } hostInterface.Name = hostVeth.Name hostInterface.Mac = hostVeth.HardwareAddr.String() + hostInterface.Mtu = hostVeth.MTU containerInterface.Name = contVeth0.Name containerInterface.Mac = contVeth0.HardwareAddr.String() containerInterface.Sandbox = netns.Path() + containerInterface.Mtu = contVeth0.MTU for _, ipc := range pr.IPs { // All addresses apply to the container veth interface @@ -304,7 +306,13 @@ func cmdDel(args *skel.CmdArgs) error { } func main() { - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.All, bv.BuildString("ptp")) + skel.PluginMainFuncs(skel.CNIFuncs{ + Add: cmdAdd, + Check: cmdCheck, + Del: cmdDel, + Status: cmdStatus, + GC: cmdGC, + }, version.All, bv.BuildString("ptp")) } func cmdCheck(args *skel.CmdArgs) error { @@ -379,6 +387,33 @@ func cmdCheck(args *skel.CmdArgs) error { return nil } +func cmdStatus(args *skel.CmdArgs) error { + conf := NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %w", err) + } + + if err := ipam.ExecStatus(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + + return nil +} + +func cmdGC(args *skel.CmdArgs) error { + conf := NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %w", err) + } + + if err := ipam.ExecGC(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + + // TODO: Clean up any stale masquerading rules + return nil +} + func validateCniContainerInterface(intf current.Interface) error { var link netlink.Link var err error diff --git a/plugins/main/ptp/ptp_test.go b/plugins/main/ptp/ptp_test.go index 0ede4409e..b380f92ae 100644 --- a/plugins/main/ptp/ptp_test.go +++ b/plugins/main/ptp/ptp_test.go @@ -17,11 +17,16 @@ package main import ( "encoding/json" "fmt" + "net" "os" + "path" "strings" + "github.com/coreos/go-iptables/iptables" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "github.com/tidwall/gjson" + "github.com/tidwall/sjson" "github.com/vishvananda/netlink" "github.com/containernetworking/cni/pkg/skel" @@ -29,8 +34,11 @@ import ( types020 "github.com/containernetworking/cni/pkg/types/020" types040 "github.com/containernetworking/cni/pkg/types/040" types100 "github.com/containernetworking/cni/pkg/types/100" + "github.com/containernetworking/cni/pkg/version" + "github.com/containernetworking/plugins/pkg/ip" "github.com/containernetworking/plugins/pkg/ns" "github.com/containernetworking/plugins/pkg/testutils" + "github.com/containernetworking/plugins/pkg/utils" "github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator" ) @@ -104,7 +112,7 @@ type ( func newTesterByVersion(version string) tester { switch { - case strings.HasPrefix(version, "1.0."): + case strings.HasPrefix(version, "1."): return &testerV10x{} case strings.HasPrefix(version, "0.4."): return &testerV04x{} @@ -116,8 +124,9 @@ func newTesterByVersion(version string) tester { } type resultIP struct { - ip string - gw string + ip string + cidr string // same as ip, but with prefix + gw string } // verifyResult minimally verifies the Result and returns the interface's IP addresses and MAC address @@ -129,6 +138,8 @@ func (t *testerV10x) verifyResult(result types.Result, expectedIfName, expectedS Expect(r.Interfaces[0].Name).To(HavePrefix("veth")) Expect(r.Interfaces[0].Mac).To(HaveLen(17)) Expect(r.Interfaces[0].Sandbox).To(BeEmpty()) + Expect(r.Interfaces[0].Mtu).To(Equal(5000)) + Expect(r.Interfaces[1].Mtu).To(Equal(5000)) Expect(r.Interfaces[1].Name).To(Equal(expectedIfName)) Expect(r.Interfaces[1].Sandbox).To(Equal(expectedSandbox)) @@ -139,8 +150,9 @@ func (t *testerV10x) verifyResult(result types.Result, expectedIfName, expectedS for _, ipc := range r.IPs { if *ipc.Interface == 1 { ips = append(ips, resultIP{ - ip: ipc.Address.IP.String(), - gw: ipc.Gateway.String(), + ip: ipc.Address.IP.String(), + cidr: ipc.Address.String(), + gw: ipc.Gateway.String(), }) } } @@ -166,8 +178,9 @@ func verify0403(result types.Result, expectedIfName, expectedSandbox string, exp for _, ipc := range r.IPs { if *ipc.Interface == 1 { ips = append(ips, resultIP{ - ip: ipc.Address.IP.String(), - gw: ipc.Gateway.String(), + ip: ipc.Address.IP.String(), + cidr: ipc.Address.String(), + gw: ipc.Gateway.String(), }) } } @@ -193,14 +206,16 @@ func (t *testerV01xOr02x) verifyResult(result types.Result, _, _ string, _ types ips := []resultIP{} if r.IP4 != nil && r.IP4.IP.IP != nil { ips = append(ips, resultIP{ - ip: r.IP4.IP.IP.String(), - gw: r.IP4.Gateway.String(), + ip: r.IP4.IP.IP.String(), + cidr: r.IP4.IP.String(), + gw: r.IP4.Gateway.String(), }) } if r.IP6 != nil && r.IP6.IP.IP != nil { ips = append(ips, resultIP{ - ip: r.IP6.IP.IP.String(), - gw: r.IP6.Gateway.String(), + ip: r.IP6.IP.IP.String(), + cidr: r.IP6.IP.String(), + gw: r.IP6.Gateway.String(), }) } @@ -235,6 +250,10 @@ var _ = Describe("ptp Operations", func() { doTest := func(conf, cniVersion string, numIPs int, expectedDNSConf types.DNS, targetNS ns.NetNS) { const IFNAME = "ptp0" + expectMasq := gjson.Get(conf, "ipMasq").Bool() + name := gjson.Get(conf, "name").String() + Expect(name).NotTo(BeEmpty()) + args := &skel.CmdArgs{ ContainerID: "dummy", Netns: targetNS.Path(), @@ -249,41 +268,113 @@ var _ = Describe("ptp Operations", func() { defer GinkgoRecover() var err error + + if testutils.SpecVersionHasSTATUS(cniVersion) { + By("Doing a cni STATUS") + err = testutils.CmdStatus(func() error { + return 
cmdStatus(args) + }) + Expect(err).NotTo(HaveOccurred()) + } + + By(fmt.Sprintf("Doing a CNI ADD with configuration %s", args.StdinData)) result, _, err = testutils.CmdAddWithArgs(args, func() error { return cmdAdd(args) }) - Expect(err).NotTo(HaveOccurred()) - return nil + return err }) Expect(err).NotTo(HaveOccurred()) t := newTesterByVersion(cniVersion) ips, mac := t.verifyResult(result, IFNAME, targetNS.Path(), expectedDNSConf) Expect(ips).To(HaveLen(numIPs)) + GinkgoWriter.Printf("got result %+v\n", ips) // Make sure ptp link exists in the target namespace // Then, ping the gateway - err = targetNS.Do(func(ns.NetNS) error { - defer GinkgoRecover() + checkOK := func() error { + By("Checking that the container can ping the gateway") + return targetNS.Do(func(ns.NetNS) error { + defer GinkgoRecover() - link, err := netlink.LinkByName(IFNAME) - Expect(err).NotTo(HaveOccurred()) - if mac != "" { - Expect(mac).To(Equal(link.Attrs().HardwareAddr.String())) - } + link, err := netlink.LinkByName(IFNAME) + Expect(err).NotTo(HaveOccurred()) + if mac != "" { + Expect(mac).To(Equal(link.Attrs().HardwareAddr.String())) + } - for _, ipc := range ips { - fmt.Fprintln(GinkgoWriter, "ping", ipc.ip, "->", ipc.gw) - if err := testutils.Ping(ipc.ip, ipc.gw, 30); err != nil { - return fmt.Errorf("ping %s -> %s failed: %s", ipc.ip, ipc.gw, err) + for _, ipc := range ips { + fmt.Fprintln(GinkgoWriter, "ping", ipc.ip, "->", ipc.gw) + if err := testutils.Ping(ipc.ip, ipc.gw, 30); err != nil { + return fmt.Errorf("ping %s -> %s failed: %s", ipc.ip, ipc.gw, err) + } } - } + return nil + }) + } + + // checkIPAM relies on the details of the host-local plugin. + // It checks to see if the IP has been reserved + checkIPAM := func(ips []resultIP, expectEmpty bool) error { + GinkgoHelper() + for _, ip := range ips { + expectedFilename := path.Join(dataDir, name, ip.ip) + if _, err := os.Stat(expectedFilename); err != nil { + if expectEmpty && os.IsNotExist(err) { + continue + } + return err + } + if expectEmpty { + return fmt.Errorf("file %s existed but should not", expectedFilename) + } + } return nil - }) + } + + // Lists the expected iptables rules, returning whether or not masquerading + // is configured for the interface in question + checkMasq := func(ips []resultIP) bool { + GinkgoHelper() + if !expectMasq { + return true + } + found := false + err := originalNS.Do(func(ns.NetNS) error { + defer GinkgoRecover() + chain := utils.FormatChainName(name, args.ContainerID) + comment := utils.FormatComment(name, args.ContainerID) + for _, res := range ips { + addr, c, err := net.ParseCIDR(res.cidr) + Expect(err).NotTo(HaveOccurred()) + c.IP = addr + + // uncomment to debug + // If check fails, print iptables rules just for debugging + ipt, err := iptables.NewWithProtocol(iptables.ProtocolIPv4) + if err == nil { + r, _ := ipt.List("nat", "POSTROUTING") + GinkgoWriter.Printf("rules: %+v\n", r) + } + + if err := ip.CheckIPMasq(c, chain, comment); err == nil { + found = true + } + } + return nil + }) + Expect(err).NotTo(HaveOccurred()) + return found + } + + err = checkOK() Expect(err).NotTo(HaveOccurred()) + Expect(checkIPAM(ips, false)).NotTo(HaveOccurred()) + Expect(checkMasq(ips)).To(BeTrue()) // call CmdCheck + By("Ensuring that CHECK reports no errors") n := &Net{} err = json.Unmarshal([]byte(conf), &n) Expect(err).NotTo(HaveOccurred()) @@ -312,6 +403,8 @@ var _ = Describe("ptp Operations", func() { args.StdinData = []byte(conf) + By("Issuing a CNI DEL and verifying the link has been deleted") + // Call the plugins 
with the DEL command, deleting the veth endpoints err = originalNS.Do(func(ns.NetNS) error { defer GinkgoRecover() @@ -334,6 +427,97 @@ var _ = Describe("ptp Operations", func() { return nil }) Expect(err).NotTo(HaveOccurred()) + + cniVers := gjson.GetBytes(args.StdinData, "cniVersion").String() + if gt, _ := version.GreaterThanOrEqualTo(cniVers, "1.1.0"); gt { + + By("Issuing a new CNI ADD and verifying a new link has been created") + + // Call ADD again, creating the attachment + err = originalNS.Do(func(ns.NetNS) error { + defer GinkgoRecover() + + var err error + result, _, err = testutils.CmdAddWithArgs(args, func() error { + return cmdAdd(args) + }) + return err + }) + Expect(err).NotTo(HaveOccurred()) + + ips, mac = t.verifyResult(result, IFNAME, targetNS.Path(), expectedDNSConf) + + // Call GC, asking for the attachment to be preserved + // Ensure that connectivity remains + + By("Issuing a CNI GC that should preserve the attachment") + err = originalNS.Do(func(ns.NetNS) error { + defer GinkgoRecover() + var err error + gcArgs := &skel.CmdArgs{ + Path: args.Path, + NetnsOverride: args.NetnsOverride, + } + + validAttachments := []types.GCAttachment{ + { + ContainerID: args.ContainerID, + IfName: args.IfName, + }, + } + + gcArgs.StdinData, err = sjson.SetBytes(args.StdinData, "cni\\.dev/valid-attachments", validAttachments) + if err != nil { + return err + } + By(fmt.Sprintf("calling GC with configuration %s", gcArgs.StdinData)) + + return testutils.CmdGC(func() error { + return cmdGC(gcArgs) + }) + }) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying that connectivity, IPAM, and masquerading are configured") + err = checkOK() + Expect(err).NotTo(HaveOccurred()) + Expect(checkIPAM(ips, false)).NotTo(HaveOccurred()) + // Expect(checkMasq(ips)).To(BeTrue()) // TODO: GC masquerading + + // Call GC, asking for the attachment *not* to be preserved + // Ensure that ipam and iptables rules are cleaned up + By("Issuing a CNI GC that should not preserve the attachment") + err = originalNS.Do(func(ns.NetNS) error { + defer GinkgoRecover() + gcArgs := &skel.CmdArgs{ + Path: args.Path, + NetnsOverride: args.NetnsOverride, + StdinData: args.StdinData, + } + + return testutils.CmdGC(func() error { + return cmdGC(gcArgs) + }) + }) + Expect(err).NotTo(HaveOccurred()) + + By("Ensuring that IPAM and masquerading have been cleaned up") + Expect(checkIPAM(ips, true)).NotTo(HaveOccurred()) + // Expect(checkMasq(ips)).To(BeFalse()) // TODO: GC masquerading + + By("Issuing a CNI DEL for the link") + // Call the plugins with the DEL command, deleting the veth endpoints + err = originalNS.Do(func(ns.NetNS) error { + defer GinkgoRecover() + + err := testutils.CmdDelWithArgs(args, func() error { + return cmdDel(args) + }) + Expect(err).NotTo(HaveOccurred()) + return nil + }) + Expect(err).NotTo(HaveOccurred()) + } } for _, ver := range testutils.AllSpecVersions { diff --git a/plugins/main/tap/tap.go b/plugins/main/tap/tap.go index 48381b4e0..4f672fdd5 100644 --- a/plugins/main/tap/tap.go +++ b/plugins/main/tap/tap.go @@ -236,6 +236,7 @@ func createTap(conf *NetConf, ifName string, netns ns.NetNS) (*current.Interface tap.Mac = link.Attrs().HardwareAddr.String() tap.Sandbox = netns.Path() + tap.Mtu = link.Attrs().MTU return nil }) @@ -386,7 +387,13 @@ func cmdDel(args *skel.CmdArgs) error { } func main() { - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.All, bv.BuildString("tap")) + skel.PluginMainFuncs(skel.CNIFuncs{ + Add: cmdAdd, + Check: cmdCheck, + Del: cmdDel, + Status: cmdStatus, + GC: cmdGC, + }, 
version.All, bv.BuildString("tap")) } func cmdCheck(args *skel.CmdArgs) error { @@ -455,3 +462,33 @@ func cmdCheck(args *skel.CmdArgs) error { return nil }) } + +func cmdStatus(args *skel.CmdArgs) error { + conf := NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %w", err) + } + + if conf.IPAM.Type != "" { + if err := ipam.ExecStatus(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + } + + return nil +} + +func cmdGC(args *skel.CmdArgs) error { + conf := NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %w", err) + } + + if conf.IPAM.Type != "" { + if err := ipam.ExecGC(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + } + + return nil +} diff --git a/plugins/main/tap/tap_test.go b/plugins/main/tap/tap_test.go index c39a25ee6..a8fd48c34 100644 --- a/plugins/main/tap/tap_test.go +++ b/plugins/main/tap/tap_test.go @@ -108,7 +108,7 @@ type ( func newTesterByVersion(version string) tester { switch { - case strings.HasPrefix(version, "1.0."): + case strings.HasPrefix(version, "1."): return &testerV10x{} case strings.HasPrefix(version, "0.4."): return &testerV04x{} @@ -126,6 +126,7 @@ func (t *testerV10x) verifyResult(result types.Result, name string) string { Expect(r.Interfaces).To(HaveLen(1)) Expect(r.Interfaces[0].Name).To(Equal(name)) + Expect(r.Interfaces[0].Mtu).To(BeNumerically(">", 0)) Expect(r.IPs).To(HaveLen(1)) return r.Interfaces[0].Mac @@ -223,6 +224,13 @@ var _ = Describe("Add, check, remove tap plugin", func() { err = originalNS.Do(func(ns.NetNS) error { defer GinkgoRecover() + if testutils.SpecVersionHasSTATUS(ver) { + err := testutils.CmdStatus(func() error { + return cmdStatus(args) + }) + Expect(err).NotTo(HaveOccurred()) + } + result, _, err = testutils.CmdAddWithArgs(args, func() error { return cmdAdd(args) }) @@ -288,6 +296,13 @@ var _ = Describe("Add, check, remove tap plugin", func() { return cmdDel(args) }) Expect(err).NotTo(HaveOccurred()) + + if testutils.SpecVersionHasGC(ver) { + err := testutils.CmdGC(func() error { + return cmdGC(args) + }) + Expect(err).NotTo(HaveOccurred()) + } return nil }) Expect(err).NotTo(HaveOccurred()) @@ -364,6 +379,13 @@ var _ = Describe("Add, check, remove tap plugin", func() { err = originalNS.Do(func(ns.NetNS) error { defer GinkgoRecover() + if testutils.SpecVersionHasSTATUS(ver) { + err := testutils.CmdStatus(func() error { + return cmdStatus(args) + }) + Expect(err).NotTo(HaveOccurred()) + } + result, _, err = testutils.CmdAddWithArgs(args, func() error { return cmdAdd(args) }) @@ -404,6 +426,13 @@ var _ = Describe("Add, check, remove tap plugin", func() { return cmdDel(args) }) Expect(err).NotTo(HaveOccurred()) + + if testutils.SpecVersionHasGC(ver) { + err := testutils.CmdGC(func() error { + return cmdGC(args) + }) + Expect(err).NotTo(HaveOccurred()) + } return nil }) Expect(err).NotTo(HaveOccurred()) diff --git a/plugins/main/vlan/vlan.go b/plugins/main/vlan/vlan.go index 3f2987a33..a15aeb71e 100644 --- a/plugins/main/vlan/vlan.go +++ b/plugins/main/vlan/vlan.go @@ -153,6 +153,7 @@ func createVlan(conf *NetConf, ifName string, netns ns.NetNS) (*current.Interfac return fmt.Errorf("failed to refetch vlan %q: %v", vlan.Name, err) } vlan.Mac = contVlan.Attrs().HardwareAddr.String() + vlan.Mtu = contVlan.Attrs().MTU vlan.Sandbox = netns.Path() return nil @@ -259,7 +260,13 @@ func cmdDel(args *skel.CmdArgs) error { } func main() { - skel.PluginMain(cmdAdd, cmdCheck, 
cmdDel, version.All, bv.BuildString("vlan")) + skel.PluginMainFuncs(skel.CNIFuncs{ + Add: cmdAdd, + Check: cmdCheck, + Del: cmdDel, + Status: cmdStatus, + GC: cmdGC, + }, version.All, bv.BuildString("vlan")) } func cmdCheck(args *skel.CmdArgs) error { @@ -392,3 +399,29 @@ func validateCniContainerInterface(intf current.Interface, vlanID int, mtu int) return nil } + +func cmdStatus(args *skel.CmdArgs) error { + conf := NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %w", err) + } + + if err := ipam.ExecStatus(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + + return nil +} + +func cmdGC(args *skel.CmdArgs) error { + conf := NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %w", err) + } + + if err := ipam.ExecGC(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + + return nil +} diff --git a/plugins/main/vlan/vlan_test.go b/plugins/main/vlan/vlan_test.go index c0cd3ed30..d2705e897 100644 --- a/plugins/main/vlan/vlan_test.go +++ b/plugins/main/vlan/vlan_test.go @@ -113,7 +113,7 @@ type ( func newTesterByVersion(version string) tester { switch { - case strings.HasPrefix(version, "1.0."): + case strings.HasPrefix(version, "1."): return &testerV10x{} case strings.HasPrefix(version, "0.4."): return &testerV04x{} @@ -131,6 +131,7 @@ func (t *testerV10x) verifyResult(result types.Result, name string) string { Expect(r.Interfaces).To(HaveLen(1)) Expect(r.Interfaces[0].Name).To(Equal(name)) + Expect(r.Interfaces[0].Mtu).To(BeNumerically(">", 0)) Expect(r.IPs).To(HaveLen(1)) return r.Interfaces[0].Mac @@ -352,8 +353,15 @@ var _ = Describe("vlan Operations", func() { var macAddress string err := originalNS.Do(func(ns.NetNS) error { defer GinkgoRecover() - var err error + + if testutils.SpecVersionHasSTATUS(ver) { + err := testutils.CmdStatus(func() error { + return cmdStatus(args) + }) + Expect(err).NotTo(HaveOccurred()) + } + result, _, err = testutils.CmdAddWithArgs(args, func() error { return cmdAdd(args) }) @@ -425,6 +433,13 @@ var _ = Describe("vlan Operations", func() { return cmdDel(args) }) Expect(err).NotTo(HaveOccurred()) + + if testutils.SpecVersionHasGC(ver) { + err := testutils.CmdGC(func() error { + return cmdGC(args) + }) + Expect(err).NotTo(HaveOccurred()) + } return nil }) Expect(err).NotTo(HaveOccurred()) diff --git a/plugins/main/windows/win-bridge/win-bridge_windows.go b/plugins/main/windows/win-bridge/win-bridge_windows.go index 3e6de2b41..988fc38cc 100644 --- a/plugins/main/windows/win-bridge/win-bridge_windows.go +++ b/plugins/main/windows/win-bridge/win-bridge_windows.go @@ -215,5 +215,42 @@ func cmdCheck(_ *skel.CmdArgs) error { } func main() { - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.All, bv.BuildString("win-bridge")) + skel.PluginMainFuncs(skel.CNIFuncs{ + Add: cmdAdd, + Check: cmdCheck, + Del: cmdDel, + Status: cmdStatus, + GC: cmdGC, + }, version.All, bv.BuildString("win-bridge")) +} + +func cmdStatus(args *skel.CmdArgs) error { + conf := NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %w", err) + } + + if conf.IPAM.Type != "" { + if err := ipam.ExecStatus(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + } + + return nil +} + +func cmdGC(args *skel.CmdArgs) error { + conf := NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %w", err) + } + + 
if conf.IPAM.Type != "" { + if err := ipam.ExecGC(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + } + + // TODO: Clean up any stale masq policy? + return nil } diff --git a/plugins/main/windows/win-overlay/win-overlay_windows.go b/plugins/main/windows/win-overlay/win-overlay_windows.go index 2df1b74b8..9d3c77a37 100644 --- a/plugins/main/windows/win-overlay/win-overlay_windows.go +++ b/plugins/main/windows/win-overlay/win-overlay_windows.go @@ -106,13 +106,13 @@ func cmdHcnAdd(args *skel.CmdArgs, n *NetConf) (*current.Result, error) { return nil, errors.Annotatef(err, "error while hcn.GetNetworkByName(%s)", networkName) } if hcnNetwork == nil { - return nil, fmt.Errorf("network %v is not found", networkName) + return nil, fmt.Errorf("network %v is not found", networkName) } if hnsNetwork == nil { return nil, fmt.Errorf("network %v not found", networkName) } - if !strings.EqualFold(string (hcnNetwork.Type), "Overlay") { + if !strings.EqualFold(string(hcnNetwork.Type), "Overlay") { return nil, fmt.Errorf("network %v is of an unexpected type: %v", networkName, hcnNetwork.Type) } @@ -288,5 +288,38 @@ func cmdCheck(_ *skel.CmdArgs) error { } func main() { - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.All, bv.BuildString("win-overlay")) + skel.PluginMainFuncs(skel.CNIFuncs{ + Add: cmdAdd, + Check: cmdCheck, + Del: cmdDel, + Status: cmdStatus, + GC: cmdGC, + }, version.All, bv.BuildString("win-overlay")) +} + +func cmdStatus(args *skel.CmdArgs) error { + conf := NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %w", err) + } + + if err := ipam.ExecStatus(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + + return nil +} + +func cmdGC(args *skel.CmdArgs) error { + conf := NetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %w", err) + } + + if err := ipam.ExecGC(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + + // TODO: Clean up any stale HNS endpoints + return nil } diff --git a/plugins/meta/bandwidth/main.go b/plugins/meta/bandwidth/main.go index 66eae1521..308b7919f 100644 --- a/plugins/meta/bandwidth/main.go +++ b/plugins/meta/bandwidth/main.go @@ -218,6 +218,7 @@ func cmdAdd(args *skel.CmdArgs) error { result.Interfaces = append(result.Interfaces, ¤t.Interface{ Name: ifbDeviceName, Mac: ifbDevice.Attrs().HardwareAddr.String(), + Mtu: mtu, }) err = CreateEgressQdisc(bandwidth.EgressRate, bandwidth.EgressBurst, hostInterface.Name, ifbDeviceName) if err != nil { @@ -240,7 +241,16 @@ func cmdDel(args *skel.CmdArgs) error { } func main() { - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.VersionsStartingFrom("0.3.0"), bv.BuildString("bandwidth")) + // TODO: clean up stale IFB devices via GC + // Cannot do this until we can filter out only ifb devices that belong + // to this network. 
+ skel.PluginMainFuncs( + skel.CNIFuncs{ + Add: cmdAdd, + Check: cmdCheck, + Del: cmdDel, + }, + version.VersionsStartingFrom("0.3.0"), bv.BuildString("bandwidth")) } func SafeQdiscList(link netlink.Link) ([]netlink.Qdisc, error) { diff --git a/plugins/meta/firewall/firewall.go b/plugins/meta/firewall/firewall.go index f2f8a4d56..0e0e61c2e 100644 --- a/plugins/meta/firewall/firewall.go +++ b/plugins/meta/firewall/firewall.go @@ -179,7 +179,13 @@ func cmdDel(args *skel.CmdArgs) error { } func main() { - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.VersionsStartingFrom("0.4.0"), bv.BuildString("firewall")) + skel.PluginMainFuncs( + skel.CNIFuncs{ + Add: cmdAdd, + Check: cmdCheck, + Del: cmdDel, + Status: cmdStatus, + }, version.VersionsStartingFrom("0.4.0"), bv.BuildString("firewall")) } func cmdCheck(args *skel.CmdArgs) error { @@ -200,3 +206,15 @@ func cmdCheck(args *skel.CmdArgs) error { return backend.Check(conf, result) } + +func cmdStatus(args *skel.CmdArgs) error { + conf := FirewallNetConf{} + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to load netconf: %v", err) + } + + if conf.Backend == "firewalld" && !isFirewalldRunning() { + return types.NewError(50, "firewalld down", "unable to connect to the firewalld backend") + } + return nil +} diff --git a/plugins/meta/portmap/main.go b/plugins/meta/portmap/main.go index 09af586a5..cfab9a350 100644 --- a/plugins/meta/portmap/main.go +++ b/plugins/meta/portmap/main.go @@ -134,7 +134,12 @@ func cmdDel(args *skel.CmdArgs) error { } func main() { - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.All, bv.BuildString("portmap")) + skel.PluginMainFuncs( + skel.CNIFuncs{ + Add: cmdAdd, + Check: cmdCheck, + Del: cmdDel, + }, version.All, bv.BuildString("portmap")) } func cmdCheck(args *skel.CmdArgs) error { diff --git a/plugins/meta/sbr/main.go b/plugins/meta/sbr/main.go index f07e7520f..89be78e11 100644 --- a/plugins/meta/sbr/main.go +++ b/plugins/meta/sbr/main.go @@ -401,7 +401,12 @@ RULE_LOOP: } func main() { - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.All, bv.BuildString("sbr")) + skel.PluginMainFuncs( + skel.CNIFuncs{ + Add: cmdAdd, + Check: cmdCheck, + Del: cmdDel, + }, version.All, bv.BuildString("sbr")) } func cmdCheck(_ *skel.CmdArgs) error { diff --git a/plugins/meta/tuning/tuning.go b/plugins/meta/tuning/tuning.go index ed23d9208..cb0ab59dd 100644 --- a/plugins/meta/tuning/tuning.go +++ b/plugins/meta/tuning/tuning.go @@ -433,7 +433,12 @@ func cmdDel(args *skel.CmdArgs) error { } func main() { - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.All, bv.BuildString("tuning")) + skel.PluginMainFuncs( + skel.CNIFuncs{ + Add: cmdAdd, + Check: cmdCheck, + Del: cmdDel, + }, version.All, bv.BuildString("tuning")) } func cmdCheck(args *skel.CmdArgs) error { diff --git a/plugins/meta/vrf/main.go b/plugins/meta/vrf/main.go index c83234328..c73363c32 100644 --- a/plugins/meta/vrf/main.go +++ b/plugins/meta/vrf/main.go @@ -39,7 +39,12 @@ type VRFNetConf struct { } func main() { - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.VersionsStartingFrom("0.3.1"), bv.BuildString("vrf")) + skel.PluginMainFuncs( + skel.CNIFuncs{ + Add: cmdAdd, + Check: cmdCheck, + Del: cmdDel, + }, version.VersionsStartingFrom("0.3.1"), bv.BuildString("vrf")) } func cmdAdd(args *skel.CmdArgs) error { diff --git a/plugins/sample/main.go b/plugins/sample/main.go index 1cad80536..96c437765 100644 --- a/plugins/sample/main.go +++ b/plugins/sample/main.go @@ -24,6 +24,7 @@ import ( 
"github.com/containernetworking/cni/pkg/types" current "github.com/containernetworking/cni/pkg/types/100" "github.com/containernetworking/cni/pkg/version" + "github.com/containernetworking/plugins/pkg/ipam" bv "github.com/containernetworking/plugins/pkg/utils/buildversion" ) @@ -150,10 +151,65 @@ func cmdDel(args *skel.CmdArgs) error { func main() { // replace TODO with your plugin name - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.All, bv.BuildString("TODO")) + skel.PluginMainFuncs(skel.CNIFuncs{ + Add: cmdAdd, + Del: cmdDel, + Check: cmdCheck, + Status: cmdStatus, + GC: cmdGc, + }, version.All, bv.BuildString("a sample plugin")) } func cmdCheck(_ *skel.CmdArgs) error { // TODO: implement return fmt.Errorf("not implemented") } + +// cmdStatus implements the STATUS command, which indicates whether or not +// this plugin is able to accept ADD requests. +// +// If the plugin has external dependencies, such as a daemon +// or chained ipam plugin, it should determine their status. If all is well, +// and an ADD can be successfully processed, return nil +func cmdStatus(args *skel.CmdArgs) error { + conf, err := parseConfig(args.StdinData) + if err != nil { + return err + } + _ = conf + + // If this plugins delegates IPAM, ensure that IPAM is also running + if err := ipam.ExecStatus(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + + // TODO: implement STATUS here + // e.g. querying an external deamon, or delegating STATUS to an IPAM plugin + + return nil +} + +// cmdGc implements the GC command, which the runtime uses to indicate +// the currently valid set of attachments for a given configuration; any +// resources not owned by these containers may be deleted. +// +// The set of valid attachments is provided in the variable 'cni.dev/valid-attachments'. +// All other attachments should be considered invalid. +func cmdGc(args *skel.CmdArgs) error { + conf, err := parseConfig(args.StdinData) + if err != nil { + return err + } + _ = conf + + // If this plugin delegates IPAM, then GC must be passed + if err := ipam.ExecGC(conf.IPAM.Type, args.StdinData); err != nil { + return err + } + + // TODO: implement GC here + // e.g clean up any stale resources, such as iptables rules. + // You can assume that anything attached to a network namespace is gone. 
+ + return nil +} diff --git a/vendor/github.com/Masterminds/semver/v3/.gitignore b/vendor/github.com/Masterminds/semver/v3/.gitignore new file mode 100644 index 000000000..6b061e617 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/.gitignore @@ -0,0 +1 @@ +_fuzz/ \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml new file mode 100644 index 000000000..fbc633259 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml @@ -0,0 +1,27 @@ +run: + deadline: 2m + +linters: + disable-all: true + enable: + - misspell + - govet + - staticcheck + - errcheck + - unparam + - ineffassign + - nakedret + - gocyclo + - dupl + - goimports + - revive + - gosec + - gosimple + - typecheck + - unused + +linters-settings: + gofmt: + simplify: true + dupl: + threshold: 600 diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md new file mode 100644 index 000000000..f12626423 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md @@ -0,0 +1,214 @@ +# Changelog + +## 3.2.0 (2022-11-28) + +### Added + +- #190: Added text marshaling and unmarshaling +- #167: Added JSON marshalling for constraints (thanks @SimonTheLeg) +- #173: Implement encoding.TextMarshaler and encoding.TextUnmarshaler on Version (thanks @MarkRosemaker) +- #179: Added New() version constructor (thanks @kazhuravlev) + +### Changed + +- #182/#183: Updated CI testing setup + +### Fixed + +- #186: Fixing issue where validation of constraint section gave false positives +- #176: Fix constraints check with *-0 (thanks @mtt0) +- #181: Fixed Caret operator (^) gives unexpected results when the minor version in constraint is 0 (thanks @arshchimni) +- #161: Fixed godoc (thanks @afirth) + +## 3.1.1 (2020-11-23) + +### Fixed + +- #158: Fixed issue with generated regex operation order that could cause problem + +## 3.1.0 (2020-04-15) + +### Added + +- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah) + +### Changed + +- #148: More accurate validation messages on constraints + +## 3.0.3 (2019-12-13) + +### Fixed + +- #141: Fixed issue with <= comparison + +## 3.0.2 (2019-11-14) + +### Fixed + +- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos) + +## 3.0.1 (2019-09-13) + +### Fixed + +- #125: Fixes issue with module path for v3 + +## 3.0.0 (2019-09-12) + +This is a major release of the semver package which includes API changes. The Go +API is compatible with ^1. The Go API was not changed because many people are using +`go get` without Go modules for their applications and API breaking changes cause +errors which we have or would need to support. + +The changes in this release are the handling based on the data passed into the +functions. These are described in the added and changed sections below. + +### Added + +- StrictNewVersion function. This is similar to NewVersion but will return an + error if the version passed in is not a strict semantic version. For example, + 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly + speaking semantic versions. This function is faster, performs fewer operations, + and uses fewer allocations than NewVersion. +- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint. + The Makefile contains the operations used. 
For more information on you can start + on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing +- Now using Go modules + +### Changed + +- NewVersion has proper prerelease and metadata validation with error messages + to signal an issue with either of them +- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the + version is >=1 the ^ ranges works the same as v1. For major versions of 0 the + rules have changed. The minor version is treated as the stable version unless + a patch is specified and then it is equivalent to =. One difference from npm/js + is that prereleases there are only to a specific version (e.g. 1.2.3). + Prereleases here look over multiple versions and follow semantic version + ordering rules. This pattern now follows along with the expected and requested + handling of this packaged by numerous users. + +## 1.5.0 (2019-09-11) + +### Added + +- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) + +### Changed + +- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) +- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) +- #72: Adding docs comment pointing to vert for a cli +- #71: Update the docs on pre-release comparator handling +- #89: Test with new go versions (thanks @thedevsaddam) +- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) + +### Fixed + +- #78: Fix unchecked error in example code (thanks @ravron) +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case +- #97: Fixed copyright file for proper display on GitHub +- #107: Fix handling prerelease when sorting alphanum and num +- #109: Fixed where Validate sometimes returns wrong message on error + +## 1.4.2 (2018-04-10) + +### Changed + +- #72: Updated the docs to point to vert for a console appliaction +- #71: Update the docs on pre-release comparator handling + +### Fixed + +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case + +## 1.4.1 (2018-04-02) + +### Fixed + +- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) + +## 1.4.0 (2017-10-04) + +### Changed + +- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) + +## 1.3.1 (2017-07-10) + +### Fixed + +- Fixed #57: number comparisons in prerelease sometimes inaccurate + +## 1.3.0 (2017-05-02) + +### Added + +- #45: Added json (un)marshaling support (thanks @mh-cbon) +- Stability marker. See https://masterminds.github.io/stability/ + +### Fixed + +- #51: Fix handling of single digit tilde constraint (thanks @dgodd) + +### Changed + +- #55: The godoc icon moved from png to svg + +## 1.2.3 (2017-04-03) + +### Fixed + +- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * + +## Release 1.2.2 (2016-12-13) + +### Fixed + +- #34: Fixed issue where hyphen range was not working with pre-release parsing. + +## Release 1.2.1 (2016-11-28) + +### Fixed + +- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" + properly. + +## Release 1.2.0 (2016-11-04) + +### Added + +- #20: Added MustParse function for versions (thanks @adamreese) +- #15: Added increment methods on versions (thanks @mh-cbon) + +### Fixed + +- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and + might not satisfy the intended compatibility. The change here ignores pre-releases + on constraint checks (e.g., ~ or ^) when a pre-release is not part of the + constraint. 
For example, `^1.2.3` will ignore pre-releases while + `^1.2.3-alpha` will include them. + +## Release 1.1.1 (2016-06-30) + +### Changed + +- Issue #9: Speed up version comparison performance (thanks @sdboyer) +- Issue #8: Added benchmarks (thanks @sdboyer) +- Updated Go Report Card URL to new location +- Updated Readme to add code snippet formatting (thanks @mh-cbon) +- Updating tagging to v[SemVer] structure for compatibility with other tools. + +## Release 1.1.0 (2016-03-11) + +- Issue #2: Implemented validation to provide reasons a versions failed a + constraint. + +## Release 1.0.1 (2015-12-31) + +- Fixed #1: * constraint failing on valid versions. + +## Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt new file mode 100644 index 000000000..9ff7da9c4 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile new file mode 100644 index 000000000..0e7b5c713 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/Makefile @@ -0,0 +1,30 @@ +GOPATH=$(shell go env GOPATH) +GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint + +.PHONY: lint +lint: $(GOLANGCI_LINT) + @echo "==> Linting codebase" + @$(GOLANGCI_LINT) run + +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . + +.PHONY: fuzz +fuzz: + @echo "==> Running Fuzz Tests" + go test -fuzz=FuzzNewVersion -fuzztime=15s . + go test -fuzz=FuzzStrictNewVersion -fuzztime=15s . + go test -fuzz=FuzzNewConstraint -fuzztime=15s . + +$(GOLANGCI_LINT): + # Install golangci-lint. The configuration for it is in the .golangci.yml + # file in the root of the repository + echo ${GOPATH} + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1 diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md new file mode 100644 index 000000000..eab8cac3b --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/README.md @@ -0,0 +1,258 @@ +# SemVer + +The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. 
Specifically it provides the ability to: + +* Parse semantic versions +* Sort semantic versions +* Check if a semantic version fits within a set of constraints +* Optionally work with a `v` prefix + +[![Stability: +Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) +[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions) +[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3) +[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) + +If you are looking for a command line tool for version comparisons please see +[vert](https://github.com/Masterminds/vert) which uses this library. + +## Package Versions + +Note, import `github.com/github.com/Masterminds/semver/v3` to use the latest version. + +There are three major versions fo the `semver` package. + +* 3.x.x is the stable and active version. This version is focused on constraint + compatibility for range handling in other tools from other languages. It has + a similar API to the v1 releases. The development of this version is on the master + branch. The documentation for this version is below. +* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are + no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer). + There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x). +* 1.x.x is the original release. It is no longer maintained. You should use the + v3 release instead. You can read the documentation for the 1.x.x release + [here](https://github.com/Masterminds/semver/blob/release-1/README.md). + +## Parsing Semantic Versions + +There are two functions that can parse semantic versions. The `StrictNewVersion` +function only parses valid version 2 semantic versions as outlined in the +specification. The `NewVersion` function attempts to coerce a version into a +semantic version and parse it. For example, if there is a leading v or a version +listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid +semantic version (e.g., 1.2.0). In both cases a `Version` object is returned +that can be sorted, compared, and used in constraints. + +When parsing a version an error is returned if there is an issue parsing the +version. For example, + + v, err := semver.NewVersion("1.2.3-beta.1+build345") + +The version object has methods to get the parts of the version, compare it to +other versions, convert the version back into a string, and get the original +string. Getting the original string is useful if the semantic version was coerced +into a valid form. + +## Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. +For example, + +```go +raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} +vs := make([]*semver.Version, len(raw)) +for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v +} + +sort.Sort(semver.Collection(vs)) +``` + +## Checking Version Constraints + +There are two methods for comparing versions. One uses comparison methods on +`Version` instances and the other uses `Constraints`. 
There are some important +differences to notes between these two methods of comparison. + +1. When two versions are compared using functions such as `Compare`, `LessThan`, + and others it will follow the specification and always include prereleases + within the comparison. It will provide an answer that is valid with the + comparison section of the spec at https://semver.org/#spec-item-11 +2. When constraint checking is used for checks or validation it will follow a + different set of rules that are common for ranges with tools like npm/js + and Rust/Cargo. This includes considering prereleases to be invalid if the + ranges does not include one. If you want to have it include pre-releases a + simple solution is to include `-0` in your range. +3. Constraint ranges can have some complex rules including the shorthand use of + ~ and ^. For more details on those see the options below. + +There are differences between the two methods or checking versions because the +comparison methods on `Version` follow the specification while comparison ranges +are not part of the specification. Different packages and tools have taken it +upon themselves to come up with range rules. This has resulted in differences. +For example, npm/js and Cargo/Rust follow similar patterns while PHP has a +different pattern for ^. The comparison features in this package follow the +npm/js and Cargo/Rust lead because applications using it have followed similar +patters with their versions. + +Checking a version against version constraints is one of the most featureful +parts of the package. + +```go +c, err := semver.NewConstraint(">= 1.2.3") +if err != nil { + // Handle constraint not being parsable. +} + +v, err := semver.NewVersion("1.3") +if err != nil { + // Handle version not being parsable. +} +// Check if the version meets the constraints. The a variable will be true. +a := c.Check(v) +``` + +### Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of space or comma separated AND comparisons. These are then separated by || (OR) +comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. + +The basic comparisons are: + +* `=`: equal (aliased to no operator) +* `!=`: not equal +* `>`: greater than +* `<`: less than +* `>=`: greater than or equal to +* `<=`: less than or equal to + +### Working With Prerelease Versions + +Pre-releases, for those not familiar with them, are used for software releases +prior to stable or generally available releases. Examples of prereleases include +development, alpha, beta, and release candidate releases. A prerelease may be +a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the +order of precedence, prereleases come before their associated releases. In this +example `1.2.3-beta.1 < 1.2.3`. + +According to the Semantic Version specification prereleases may not be +API compliant with their release counterpart. It says, + +> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. + +SemVer comparisons using constraints without a prerelease comparator will skip +prerelease versions. For example, `>=1.2.3` will skip prereleases when looking +at a list of releases while `>=1.2.3-0` will evaluate and find prereleases. 
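+
+A short sketch of that behavior using this package (the printed values follow
+from the constraint rules described above):
+
+```go
+c, _ := semver.NewConstraint(">=1.2.3")
+v, _ := semver.NewVersion("1.2.4-beta.1")
+fmt.Println(c.Check(v)) // false: the constraint has no prerelease comparator
+
+c2, _ := semver.NewConstraint(">=1.2.3-0")
+fmt.Println(c2.Check(v)) // true: the -0 comparator opts in to prereleases
+```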
+ +The reason for the `0` as a pre-release version in the example comparison is +because pre-releases can only contain ASCII alphanumerics and hyphens (along with +`.` separators), per the spec. Sorting happens in ASCII sort order, again per the +spec. The lowest character is a `0` in ASCII sort order +(see an [ASCII Table](http://www.asciitable.com/)) + +Understanding ASCII sort ordering is important because A-Z comes before a-z. That +means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case +sensitivity doesn't apply here. This is due to ASCII sort ordering which is what +the spec specifies. + +### Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. +These look like: + +* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5` +* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` + +### Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the patch level comparison (see tilde below). For example, + +* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `>= 1.2.x` is equivalent to `>= 1.2.0` +* `<= 2.x` is equivalent to `< 3` +* `*` is equivalent to `>= 0.0.0` + +### Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + +* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` +* `~1` is equivalent to `>= 1, < 2` +* `~2.3` is equivalent to `>= 2.3, < 2.4` +* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `~1.x` is equivalent to `>= 1, < 2` + +### Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes once a stable +(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts +as the API stability level. This is useful when comparisons of API versions as a +major change is API breaking. For example, + +* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` +* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` +* `^2.3` is equivalent to `>= 2.3, < 3` +* `^2.x` is equivalent to `>= 2.0.0, < 3` +* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` +* `^0.2` is equivalent to `>=0.2.0 <0.3.0` +* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` +* `^0.0` is equivalent to `>=0.0.0 <0.1.0` +* `^0` is equivalent to `>=0.0.0 <1.0.0` + +## Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint. When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. For example, + +```go +c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") +if err != nil { + // Handle constraint not being parseable. +} + +v, err := semver.NewVersion("1.3") +if err != nil { + // Handle version not being parseable. +} + +// Validate a version against a constraint. +a, msgs := c.Validate(v) +// a is false +for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" +} +``` + +## Contribute + +If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) +or [create a pull request](https://github.com/Masterminds/semver/pulls). + +## Security + +Security is an important consideration for this project. 
The project currently +uses the following tools to help discover security issues: + +* [CodeQL](https://github.com/Masterminds/semver) +* [gosec](https://github.com/securego/gosec) +* Daily Fuzz testing + +If you believe you have found a security vulnerability you can privately disclose +it through the [GitHub security page](https://github.com/Masterminds/semver/security). diff --git a/vendor/github.com/Masterminds/semver/v3/SECURITY.md b/vendor/github.com/Masterminds/semver/v3/SECURITY.md new file mode 100644 index 000000000..a30a66b1f --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +The following versions of semver are currently supported: + +| Version | Supported | +| ------- | ------------------ | +| 3.x | :white_check_mark: | +| 2.x | :x: | +| 1.x | :x: | + +Fixes are only released for the latest minor version in the form of a patch release. + +## Reporting a Vulnerability + +You can privately disclose a vulnerability through GitHubs +[private vulnerability reporting](https://github.com/Masterminds/semver/security/advisories) +mechanism. diff --git a/vendor/github.com/Masterminds/semver/v3/collection.go b/vendor/github.com/Masterminds/semver/v3/collection.go new file mode 100644 index 000000000..a78235895 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/collection.go @@ -0,0 +1,24 @@ +package semver + +// Collection is a collection of Version instances and implements the sort +// interface. See the sort package for more details. +// https://golang.org/pkg/sort/ +type Collection []*Version + +// Len returns the length of a collection. The number of Version instances +// on the slice. +func (c Collection) Len() int { + return len(c) +} + +// Less is needed for the sort interface to compare two Version objects on the +// slice. If checks if one is less than the other. +func (c Collection) Less(i, j int) bool { + return c[i].LessThan(c[j]) +} + +// Swap is needed for the sort interface to replace the Version objects +// at two different positions in the slice. +func (c Collection) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go new file mode 100644 index 000000000..8461c7ed9 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/constraints.go @@ -0,0 +1,594 @@ +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strings" +) + +// Constraints is one or more constraint that a semantic version can be +// checked against. +type Constraints struct { + constraints [][]*constraint +} + +// NewConstraint returns a Constraints instance that a Version instance can +// be checked against. If there is a parse error it will be returned. +func NewConstraint(c string) (*Constraints, error) { + + // Rewrite - ranges into a comparison operation. 
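+	// e.g. "1.2 - 1.4.5" is rewritten to ">= 1.2, <= 1.4.5" before the string
+	// is split on "||" and parsed into individual constraints.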
+ c = rewriteRange(c) + + ors := strings.Split(c, "||") + or := make([][]*constraint, len(ors)) + for k, v := range ors { + + // TODO: Find a way to validate and fetch all the constraints in a simpler form + + // Validate the segment + if !validConstraintRegex.MatchString(v) { + return nil, fmt.Errorf("improper constraint: %s", v) + } + + cs := findConstraintRegex.FindAllString(v, -1) + if cs == nil { + cs = append(cs, v) + } + result := make([]*constraint, len(cs)) + for i, s := range cs { + pc, err := parseConstraint(s) + if err != nil { + return nil, err + } + + result[i] = pc + } + or[k] = result + } + + o := &Constraints{constraints: or} + return o, nil +} + +// Check tests if a version satisfies the constraints. +func (cs Constraints) Check(v *Version) bool { + // TODO(mattfarina): For v4 of this library consolidate the Check and Validate + // functions as the underlying functions make that possible now. + // loop over the ORs and check the inner ANDs + for _, o := range cs.constraints { + joy := true + for _, c := range o { + if check, _ := c.check(v); !check { + joy = false + break + } + } + + if joy { + return true + } + } + + return false +} + +// Validate checks if a version satisfies a constraint. If not a slice of +// reasons for the failure are returned in addition to a bool. +func (cs Constraints) Validate(v *Version) (bool, []error) { + // loop over the ORs and check the inner ANDs + var e []error + + // Capture the prerelease message only once. When it happens the first time + // this var is marked + var prerelesase bool + for _, o := range cs.constraints { + joy := true + for _, c := range o { + // Before running the check handle the case there the version is + // a prerelease and the check is not searching for prereleases. + if c.con.pre == "" && v.pre != "" { + if !prerelesase { + em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + e = append(e, em) + prerelesase = true + } + joy = false + + } else { + + if _, err := c.check(v); err != nil { + e = append(e, err) + joy = false + } + } + } + + if joy { + return true, []error{} + } + } + + return false, e +} + +func (cs Constraints) String() string { + buf := make([]string, len(cs.constraints)) + var tmp bytes.Buffer + + for k, v := range cs.constraints { + tmp.Reset() + vlen := len(v) + for kk, c := range v { + tmp.WriteString(c.string()) + + // Space separate the AND conditions + if vlen > 1 && kk < vlen-1 { + tmp.WriteString(" ") + } + } + buf[k] = tmp.String() + } + + return strings.Join(buf, " || ") +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (cs *Constraints) UnmarshalText(text []byte) error { + temp, err := NewConstraint(string(text)) + if err != nil { + return err + } + + *cs = *temp + + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. 
+func (cs Constraints) MarshalText() ([]byte, error) { + return []byte(cs.String()), nil +} + +var constraintOps map[string]cfunc +var constraintRegex *regexp.Regexp +var constraintRangeRegex *regexp.Regexp + +// Used to find individual constraints within a multi-constraint string +var findConstraintRegex *regexp.Regexp + +// Used to validate an segment of ANDs is valid +var validConstraintRegex *regexp.Regexp + +const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +func init() { + constraintOps = map[string]cfunc{ + "": constraintTildeOrEqual, + "=": constraintTildeOrEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "=>": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "=<": constraintLessThanEqual, + "~": constraintTilde, + "~>": constraintTilde, + "^": constraintCaret, + } + + ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^` + + constraintRegex = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + ops, + cvRegex)) + + constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( + `\s*(%s)\s+-\s+(%s)\s*`, + cvRegex, cvRegex)) + + findConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `(%s)\s*(%s)`, + ops, + cvRegex)) + + // The first time a constraint shows up will look slightly different from + // future times it shows up due to a leading space or comma in a given + // string. + validConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `^(\s*(%s)\s*(%s)\s*)((?:\s+|,\s*)(%s)\s*(%s)\s*)*$`, + ops, + cvRegex, + ops, + cvRegex)) +} + +// An individual constraint +type constraint struct { + // The version used in the constraint check. For example, if a constraint + // is '<= 2.0.0' the con a version instance representing 2.0.0. + con *Version + + // The original parsed version (e.g., 4.x from != 4.x) + orig string + + // The original operator for the constraint + origfunc string + + // When an x is used as part of the version (e.g., 1.x) + minorDirty bool + dirty bool + patchDirty bool +} + +// Check if a version meets the constraint +func (c *constraint) check(v *Version) (bool, error) { + return constraintOps[c.origfunc](v, c) +} + +// String prints an individual constraint into a string +func (c *constraint) string() string { + return c.origfunc + c.orig +} + +type cfunc func(v *Version, c *constraint) (bool, error) + +func parseConstraint(c string) (*constraint, error) { + if len(c) > 0 { + m := constraintRegex.FindStringSubmatch(c) + if m == nil { + return nil, fmt.Errorf("improper constraint: %s", c) + } + + cs := &constraint{ + orig: m[2], + origfunc: m[1], + } + + ver := m[2] + minorDirty := false + patchDirty := false + dirty := false + if isX(m[3]) || m[3] == "" { + ver = fmt.Sprintf("0.0.0%s", m[6]) + dirty = true + } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { + minorDirty = true + dirty = true + ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) + } else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" { + dirty = true + patchDirty = true + ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) + } + + con, err := NewVersion(ver) + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. 
+ return nil, errors.New("constraint Parser Error") + } + + cs.con = con + cs.minorDirty = minorDirty + cs.patchDirty = patchDirty + cs.dirty = dirty + + return cs, nil + } + + // The rest is the special case where an empty string was passed in which + // is equivalent to * or >=0.0.0 + con, err := StrictNewVersion("0.0.0") + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint Parser Error") + } + + cs := &constraint{ + con: con, + orig: c, + origfunc: "", + minorDirty: false, + patchDirty: false, + dirty: true, + } + return cs, nil +} + +// Constraint functions +func constraintNotEqual(v *Version, c *constraint) (bool, error) { + if c.dirty { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.con.Major() != v.Major() { + return true, nil + } + if c.con.Minor() != v.Minor() && !c.minorDirty { + return true, nil + } else if c.minorDirty { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } else if c.con.Patch() != v.Patch() && !c.patchDirty { + return true, nil + } else if c.patchDirty { + // Need to handle prereleases if present + if v.Prerelease() != "" || c.con.Prerelease() != "" { + eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + } + + eq := v.Equal(c.con) + if eq { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + + return true, nil +} + +func constraintGreaterThan(v *Version, c *constraint) (bool, error) { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return true, nil + } else if v.Major() < c.con.Major() { + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } else if c.minorDirty { + // This is a range case such as >11. When the version is something like + // 11.1.0 is it not > 11. For that we would need 12 or higher + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } else if c.patchDirty { + // This is for ranges such as >11.1. A version of 11.1.1 is not greater + // which one of 11.2.1 is greater + eq = v.Minor() > c.con.Minor() + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } + + // If we have gotten here we are not comparing pre-preleases and can use the + // Compare function to accomplish that. 
+ eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) +} + +func constraintLessThan(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) < 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig) +} + +func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) >= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than %s", v, c.orig) +} + +func constraintLessThanEqual(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) <= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + return true, nil +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + // ~0.0.0 is a special case where all constraints are accepted. It's + // equivalent to >= 0.0.0. + if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true, nil + } + + if v.Major() != c.con.Major() { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig) + } + + return true, nil +} + +// When there is a .x (dirty) status it automatically opts in to ~. 
Otherwise +// it's a straight = +func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.dirty { + return constraintTilde(v, c) + } + + eq := v.Equal(c.con) + if eq { + return true, nil + } + + return false, fmt.Errorf("%s is not equal to %s", v, c.orig) +} + +// ^* --> (any) +// ^1.2.3 --> >=1.2.3 <2.0.0 +// ^1.2 --> >=1.2.0 <2.0.0 +// ^1 --> >=1.0.0 <2.0.0 +// ^0.2.3 --> >=0.2.3 <0.3.0 +// ^0.2 --> >=0.2.0 <0.3.0 +// ^0.0.3 --> >=0.0.3 <0.0.4 +// ^0.0 --> >=0.0.0 <0.1.0 +// ^0 --> >=0.0.0 <1.0.0 +func constraintCaret(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + // This less than handles prereleases + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + var eq bool + + // ^ when the major > 0 is >=x.y.z < x+1 + if c.con.Major() > 0 || c.minorDirty { + + // ^ has to be within a major range for > 0. Everything less than was + // filtered out with the LessThan call above. This filters out those + // that greater but not within the same major range. + eq = v.Major() == c.con.Major() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + // ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1 + if c.con.Major() == 0 && v.Major() > 0 { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + // If the con Minor is > 0 it is not dirty + if c.con.Minor() > 0 || c.patchDirty { + eq = v.Minor() == c.con.Minor() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig) + } + // ^ when the minor is 0 and minor > 0 is =0.0.z + if c.con.Minor() == 0 && v.Minor() > 0 { + return false, fmt.Errorf("%s does not have same minor version as %s", v, c.orig) + } + + // At this point the major is 0 and the minor is 0 and not dirty. The patch + // is not dirty so we need to check if they are equal. If they are not equal + eq = c.con.Patch() == v.Patch() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not equal %s. 
Expect version and constraint to equal when major and minor versions are 0", v, c.orig) +} + +func isX(x string) bool { + switch x { + case "x", "*", "X": + return true + default: + return false + } +} + +func rewriteRange(i string) string { + m := constraintRangeRegex.FindAllStringSubmatch(i, -1) + if m == nil { + return i + } + o := i + for _, v := range m { + t := fmt.Sprintf(">= %s, <= %s ", v[1], v[11]) + o = strings.Replace(o, v[0], t, 1) + } + + return o +} diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go new file mode 100644 index 000000000..74f97caa5 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/doc.go @@ -0,0 +1,184 @@ +/* +Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. + +Specifically it provides the ability to: + + - Parse semantic versions + - Sort semantic versions + - Check if a semantic version fits within a set of constraints + - Optionally work with a `v` prefix + +# Parsing Semantic Versions + +There are two functions that can parse semantic versions. The `StrictNewVersion` +function only parses valid version 2 semantic versions as outlined in the +specification. The `NewVersion` function attempts to coerce a version into a +semantic version and parse it. For example, if there is a leading v or a version +listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid +semantic version (e.g., 1.2.0). In both cases a `Version` object is returned +that can be sorted, compared, and used in constraints. + +When parsing a version an optional error can be returned if there is an issue +parsing the version. For example, + + v, err := semver.NewVersion("1.2.3-beta.1+b345") + +The version object has methods to get the parts of the version, compare it to +other versions, convert the version back into a string, and get the original +string. For more details please see the documentation +at https://godoc.org/github.com/Masterminds/semver. + +# Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. +For example, + + raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} + vs := make([]*semver.Version, len(raw)) + for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v + } + + sort.Sort(semver.Collection(vs)) + +# Checking Version Constraints and Comparing Versions + +There are two methods for comparing versions. One uses comparison methods on +`Version` instances and the other is using Constraints. There are some important +differences to notes between these two methods of comparison. + + 1. When two versions are compared using functions such as `Compare`, `LessThan`, + and others it will follow the specification and always include prereleases + within the comparison. It will provide an answer valid with the comparison + spec section at https://semver.org/#spec-item-11 + 2. When constraint checking is used for checks or validation it will follow a + different set of rules that are common for ranges with tools like npm/js + and Rust/Cargo. This includes considering prereleases to be invalid if the + ranges does not include on. If you want to have it include pre-releases a + simple solution is to include `-0` in your range. + 3. Constraint ranges can have some complex rules including the shorthard use of + ~ and ^. For more details on those see the options below. 
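+
+As a small illustration of points 1 and 2 (using only functions documented in
+this package):
+
+	v, _ := semver.NewVersion("1.2.3-alpha")
+	r, _ := semver.NewVersion("1.2.3")
+	fmt.Println(v.LessThan(r)) // true: comparison methods always order prereleases
+
+	c, _ := semver.NewConstraint(">=1.2.0")
+	fmt.Println(c.Check(v)) // false: the range has no prerelease comparator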
+ +There are differences between the two methods or checking versions because the +comparison methods on `Version` follow the specification while comparison ranges +are not part of the specification. Different packages and tools have taken it +upon themselves to come up with range rules. This has resulted in differences. +For example, npm/js and Cargo/Rust follow similar patterns which PHP has a +different pattern for ^. The comparison features in this package follow the +npm/js and Cargo/Rust lead because applications using it have followed similar +patters with their versions. + +Checking a version against version constraints is one of the most featureful +parts of the package. + + c, err := semver.NewConstraint(">= 1.2.3") + if err != nil { + // Handle constraint not being parsable. + } + + v, err := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parsable. + } + // Check if the version meets the constraints. The a variable will be true. + a := c.Check(v) + +# Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of comma or space separated AND comparisons. These are then separated by || (OR) +comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. This can also be written as +`">= 1.2, < 3.0.0 || >= 4.2.3"` + +The basic comparisons are: + + - `=`: equal (aliased to no operator) + - `!=`: not equal + - `>`: greater than + - `<`: less than + - `>=`: greater than or equal to + - `<=`: less than or equal to + +# Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. +These look like: + + - `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` + - `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` + +# Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the tilde operation. For example, + + - `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` + - `>= 1.2.x` is equivalent to `>= 1.2.0` + - `<= 2.x` is equivalent to `<= 3` + - `*` is equivalent to `>= 0.0.0` + +Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + + - `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0` + - `~1` is equivalent to `>= 1, < 2` + - `~2.3` is equivalent to `>= 2.3 < 2.4` + - `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` + - `~1.x` is equivalent to `>= 1 < 2` + +Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes once a stable +(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts +as the API stability level. This is useful when comparisons of API versions as a +major change is API breaking. 
For example, + + - `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` + - `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` + - `^2.3` is equivalent to `>= 2.3, < 3` + - `^2.x` is equivalent to `>= 2.0.0, < 3` + - `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` + - `^0.2` is equivalent to `>=0.2.0 <0.3.0` + - `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` + - `^0.0` is equivalent to `>=0.0.0 <0.1.0` + - `^0` is equivalent to `>=0.0.0 <1.0.0` + +# Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint. When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. For example, + + c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") + if err != nil { + // Handle constraint not being parseable. + } + + v, _ := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + + // Validate a version against a constraint. + a, msgs := c.Validate(v) + // a is false + for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" + } +*/ +package semver diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go new file mode 100644 index 000000000..7c4bed334 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/version.go @@ -0,0 +1,639 @@ +package semver + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// The compiled version of the regex created at init() is cached here so it +// only needs to be created once. +var versionRegex *regexp.Regexp + +var ( + // ErrInvalidSemVer is returned a version is found to be invalid when + // being parsed. + ErrInvalidSemVer = errors.New("Invalid Semantic Version") + + // ErrEmptyString is returned when an empty string is passed in for parsing. + ErrEmptyString = errors.New("Version string empty") + + // ErrInvalidCharacters is returned when invalid characters are found as + // part of a version + ErrInvalidCharacters = errors.New("Invalid characters in version") + + // ErrSegmentStartsZero is returned when a version segment starts with 0. + // This is invalid in SemVer. + ErrSegmentStartsZero = errors.New("Version segment starts with 0") + + // ErrInvalidMetadata is returned when the metadata is an invalid format + ErrInvalidMetadata = errors.New("Invalid Metadata string") + + // ErrInvalidPrerelease is returned when the pre-release is an invalid format + ErrInvalidPrerelease = errors.New("Invalid Prerelease string") +) + +// semVerRegex is the regular expression used to parse a semantic version. +const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +// Version represents a single semantic version. +type Version struct { + major, minor, patch uint64 + pre string + metadata string + original string +} + +func init() { + versionRegex = regexp.MustCompile("^" + semVerRegex + "$") +} + +const ( + num string = "0123456789" + allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num +) + +// StrictNewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. Only parses valid semantic versions. +// Performs checking that can find errors within the version. 
+// If you want to coerce a version such as 1 or 1.2 and parse it as the 1.x +// releases of semver did, use the NewVersion() function. +func StrictNewVersion(v string) (*Version, error) { + // Parsing here does not use RegEx in order to increase performance and reduce + // allocations. + + if len(v) == 0 { + return nil, ErrEmptyString + } + + // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build + parts := strings.SplitN(v, ".", 3) + if len(parts) != 3 { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + original: v, + } + + // check for prerelease or build metadata + var extra []string + if strings.ContainsAny(parts[2], "-+") { + // Start with the build metadata first as it needs to be on the right + extra = strings.SplitN(parts[2], "+", 2) + if len(extra) > 1 { + // build metadata found + sv.metadata = extra[1] + parts[2] = extra[0] + } + + extra = strings.SplitN(parts[2], "-", 2) + if len(extra) > 1 { + // prerelease found + sv.pre = extra[1] + parts[2] = extra[0] + } + } + + // Validate the number segments are valid. This includes only having positive + // numbers and no leading 0's. + for _, p := range parts { + if !containsOnly(p, num) { + return nil, ErrInvalidCharacters + } + + if len(p) > 1 && p[0] == '0' { + return nil, ErrSegmentStartsZero + } + } + + // Extract the major, minor, and patch elements onto the returned Version + var err error + sv.major, err = strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return nil, err + } + + sv.minor, err = strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return nil, err + } + + sv.patch, err = strconv.ParseUint(parts[2], 10, 64) + if err != nil { + return nil, err + } + + // No prerelease or build metadata found so returning now as a fastpath. + if sv.pre == "" && sv.metadata == "" { + return sv, nil + } + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. If the version is SemVer-ish it +// attempts to convert it to SemVer. If you want to validate it was a strict +// semantic version at parse time see StrictNewVersion(). +func NewVersion(v string) (*Version, error) { + m := versionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. 
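+	// (e.g. an input of "v1.2" has already been coerced to 1.2.0 at this point;
+	// only the prerelease and build metadata strings remain to be validated)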
+ + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// New creates a new instance of Version with each of the parts passed in as +// arguments instead of parsing a version string. +func New(major, minor, patch uint64, pre, metadata string) *Version { + v := Version{ + major: major, + minor: minor, + patch: patch, + pre: pre, + metadata: metadata, + original: "", + } + + v.original = v.String() + + return &v +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. +// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. +func (v Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. +func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v Version) Major() uint64 { + return v.major +} + +// Minor returns the minor version. +func (v Version) Minor() uint64 { + return v.minor +} + +// Patch returns the patch version. +func (v Version) Patch() uint64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. +func (v Version) originalVPrefix() string { + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. +// If the current version has any of prerelease or metadata information, +// it unsets both values and keeps current patch value +func (v Version) IncPatch() Version { + vNext := v + // according to http://semver.org/#spec-item-9 + // Pre-release versions have a lower precedence than the associated normal version. + // according to http://semver.org/#spec-item-10 + // Build metadata SHOULD be ignored when determining version precedence. + if v.pre != "" { + vNext.metadata = "" + vNext.pre = "" + } else { + vNext.metadata = "" + vNext.pre = "" + vNext.patch = v.patch + 1 + } + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMinor produces the next minor version. +// Sets patch to 0. +// Increments minor number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMinor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = v.minor + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMajor produces the next major version. +// Sets patch to 0. +// Sets minor to 0. 
+// Increments major number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMajor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = 0 + vNext.major = v.major + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// SetPrerelease defines the prerelease value. +// Value must not include the required 'hyphen' prefix. +func (v Version) SetPrerelease(prerelease string) (Version, error) { + vNext := v + if len(prerelease) > 0 { + if err := validatePrerelease(prerelease); err != nil { + return vNext, err + } + } + vNext.pre = prerelease + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// SetMetadata defines metadata value. +// Value must not include the required 'plus' prefix. +func (v Version) SetMetadata(metadata string) (Version, error) { + vNext := v + if len(metadata) > 0 { + if err := validateMetadata(metadata); err != nil { + return vNext, err + } + } + vNext.metadata = metadata + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// LessThan tests if one version is less than another one. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// GreaterThan tests if one version is greater than another one. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// Equal tests if two versions are equal to each other. +// Note, versions can be equal with different metadata since metadata +// is not considered part of the comparable version. +func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// Compare compares this version to another one. It returns -1, 0, or 1 if +// the version smaller, equal, or larger than the other version. +// +// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is +// lower than the version without a prerelease. Compare always takes into account +// prereleases. If you want to work with ranges using typical range syntaxes that +// skip prereleases if the range is not looking for them use constraints. +func (v *Version) Compare(o *Version) int { + // Compare the major, minor, and patch version for differences. If a + // difference is found return the comparison. + if d := compareSegment(v.Major(), o.Major()); d != 0 { + return d + } + if d := compareSegment(v.Minor(), o.Minor()); d != 0 { + return d + } + if d := compareSegment(v.Patch(), o.Patch()); d != 0 { + return d + } + + // At this point the major, minor, and patch versions are the same. + ps := v.pre + po := o.Prerelease() + + if ps == "" && po == "" { + return 0 + } + if ps == "" { + return 1 + } + if po == "" { + return -1 + } + + return comparePrerelease(ps, po) +} + +// UnmarshalJSON implements JSON.Unmarshaler interface. +func (v *Version) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + return nil +} + +// MarshalJSON implements JSON.Marshaler interface. +func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. 
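[Editor's illustration, not part of the vendored patch: a minimal usage sketch of the semver API added above (loose vs. strict parsing, comparison, and version bumping), assuming the module path from go.mod. Values are examples only.]

package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	// NewVersion coerces SemVer-ish input; StrictNewVersion rejects it.
	loose := semver.MustParse("1.2")               // coerced to 1.2.0
	_, strictErr := semver.StrictNewVersion("1.2") // fails: patch segment missing

	a := semver.MustParse("1.2.3-beta.1")
	b := semver.MustParse("1.2.3")

	fmt.Println(loose, strictErr != nil) // 1.2.0 true
	fmt.Println(a.LessThan(b))           // true: a prerelease sorts below its release
	fmt.Println(a.Compare(b))            // -1
	fmt.Println(b.IncMinor())            // 1.3.0
}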
+func (v *Version) UnmarshalText(text []byte) error { + temp, err := NewVersion(string(text)) + if err != nil { + return err + } + + *v = *temp + + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. +func (v Version) MarshalText() ([]byte, error) { + return []byte(v.String()), nil +} + +// Scan implements the SQL.Scanner interface. +func (v *Version) Scan(value interface{}) error { + var s string + s, _ = value.(string) + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + return nil +} + +// Value implements the Driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} + +func compareSegment(v, o uint64) int { + if v < o { + return -1 + } + if v > o { + return 1 + } + + return 0 +} + +func comparePrerelease(v, o string) int { + // split the prelease versions by their part. The separator, per the spec, + // is a . + sparts := strings.Split(v, ".") + oparts := strings.Split(o, ".") + + // Find the longer length of the parts to know how many loop iterations to + // go through. + slen := len(sparts) + olen := len(oparts) + + l := slen + if olen > slen { + l = olen + } + + // Iterate over each part of the prereleases to compare the differences. + for i := 0; i < l; i++ { + // Since the lentgh of the parts can be different we need to create + // a placeholder. This is to avoid out of bounds issues. + stemp := "" + if i < slen { + stemp = sparts[i] + } + + otemp := "" + if i < olen { + otemp = oparts[i] + } + + d := comparePrePart(stemp, otemp) + if d != 0 { + return d + } + } + + // Reaching here means two versions are of equal value but have different + // metadata (the part following a +). They are not identical in string form + // but the version comparison finds them to be equal. + return 0 +} + +func comparePrePart(s, o string) int { + // Fastpath if they are equal + if s == o { + return 0 + } + + // When s or o are empty we can use the other in an attempt to determine + // the response. + if s == "" { + if o != "" { + return -1 + } + return 1 + } + + if o == "" { + if s != "" { + return 1 + } + return -1 + } + + // When comparing strings "99" is greater than "103". To handle + // cases like this we need to detect numbers and compare them. According + // to the semver spec, numbers are always positive. If there is a - at the + // start like -99 this is to be evaluated as an alphanum. numbers always + // have precedence over alphanum. Parsing as Uints because negative numbers + // are ignored. + + oi, n1 := strconv.ParseUint(o, 10, 64) + si, n2 := strconv.ParseUint(s, 10, 64) + + // The case where both are strings compare the strings + if n1 != nil && n2 != nil { + if s > o { + return 1 + } + return -1 + } else if n1 != nil { + // o is a string and s is a number + return -1 + } else if n2 != nil { + // s is a string and o is a number + return 1 + } + // Both are numbers + if si > oi { + return 1 + } + return -1 +} + +// Like strings.ContainsAny but does an only instead of any. +func containsOnly(s string, comp string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(comp, r) + }) == -1 +} + +// From the spec, "Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty. +// Numeric identifiers MUST NOT include leading zeroes.". These segments can +// be dot separated. 
+func validatePrerelease(p string) error { + eparts := strings.Split(p, ".") + for _, p := range eparts { + if containsOnly(p, num) { + if len(p) > 1 && p[0] == '0' { + return ErrSegmentStartsZero + } + } else if !containsOnly(p, allowed) { + return ErrInvalidPrerelease + } + } + + return nil +} + +// From the spec, "Build metadata MAY be denoted by +// appending a plus sign and a series of dot separated identifiers immediately +// following the patch or pre-release version. Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty." +func validateMetadata(m string) error { + eparts := strings.Split(m, ".") + for _, p := range eparts { + if !containsOnly(p, allowed) { + return ErrInvalidMetadata + } + } + return nil +} diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go index 0d82a2dd3..5c7f3b028 100644 --- a/vendor/github.com/containernetworking/cni/libcni/api.go +++ b/vendor/github.com/containernetworking/cni/libcni/api.go @@ -15,7 +15,7 @@ package libcni // Note this is the actual implementation of the CNI specification, which -// is reflected in the https://github.com/containernetworking/cni/blob/master/SPEC.md file +// is reflected in the SPEC.md file. // it is typically bundled into runtime providers (i.e. containerd or cri-o would use this // before calling runc or hcsshim). It is also bundled into CNI providers as well, for example, // to add an IP to a container, to parse the configuration of the CNI and so on. @@ -24,9 +24,9 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" + "sort" "strings" "github.com/containernetworking/cni/pkg/invoke" @@ -38,6 +38,8 @@ import ( var ( CacheDir = "/var/lib/cni" + // slightly awkward wording to preserve anyone matching on error strings + ErrorCheckNotSupp = fmt.Errorf("does not support the CHECK command") ) const ( @@ -77,6 +79,20 @@ type NetworkConfigList struct { Bytes []byte } +type NetworkAttachment struct { + ContainerID string + Network string + IfName string + Config []byte + NetNS string + CniArgs [][2]string + CapabilityArgs map[string]interface{} +} + +type GCArgs struct { + ValidAttachments []types.GCAttachment +} + type CNI interface { AddNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error @@ -92,6 +108,11 @@ type CNI interface { ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) + + GCNetworkList(ctx context.Context, net *NetworkConfigList, args *GCArgs) error + GetStatusNetworkList(ctx context.Context, net *NetworkConfigList) error + + GetCachedAttachments(containerID string) ([]*NetworkAttachment, error) } type CNIConfig struct { @@ -139,8 +160,11 @@ func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult typ if err != nil { return nil, err } + if rt != nil { + return injectRuntimeConfig(orig, rt) + } - return injectRuntimeConfig(orig, rt) + return orig, nil } // This function takes a libcni RuntimeConf structure and injects values into @@ -195,6 +219,7 @@ type cachedInfo struct { Config []byte `json:"config"` IfName string `json:"ifName"` NetworkName string `json:"networkName"` + NetNS string `json:"netns,omitempty"` CniArgs [][2]string `json:"cniArgs,omitempty"` CapabilityArgs map[string]interface{} 
`json:"capabilityArgs,omitempty"` RawResult map[string]interface{} `json:"result,omitempty"` @@ -229,6 +254,7 @@ func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string, Config: config, IfName: rt.IfName, NetworkName: netName, + NetNS: rt.NetNS, CniArgs: rt.Args, CapabilityArgs: rt.CapabilityArgs, } @@ -254,11 +280,11 @@ func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string, if err != nil { return err } - if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil { + if err := os.MkdirAll(filepath.Dir(fname), 0o700); err != nil { return err } - return ioutil.WriteFile(fname, newBytes, 0600) + return os.WriteFile(fname, newBytes, 0o600) } func (c *CNIConfig) cacheDel(netName string, rt *RuntimeConf) error { @@ -277,7 +303,7 @@ func (c *CNIConfig) getCachedConfig(netName string, rt *RuntimeConf) ([]byte, *R if err != nil { return nil, nil, err } - bytes, err = ioutil.ReadFile(fname) + bytes, err = os.ReadFile(fname) if err != nil { // Ignore read errors; the cached result may not exist on-disk return nil, nil, nil @@ -305,7 +331,7 @@ func (c *CNIConfig) getLegacyCachedResult(netName, cniVersion string, rt *Runtim if err != nil { return nil, err } - data, err := ioutil.ReadFile(fname) + data, err := os.ReadFile(fname) if err != nil { // Ignore read errors; the cached result may not exist on-disk return nil, nil @@ -333,7 +359,7 @@ func (c *CNIConfig) getCachedResult(netName, cniVersion string, rt *RuntimeConf) if err != nil { return nil, err } - fdata, err := ioutil.ReadFile(fname) + fdata, err := os.ReadFile(fname) if err != nil { // Ignore read errors; the cached result may not exist on-disk return nil, nil @@ -390,6 +416,65 @@ func (c *CNIConfig) GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) return c.getCachedConfig(net.Network.Name, rt) } +// GetCachedAttachments returns a list of network attachments from the cache. +// The returned list will be filtered by the containerID if the value is not empty. 
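[Editor's illustration, not part of the vendored patch: a hedged sketch of how a runtime might call the new GetCachedAttachments API. libcni.NewCNIConfig is defined elsewhere in libcni (not in this hunk), and the plugin path is an example.]

package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/libcni"
)

func main() {
	cni := libcni.NewCNIConfig([]string{"/opt/cni/bin"}, nil)

	// An empty containerID returns every cached attachment; a non-empty ID
	// filters the list to that container.
	attachments, err := cni.GetCachedAttachments("")
	if err != nil {
		log.Fatal(err)
	}
	for _, a := range attachments {
		fmt.Printf("%s: %s on %s (netns %s)\n", a.Network, a.ContainerID, a.IfName, a.NetNS)
	}
}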
+func (c *CNIConfig) GetCachedAttachments(containerID string) ([]*NetworkAttachment, error) { + dirPath := filepath.Join(c.getCacheDir(&RuntimeConf{}), "results") + entries, err := os.ReadDir(dirPath) + if err != nil { + return nil, err + } + + fileNames := make([]string, 0, len(entries)) + for _, e := range entries { + fileNames = append(fileNames, e.Name()) + } + sort.Strings(fileNames) + + attachments := []*NetworkAttachment{} + for _, fname := range fileNames { + if len(containerID) > 0 { + part := fmt.Sprintf("-%s-", containerID) + pos := strings.Index(fname, part) + if pos <= 0 || pos+len(part) >= len(fname) { + continue + } + } + + cacheFile := filepath.Join(dirPath, fname) + bytes, err := os.ReadFile(cacheFile) + if err != nil { + continue + } + + cachedInfo := cachedInfo{} + + if err := json.Unmarshal(bytes, &cachedInfo); err != nil { + continue + } + if cachedInfo.Kind != CNICacheV1 { + continue + } + if len(containerID) > 0 && cachedInfo.ContainerID != containerID { + continue + } + if cachedInfo.IfName == "" || cachedInfo.NetworkName == "" { + continue + } + + attachments = append(attachments, &NetworkAttachment{ + ContainerID: cachedInfo.ContainerID, + Network: cachedInfo.NetworkName, + IfName: cachedInfo.IfName, + Config: cachedInfo.Config, + NetNS: cachedInfo.NetNS, + CniArgs: cachedInfo.CniArgs, + CapabilityArgs: cachedInfo.CapabilityArgs, + }) + } + return attachments, nil +} + func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) { c.ensureExec() pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) @@ -453,7 +538,7 @@ func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigLis if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { return err } else if !gtet { - return fmt.Errorf("configuration version %q does not support the CHECK command", list.CNIVersion) + return fmt.Errorf("configuration version %q %w", list.CNIVersion, ErrorCheckNotSupp) } if list.DisableCheck { @@ -497,9 +582,9 @@ func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { return err } else if gtet { - cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt) - if err != nil { - return fmt.Errorf("failed to get network %q cached result: %w", list.Name, err) + if cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt); err != nil { + _ = c.cacheDel(list.Name, rt) + cachedResult = nil } } @@ -509,7 +594,10 @@ func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, return fmt.Errorf("plugin %s failed (delete): %w", pluginDescription(net.Network), err) } } - _ = c.cacheDel(list.Name, rt) + + if cachedResult != nil { + _ = c.cacheDel(list.Name, rt) + } return nil } @@ -547,7 +635,7 @@ func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *Ru if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { return err } else if !gtet { - return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion) + return fmt.Errorf("configuration version %q %w", net.Network.CNIVersion, ErrorCheckNotSupp) } cachedResult, err := c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) @@ -666,6 +754,116 @@ func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (vers return 
invoke.GetVersionInfo(ctx, pluginPath, c.exec) } +// GCNetworkList will do two things +// - dump the list of cached attachments, and issue deletes as necessary +// - issue a GC to the underlying plugins (if the version is high enough) +func (c *CNIConfig) GCNetworkList(ctx context.Context, list *NetworkConfigList, args *GCArgs) error { + // First, get the list of cached attachments + cachedAttachments, err := c.GetCachedAttachments("") + if err != nil { + return nil + } + + validAttachments := make(map[types.GCAttachment]interface{}, len(args.ValidAttachments)) + for _, a := range args.ValidAttachments { + validAttachments[a] = nil + } + + var errs []error + + for _, cachedAttachment := range cachedAttachments { + if cachedAttachment.Network != list.Name { + continue + } + // we found this attachment + gca := types.GCAttachment{ + ContainerID: cachedAttachment.ContainerID, + IfName: cachedAttachment.IfName, + } + if _, ok := validAttachments[gca]; ok { + continue + } + // otherwise, this attachment wasn't valid and we should issue a CNI DEL + rt := RuntimeConf{ + ContainerID: cachedAttachment.ContainerID, + NetNS: cachedAttachment.NetNS, + IfName: cachedAttachment.IfName, + Args: cachedAttachment.CniArgs, + CapabilityArgs: cachedAttachment.CapabilityArgs, + } + if err := c.DelNetworkList(ctx, list, &rt); err != nil { + errs = append(errs, fmt.Errorf("failed to delete stale attachment %s %s: %w", rt.ContainerID, rt.IfName, err)) + } + } + + // now, if the version supports it, issue a GC + if gt, _ := version.GreaterThanOrEqualTo(list.CNIVersion, "1.1.0"); gt { + inject := map[string]interface{}{ + "name": list.Name, + "cniVersion": list.CNIVersion, + "cni.dev/valid-attachments": args.ValidAttachments, + } + for _, plugin := range list.Plugins { + // build config here + pluginConfig, err := InjectConf(plugin, inject) + if err != nil { + errs = append(errs, fmt.Errorf("failed to generate configuration to GC plugin %s: %w", plugin.Network.Type, err)) + } + if err := c.gcNetwork(ctx, pluginConfig); err != nil { + errs = append(errs, fmt.Errorf("failed to GC plugin %s: %w", plugin.Network.Type, err)) + } + } + } + + return joinErrors(errs...) +} + +func (c *CNIConfig) gcNetwork(ctx context.Context, net *NetworkConfig) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + args := c.args("GC", &RuntimeConf{}) + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, net.Bytes, args, c.exec) +} + +func (c *CNIConfig) GetStatusNetworkList(ctx context.Context, list *NetworkConfigList) error { + // If the version doesn't support status, abort. + if gt, _ := version.GreaterThanOrEqualTo(list.CNIVersion, "1.1.0"); !gt { + return nil + } + + inject := map[string]interface{}{ + "name": list.Name, + "cniVersion": list.CNIVersion, + } + + for _, plugin := range list.Plugins { + // build config here + pluginConfig, err := InjectConf(plugin, inject) + if err != nil { + return fmt.Errorf("failed to generate configuration to get plugin STATUS %s: %w", plugin.Network.Type, err) + } + if err := c.getStatusNetwork(ctx, pluginConfig); err != nil { + return err // Don't collect errors here, so we return a clean error code. 
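[Editor's illustration, not part of the vendored patch: a hedged sketch of the GC flow implemented above. libcni.NewCNIConfig and the conflist path are assumptions; the valid-attachments list is an example.]

package main

import (
	"context"
	"log"

	"github.com/containernetworking/cni/libcni"
	"github.com/containernetworking/cni/pkg/types"
)

func main() {
	cni := libcni.NewCNIConfig([]string{"/opt/cni/bin"}, nil)

	list, err := libcni.ConfListFromFile("/etc/cni/net.d/10-mynet.conflist")
	if err != nil {
		log.Fatal(err)
	}

	// Attachments the runtime still considers live. Anything cached for this
	// network but absent here gets a CNI DEL, and on spec >= 1.1.0 the plugins
	// also receive a GC with the valid-attachments list injected.
	args := &libcni.GCArgs{
		ValidAttachments: []types.GCAttachment{
			{ContainerID: "abc123", IfName: "eth0"},
		},
	}

	if err := cni.GCNetworkList(context.Background(), list, args); err != nil {
		log.Fatal(err)
	}
}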
+ } + } + return nil +} + +func (c *CNIConfig) getStatusNetwork(ctx context.Context, net *NetworkConfig) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + args := c.args("STATUS", &RuntimeConf{}) + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, net.Bytes, args, c.exec) +} + // ===== func (c *CNIConfig) args(action string, rt *RuntimeConf) *invoke.Args { return &invoke.Args{ diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go index 3cd6a59d1..6c5d99de9 100644 --- a/vendor/github.com/containernetworking/cni/libcni/conf.go +++ b/vendor/github.com/containernetworking/cni/libcni/conf.go @@ -16,13 +16,17 @@ package libcni import ( "encoding/json" + "errors" "fmt" - "io/ioutil" "os" "path/filepath" "sort" + "strings" + + "github.com/Masterminds/semver/v3" "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/version" ) type NotFoundError struct { @@ -54,7 +58,7 @@ func ConfFromBytes(bytes []byte) (*NetworkConfig, error) { } func ConfFromFile(filename string) (*NetworkConfig, error) { - bytes, err := ioutil.ReadFile(filename) + bytes, err := os.ReadFile(filename) if err != nil { return nil, fmt.Errorf("error reading %s: %w", filename, err) } @@ -85,11 +89,63 @@ func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { } } + rawVersions, ok := rawList["cniVersions"] + if ok { + // Parse the current package CNI version + currentVersion, err := semver.NewVersion(version.Current()) + if err != nil { + panic("CNI version is invalid semver!") + } + + rvs, ok := rawVersions.([]interface{}) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid type for cniVersions: %T", rvs) + } + vs := make([]*semver.Version, 0, len(rvs)) + for i, rv := range rvs { + v, ok := rv.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid type for cniVersions index %d: %T", i, rv) + } + if v, err := semver.NewVersion(v); err != nil { + return nil, fmt.Errorf("error parsing configuration list: invalid cniVersions entry %s at index %d: %w", v, i, err) + } else if !v.GreaterThan(currentVersion) { + // Skip versions "greater" than this implementation of the spec + vs = append(vs, v) + } + } + + // if cniVersion was already set, append it to the list for sorting. 
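[Editor's illustration, not part of the vendored patch: a small sketch of how the new "cniVersions" handling plays out when loading a config list. The config contents are examples; the selected version assumes the library's current spec version of 1.1.0, with newer entries ignored.]

package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/libcni"
)

func main() {
	conf := []byte(`{
		"name": "mynet",
		"cniVersions": ["0.4.0", "1.0.0", "1.1.0", "99.0.0"],
		"plugins": [{"type": "bridge"}]
	}`)

	list, err := libcni.ConfListFromBytes(conf)
	if err != nil {
		log.Fatal(err)
	}
	// The loader keeps only versions it implements and picks the highest.
	fmt.Println(list.CNIVersion) // 1.1.0
}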
+ if cniVersion != "" { + if v, err := semver.NewVersion(cniVersion); err != nil { + return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion %s: %w", cniVersion, err) + } else if !v.GreaterThan(currentVersion) { + // ignore any versions higher than the current implemented spec version + vs = append(vs, v) + } + } + sort.Sort(semver.Collection(vs)) + if len(vs) > 0 { + cniVersion = vs[len(vs)-1].String() + } + } + disableCheck := false if rawDisableCheck, ok := rawList["disableCheck"]; ok { disableCheck, ok = rawDisableCheck.(bool) if !ok { - return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck type %T", rawDisableCheck) + disableCheckStr, ok := rawDisableCheck.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck type %T", rawDisableCheck) + } + switch { + case strings.ToLower(disableCheckStr) == "false": + disableCheck = false + case strings.ToLower(disableCheckStr) == "true": + disableCheck = true + default: + return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck value %q", disableCheckStr) + } } } @@ -129,7 +185,7 @@ func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { } func ConfListFromFile(filename string) (*NetworkConfigList, error) { - bytes, err := ioutil.ReadFile(filename) + bytes, err := os.ReadFile(filename) if err != nil { return nil, fmt.Errorf("error reading %s: %w", filename, err) } @@ -138,7 +194,7 @@ func ConfListFromFile(filename string) (*NetworkConfigList, error) { func ConfFiles(dir string, extensions []string) ([]string, error) { // In part, adapted from rkt/networking/podenv.go#listFiles - files, err := ioutil.ReadDir(dir) + files, err := os.ReadDir(dir) switch { case err == nil: // break case os.IsNotExist(err): @@ -206,7 +262,8 @@ func LoadConfList(dir, name string) (*NetworkConfigList, error) { singleConf, err := LoadConf(dir, name) if err != nil { // A little extra logic so the error makes sense - if _, ok := err.(NoConfigsFoundError); len(files) != 0 && ok { + var ncfErr NoConfigsFoundError + if len(files) != 0 && errors.As(err, &ncfErr) { // Config lists found but no config files found return nil, NotFoundError{dir, name} } diff --git a/vendor/github.com/containernetworking/cni/libcni/multierror.go b/vendor/github.com/containernetworking/cni/libcni/multierror.go new file mode 100644 index 000000000..100fb8392 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/libcni/multierror.go @@ -0,0 +1,58 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright the CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Adapted from errors/join.go from go 1.20 +// This package can be removed once the toolchain is updated to 1.20 + +package libcni + +func joinErrors(errs ...error) error { + n := 0 + for _, err := range errs { + if err != nil { + n++ + } + } + if n == 0 { + return nil + } + e := &multiError{ + errs: make([]error, 0, n), + } + for _, err := range errs { + if err != nil { + e.errs = append(e.errs, err) + } + } + return e +} + +type multiError struct { + errs []error +} + +func (e *multiError) Error() string { + var b []byte + for i, err := range e.errs { + if i > 0 { + b = append(b, '\n') + } + b = append(b, err.Error()...) + } + return string(b) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go index 8defe4dd3..c8b548e7c 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go @@ -51,25 +51,34 @@ func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exe // DelegateCheck calls the given delegate plugin with the CNI CHECK action and // JSON configuration func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + return delegateNoResult(ctx, delegatePlugin, netconf, exec, "CHECK") +} + +func delegateNoResult(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec, verb string) error { pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) if err != nil { return err } - // DelegateCheck will override the original CNI_COMMAND env from process with CHECK - return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("CHECK"), realExec) + return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs(verb), realExec) } // DelegateDel calls the given delegate plugin with the CNI DEL action and // JSON configuration func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { - pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) - if err != nil { - return err - } + return delegateNoResult(ctx, delegatePlugin, netconf, exec, "DEL") +} - // DelegateDel will override the original CNI_COMMAND env from process with DEL - return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("DEL"), realExec) +// DelegateStatus calls the given delegate plugin with the CNI STATUS action and +// JSON configuration +func DelegateStatus(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + return delegateNoResult(ctx, delegatePlugin, netconf, exec, "STATUS") +} + +// DelegateGC calls the given delegate plugin with the CNI GC action and +// JSON configuration +func DelegateGC(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + return delegateNoResult(ctx, delegatePlugin, netconf, exec, "GC") } // return CNIArgs used by delegation diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go index 3ad07aa8f..a5e015fc9 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go @@ -81,17 +81,17 @@ func fixupResultVersion(netconf, result []byte) (string, []byte, error) { // object to ExecPluginWithResult() to verify the incoming stdin and environment // and provide a tailored response: // -//import ( +// import ( // "encoding/json" // "path" // "strings" -//) 
+// ) // -//type fakeExec struct { +// type fakeExec struct { // version.PluginDecoder -//} +// } // -//func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) { +// func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) { // net := &types.NetConf{} // err := json.Unmarshal(stdinData, net) // if err != nil { @@ -109,14 +109,14 @@ func fixupResultVersion(netconf, result []byte) (string, []byte, error) { // } // } // return []byte("{\"CNIVersion\":\"0.4.0\"}"), nil -//} +// } // -//func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) { +// func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) { // if len(paths) > 0 { // return path.Join(paths[0], plugin), nil // } // return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths) -//} +// } func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) { if exec == nil { diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go index 9bcfb4553..ed0999bd0 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build darwin dragonfly freebsd linux netbsd openbsd solaris package invoke diff --git a/vendor/github.com/containernetworking/cni/pkg/ns/ns_linux.go b/vendor/github.com/containernetworking/cni/pkg/ns/ns_linux.go new file mode 100644 index 000000000..3d58e75d6 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/ns/ns_linux.go @@ -0,0 +1,50 @@ +// Copyright 2022 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ns + +import ( + "runtime" + + "github.com/vishvananda/netns" + + "github.com/containernetworking/cni/pkg/types" +) + +// Returns an object representing the current OS thread's network namespace +func getCurrentNS() (netns.NsHandle, error) { + // Lock the thread in case other goroutine executes in it and changes its + // network namespace after getCurrentThreadNetNSPath(), otherwise it might + // return an unexpected network namespace. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + return netns.Get() +} + +func CheckNetNS(nsPath string) (bool, *types.Error) { + ns, err := netns.GetFromPath(nsPath) + // Let plugins check whether nsPath from args is valid. Also support CNI DEL for empty nsPath as already-deleted nsPath. 
+ if err != nil { + return false, nil + } + defer ns.Close() + + pluginNS, err := getCurrentNS() + if err != nil { + return false, types.NewError(types.ErrInvalidNetNS, "get plugin's netns failed", "") + } + defer pluginNS.Close() + + return pluginNS.Equal(ns), nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/ns/ns_windows.go b/vendor/github.com/containernetworking/cni/pkg/ns/ns_windows.go new file mode 100644 index 000000000..cffe13617 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/ns/ns_windows.go @@ -0,0 +1,21 @@ +// Copyright 2022 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ns + +import "github.com/containernetworking/cni/pkg/types" + +func CheckNetNS(nsPath string) (bool, *types.Error) { + return false, nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/skel/skel.go b/vendor/github.com/containernetworking/cni/pkg/skel/skel.go index cb8781972..f29cf3459 100644 --- a/vendor/github.com/containernetworking/cni/pkg/skel/skel.go +++ b/vendor/github.com/containernetworking/cni/pkg/skel/skel.go @@ -19,13 +19,14 @@ package skel import ( "bytes" "encoding/json" + "errors" "fmt" "io" - "io/ioutil" "log" "os" "strings" + "github.com/containernetworking/cni/pkg/ns" "github.com/containernetworking/cni/pkg/types" "github.com/containernetworking/cni/pkg/utils" "github.com/containernetworking/cni/pkg/version" @@ -34,12 +35,13 @@ import ( // CmdArgs captures all the arguments passed in to the plugin // via both env vars and stdin type CmdArgs struct { - ContainerID string - Netns string - IfName string - Args string - Path string - StdinData []byte + ContainerID string + Netns string + IfName string + Args string + Path string + NetnsOverride string + StdinData []byte } type dispatcher struct { @@ -55,21 +57,25 @@ type dispatcher struct { type reqForCmdEntry map[string]bool func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { - var cmd, contID, netns, ifName, args, path string + var cmd, contID, netns, ifName, args, path, netnsOverride string vars := []struct { - name string - val *string - reqForCmd reqForCmdEntry + name string + val *string + reqForCmd reqForCmdEntry + validateFn func(string) *types.Error }{ { "CNI_COMMAND", &cmd, reqForCmdEntry{ - "ADD": true, - "CHECK": true, - "DEL": true, + "ADD": true, + "CHECK": true, + "DEL": true, + "GC": true, + "STATUS": true, }, + nil, }, { "CNI_CONTAINERID", @@ -79,6 +85,7 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { "CHECK": true, "DEL": true, }, + utils.ValidateContainerID, }, { "CNI_NETNS", @@ -88,6 +95,7 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { "CHECK": true, "DEL": false, }, + nil, }, { "CNI_IFNAME", @@ -97,6 +105,7 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { "CHECK": true, "DEL": true, }, + utils.ValidateInterfaceName, }, { "CNI_ARGS", @@ -106,15 +115,29 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { 
"CHECK": false, "DEL": false, }, + nil, }, { "CNI_PATH", &path, reqForCmdEntry{ - "ADD": true, - "CHECK": true, - "DEL": true, + "ADD": true, + "CHECK": true, + "DEL": true, + "GC": true, + "STATUS": true, + }, + nil, + }, + { + "CNI_NETNS_OVERRIDE", + &netnsOverride, + reqForCmdEntry{ + "ADD": false, + "CHECK": false, + "DEL": false, }, + nil, }, } @@ -125,6 +148,10 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { if v.reqForCmd[cmd] || v.name == "CNI_COMMAND" { argsMissing = append(argsMissing, v.name) } + } else if v.reqForCmd[cmd] && v.validateFn != nil { + if err := v.validateFn(*v.val); err != nil { + return "", nil, err + } } } @@ -137,18 +164,25 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { t.Stdin = bytes.NewReader(nil) } - stdinData, err := ioutil.ReadAll(t.Stdin) + stdinData, err := io.ReadAll(t.Stdin) if err != nil { return "", nil, types.NewError(types.ErrIOFailure, fmt.Sprintf("error reading from stdin: %v", err), "") } + if cmd != "VERSION" { + if err := validateConfig(stdinData); err != nil { + return "", nil, err + } + } + cmdArgs := &CmdArgs{ - ContainerID: contID, - Netns: netns, - IfName: ifName, - Args: args, - Path: path, - StdinData: stdinData, + ContainerID: contID, + Netns: netns, + IfName: ifName, + Args: args, + Path: path, + StdinData: stdinData, + NetnsOverride: netnsOverride, } return cmd, cmdArgs, nil } @@ -163,8 +197,13 @@ func (t *dispatcher) checkVersionAndCall(cmdArgs *CmdArgs, pluginVersionInfo ver return types.NewError(types.ErrIncompatibleCNIVersion, "incompatible CNI versions", verErr.Details()) } + if toCall == nil { + return nil + } + if err = toCall(cmdArgs); err != nil { - if e, ok := err.(*types.Error); ok { + var e *types.Error + if errors.As(err, &e) { // don't wrap Error in Error return e } @@ -190,7 +229,7 @@ func validateConfig(jsonBytes []byte) *types.Error { return nil } -func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, versionInfo version.PluginInfo, about string) *types.Error { +func (t *dispatcher) pluginMain(funcs CNIFuncs, versionInfo version.PluginInfo, about string) *types.Error { cmd, cmdArgs, err := t.getCmdArgsFromEnv() if err != nil { // Print the about string to stderr when no command is set @@ -202,21 +241,20 @@ func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, return err } - if cmd != "VERSION" { - if err = validateConfig(cmdArgs.StdinData); err != nil { - return err - } - if err = utils.ValidateContainerID(cmdArgs.ContainerID); err != nil { + switch cmd { + case "ADD": + err = t.checkVersionAndCall(cmdArgs, versionInfo, funcs.Add) + if err != nil { return err } - if err = utils.ValidateInterfaceName(cmdArgs.IfName); err != nil { - return err + if strings.ToUpper(cmdArgs.NetnsOverride) != "TRUE" && cmdArgs.NetnsOverride != "1" { + isPluginNetNS, checkErr := ns.CheckNetNS(cmdArgs.Netns) + if checkErr != nil { + return checkErr + } else if isPluginNetNS { + return types.NewError(types.ErrInvalidNetNS, "plugin's netns and netns from CNI_NETNS should not be the same", "") + } } - } - - switch cmd { - case "ADD": - err = t.checkVersionAndCall(cmdArgs, versionInfo, cmdAdd) case "CHECK": configVersion, err := t.ConfVersionDecoder.Decode(cmdArgs.StdinData) if err != nil { @@ -232,7 +270,7 @@ func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, if err != nil { return types.NewError(types.ErrDecodingFailure, err.Error(), "") } else if gtet { - if err := t.checkVersionAndCall(cmdArgs, 
versionInfo, cmdCheck); err != nil { + if err := t.checkVersionAndCall(cmdArgs, versionInfo, funcs.Check); err != nil { return err } return nil @@ -240,7 +278,62 @@ func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, } return types.NewError(types.ErrIncompatibleCNIVersion, "plugin version does not allow CHECK", "") case "DEL": - err = t.checkVersionAndCall(cmdArgs, versionInfo, cmdDel) + err = t.checkVersionAndCall(cmdArgs, versionInfo, funcs.Del) + if err != nil { + return err + } + if strings.ToUpper(cmdArgs.NetnsOverride) != "TRUE" && cmdArgs.NetnsOverride != "1" { + isPluginNetNS, checkErr := ns.CheckNetNS(cmdArgs.Netns) + if checkErr != nil { + return checkErr + } else if isPluginNetNS { + return types.NewError(types.ErrInvalidNetNS, "plugin's netns and netns from CNI_NETNS should not be the same", "") + } + } + case "GC": + configVersion, err := t.ConfVersionDecoder.Decode(cmdArgs.StdinData) + if err != nil { + return types.NewError(types.ErrDecodingFailure, err.Error(), "") + } + if gtet, err := version.GreaterThanOrEqualTo(configVersion, "1.1.0"); err != nil { + return types.NewError(types.ErrDecodingFailure, err.Error(), "") + } else if !gtet { + return types.NewError(types.ErrIncompatibleCNIVersion, "config version does not allow GC", "") + } + for _, pluginVersion := range versionInfo.SupportedVersions() { + gtet, err := version.GreaterThanOrEqualTo(pluginVersion, configVersion) + if err != nil { + return types.NewError(types.ErrDecodingFailure, err.Error(), "") + } else if gtet { + if err := t.checkVersionAndCall(cmdArgs, versionInfo, funcs.GC); err != nil { + return err + } + return nil + } + } + return types.NewError(types.ErrIncompatibleCNIVersion, "plugin version does not allow GC", "") + case "STATUS": + configVersion, err := t.ConfVersionDecoder.Decode(cmdArgs.StdinData) + if err != nil { + return types.NewError(types.ErrDecodingFailure, err.Error(), "") + } + if gtet, err := version.GreaterThanOrEqualTo(configVersion, "1.1.0"); err != nil { + return types.NewError(types.ErrDecodingFailure, err.Error(), "") + } else if !gtet { + return types.NewError(types.ErrIncompatibleCNIVersion, "config version does not allow STATUS", "") + } + for _, pluginVersion := range versionInfo.SupportedVersions() { + gtet, err := version.GreaterThanOrEqualTo(pluginVersion, configVersion) + if err != nil { + return types.NewError(types.ErrDecodingFailure, err.Error(), "") + } else if gtet { + if err := t.checkVersionAndCall(cmdArgs, versionInfo, funcs.Status); err != nil { + return err + } + return nil + } + } + return types.NewError(types.ErrIncompatibleCNIVersion, "plugin version does not allow STATUS", "") case "VERSION": if err := versionInfo.Encode(t.Stdout); err != nil { return types.NewError(types.ErrIOFailure, err.Error(), "") @@ -264,13 +357,63 @@ func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, // // To let this package automatically handle errors and call os.Exit(1) for you, // use PluginMain() instead. +// +// Deprecated: Use github.com/containernetworking/cni/pkg/skel.PluginMainFuncsWithError instead. func PluginMainWithError(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, versionInfo version.PluginInfo, about string) *types.Error { + return PluginMainFuncsWithError(CNIFuncs{Add: cmdAdd, Check: cmdCheck, Del: cmdDel}, versionInfo, about) +} + +// CNIFuncs contains a group of callback command funcs to be passed in as +// parameters to the core "main" for a plugin. 
+type CNIFuncs struct { + Add func(_ *CmdArgs) error + Del func(_ *CmdArgs) error + Check func(_ *CmdArgs) error + GC func(_ *CmdArgs) error + Status func(_ *CmdArgs) error +} + +// PluginMainFuncsWithError is the core "main" for a plugin. It accepts +// callback functions defined within CNIFuncs and returns an error. +// +// The caller must also specify what CNI spec versions the plugin supports. +// +// It is the responsibility of the caller to check for non-nil error return. +// +// For a plugin to comply with the CNI spec, it must print any error to stdout +// as JSON and then exit with nonzero status code. +// +// To let this package automatically handle errors and call os.Exit(1) for you, +// use PluginMainFuncs() instead. +func PluginMainFuncsWithError(funcs CNIFuncs, versionInfo version.PluginInfo, about string) *types.Error { return (&dispatcher{ Getenv: os.Getenv, Stdin: os.Stdin, Stdout: os.Stdout, Stderr: os.Stderr, - }).pluginMain(cmdAdd, cmdCheck, cmdDel, versionInfo, about) + }).pluginMain(funcs, versionInfo, about) +} + +// PluginMainFuncs is the core "main" for a plugin which includes automatic error handling. +// This is a newer alternative func to PluginMain which abstracts CNI commands within a +// CNIFuncs interface. +// +// The caller must also specify what CNI spec versions the plugin supports. +// +// The caller can specify an "about" string, which is printed on stderr +// when no CNI_COMMAND is specified. The recommended output is "CNI plugin v" +// +// When an error occurs in any func in CNIFuncs, PluginMainFuncs will print the error +// as JSON to stdout and call os.Exit(1). +// +// To have more control over error handling, use PluginMainFuncsWithError() instead. +func PluginMainFuncs(funcs CNIFuncs, versionInfo version.PluginInfo, about string) { + if e := PluginMainFuncsWithError(funcs, versionInfo, about); e != nil { + if err := e.Print(); err != nil { + log.Print("Error writing error JSON to stdout: ", err) + } + os.Exit(1) + } } // PluginMain is the core "main" for a plugin which includes automatic error handling. @@ -284,6 +427,8 @@ func PluginMainWithError(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, versio // as JSON to stdout and call os.Exit(1). // // To have more control over error handling, use PluginMainWithError() instead. +// +// Deprecated: Use github.com/containernetworking/cni/pkg/skel.PluginMainFuncs instead. 
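[Editor's illustration, not part of the vendored patch: a minimal plugin skeleton wired through the new CNIFuncs entry point. The command bodies are placeholder stubs, and types.PrintResult plus the types/100 import are assumptions drawn from the wider CNI packages rather than this hunk.]

package main

import (
	"github.com/containernetworking/cni/pkg/skel"
	"github.com/containernetworking/cni/pkg/types"
	current "github.com/containernetworking/cni/pkg/types/100"
	"github.com/containernetworking/cni/pkg/version"
)

func cmdAdd(args *skel.CmdArgs) error {
	// A real plugin would configure networking here before printing a result.
	result := &current.Result{CNIVersion: current.ImplementedSpecVersion}
	return types.PrintResult(result, current.ImplementedSpecVersion)
}

func cmdDel(args *skel.CmdArgs) error    { return nil }
func cmdCheck(args *skel.CmdArgs) error  { return nil }
func cmdGC(args *skel.CmdArgs) error     { return nil }
func cmdStatus(args *skel.CmdArgs) error { return nil }

func main() {
	skel.PluginMainFuncs(skel.CNIFuncs{
		Add:    cmdAdd,
		Del:    cmdDel,
		Check:  cmdCheck,
		GC:     cmdGC,
		Status: cmdStatus,
	}, version.All, "example CNI plugin v0.0.1")
}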
func PluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, versionInfo version.PluginInfo, about string) { if e := PluginMainWithError(cmdAdd, cmdCheck, cmdDel, versionInfo, about); e != nil { if err := e.Print(); err != nil { diff --git a/vendor/github.com/containernetworking/cni/pkg/types/100/types.go b/vendor/github.com/containernetworking/cni/pkg/types/100/types.go index 0e1e8b857..f58b91206 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/100/types.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/100/types.go @@ -26,9 +26,10 @@ import ( convert "github.com/containernetworking/cni/pkg/types/internal" ) -const ImplementedSpecVersion string = "1.0.0" +// The types did not change between v1.0 and v1.1 +const ImplementedSpecVersion string = "1.1.0" -var supportedVersions = []string{ImplementedSpecVersion} +var supportedVersions = []string{"1.0.0", "1.1.0"} // Register converters for all versions less than the implemented spec version func init() { @@ -38,10 +39,14 @@ func init() { convert.RegisterConverter("0.3.0", supportedVersions, convertFrom04x) convert.RegisterConverter("0.3.1", supportedVersions, convertFrom04x) convert.RegisterConverter("0.4.0", supportedVersions, convertFrom04x) + convert.RegisterConverter("1.0.0", []string{"1.1.0"}, convertFrom100) // Down-converters convert.RegisterConverter("1.0.0", []string{"0.3.0", "0.3.1", "0.4.0"}, convertTo04x) convert.RegisterConverter("1.0.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + convert.RegisterConverter("1.1.0", []string{"0.3.0", "0.3.1", "0.4.0"}, convertTo04x) + convert.RegisterConverter("1.1.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + convert.RegisterConverter("1.1.0", []string{"1.0.0"}, convertFrom100) // Creator convert.RegisterCreator(supportedVersions, NewResult) @@ -90,12 +95,49 @@ type Result struct { DNS types.DNS `json:"dns,omitempty"` } +// Note: DNS should be omit if DNS is empty but default Marshal function +// will output empty structure hence need to write a Marshal function +func (r *Result) MarshalJSON() ([]byte, error) { + // use type alias to escape recursion for json.Marshal() to MarshalJSON() + type fixObjType = Result + + bytes, err := json.Marshal(fixObjType(*r)) //nolint:all + if err != nil { + return nil, err + } + + fixupObj := make(map[string]interface{}) + if err := json.Unmarshal(bytes, &fixupObj); err != nil { + return nil, err + } + + if r.DNS.IsEmpty() { + delete(fixupObj, "dns") + } + + return json.Marshal(fixupObj) +} + +// convertFrom100 does nothing except set the version; the types are the same +func convertFrom100(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*Result) + + result := &Result{ + CNIVersion: toVersion, + Interfaces: fromResult.Interfaces, + IPs: fromResult.IPs, + Routes: fromResult.Routes, + DNS: fromResult.DNS, + } + return result, nil +} + func convertFrom02x(from types.Result, toVersion string) (types.Result, error) { result040, err := convert.Convert(from, "0.4.0") if err != nil { return nil, err } - result100, err := convertFrom04x(result040, ImplementedSpecVersion) + result100, err := convertFrom04x(result040, toVersion) if err != nil { return nil, err } @@ -226,9 +268,12 @@ func (r *Result) PrintTo(writer io.Writer) error { // Interface contains values about the created interfaces type Interface struct { - Name string `json:"name"` - Mac string `json:"mac,omitempty"` - Sandbox string `json:"sandbox,omitempty"` + Name string `json:"name"` + Mac string `json:"mac,omitempty"` + Mtu int 
`json:"mtu,omitempty"` + Sandbox string `json:"sandbox,omitempty"` + SocketPath string `json:"socketPath,omitempty"` + PciID string `json:"pciID,omitempty"` } func (i *Interface) String() string { diff --git a/vendor/github.com/containernetworking/cni/pkg/types/args.go b/vendor/github.com/containernetworking/cni/pkg/types/args.go index 7516f03ef..68a602bfd 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/args.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/args.go @@ -26,8 +26,8 @@ import ( type UnmarshallableBool bool // UnmarshalText implements the encoding.TextUnmarshaler interface. -// Returns boolean true if the string is "1" or "[Tt]rue" -// Returns boolean false if the string is "0" or "[Ff]alse" +// Returns boolean true if the string is "1" or "true" or "True" +// Returns boolean false if the string is "0" or "false" or "False” func (b *UnmarshallableBool) UnmarshalText(data []byte) error { s := strings.ToLower(string(data)) switch s { diff --git a/vendor/github.com/containernetworking/cni/pkg/types/create/create.go b/vendor/github.com/containernetworking/cni/pkg/types/create/create.go index ed28b33e8..452cb6220 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/create/create.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/create/create.go @@ -19,6 +19,9 @@ import ( "fmt" "github.com/containernetworking/cni/pkg/types" + _ "github.com/containernetworking/cni/pkg/types/020" + _ "github.com/containernetworking/cni/pkg/types/040" + _ "github.com/containernetworking/cni/pkg/types/100" convert "github.com/containernetworking/cni/pkg/types/internal" ) diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go index fba17dfc0..193ac46ef 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/types.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go @@ -64,16 +64,55 @@ type NetConf struct { Type string `json:"type,omitempty"` Capabilities map[string]bool `json:"capabilities,omitempty"` IPAM IPAM `json:"ipam,omitempty"` - DNS DNS `json:"dns"` + DNS DNS `json:"dns,omitempty"` RawPrevResult map[string]interface{} `json:"prevResult,omitempty"` PrevResult Result `json:"-"` + + // ValidAttachments is only supplied when executing a GC operation + ValidAttachments []GCAttachment `json:"cni.dev/valid-attachments,omitempty"` +} + +// GCAttachment is the parameters to a GC call -- namely, +// the container ID and ifname pair that represents a +// still-valid attachment. +type GCAttachment struct { + ContainerID string `json:"containerID"` + IfName string `json:"ifname"` +} + +// Note: DNS should be omit if DNS is empty but default Marshal function +// will output empty structure hence need to write a Marshal function +func (n *NetConf) MarshalJSON() ([]byte, error) { + // use type alias to escape recursion for json.Marshal() to MarshalJSON() + type fixObjType = NetConf + + bytes, err := json.Marshal(fixObjType(*n)) //nolint:all + if err != nil { + return nil, err + } + + fixupObj := make(map[string]interface{}) + if err := json.Unmarshal(bytes, &fixupObj); err != nil { + return nil, err + } + + if n.DNS.IsEmpty() { + delete(fixupObj, "dns") + } + + return json.Marshal(fixupObj) } type IPAM struct { Type string `json:"type,omitempty"` } +// IsEmpty returns true if IPAM structure has no value, otherwise return false +func (i *IPAM) IsEmpty() bool { + return i.Type == "" +} + // NetConfList describes an ordered list of networks. 
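[Editor's illustration, not part of the vendored patch: a short example of the extended Route type above being marshalled with its new fields. Values and the expected output are examples only.]

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net"

	"github.com/containernetworking/cni/pkg/types"
)

func main() {
	table := 100
	_, dst, _ := net.ParseCIDR("10.0.0.0/24")

	r := types.Route{
		Dst:      *dst,
		GW:       net.ParseIP("10.0.0.1"),
		MTU:      1400,
		AdvMSS:   1360,
		Priority: 10,
		Table:    &table, // Scope left nil, so it is omitted from the JSON
	}

	out, err := json.Marshal(r)
	if err != nil {
		log.Fatal(err)
	}
	// prints roughly:
	// {"dst":"10.0.0.0/24","gw":"10.0.0.1","mtu":1400,"advmss":1360,"priority":10,"table":100}
	fmt.Println(string(out))
}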
type NetConfList struct { CNIVersion string `json:"cniVersion,omitempty"` @@ -116,31 +155,48 @@ type DNS struct { Options []string `json:"options,omitempty"` } +// IsEmpty returns true if DNS structure has no value, otherwise return false +func (d *DNS) IsEmpty() bool { + if len(d.Nameservers) == 0 && d.Domain == "" && len(d.Search) == 0 && len(d.Options) == 0 { + return true + } + return false +} + func (d *DNS) Copy() *DNS { if d == nil { return nil } to := &DNS{Domain: d.Domain} - for _, ns := range d.Nameservers { - to.Nameservers = append(to.Nameservers, ns) - } - for _, s := range d.Search { - to.Search = append(to.Search, s) - } - for _, o := range d.Options { - to.Options = append(to.Options, o) - } + to.Nameservers = append(to.Nameservers, d.Nameservers...) + to.Search = append(to.Search, d.Search...) + to.Options = append(to.Options, d.Options...) return to } type Route struct { - Dst net.IPNet - GW net.IP + Dst net.IPNet + GW net.IP + MTU int + AdvMSS int + Priority int + Table *int + Scope *int } func (r *Route) String() string { - return fmt.Sprintf("%+v", *r) + table := "" + if r.Table != nil { + table = fmt.Sprintf("%d", *r.Table) + } + + scope := "" + if r.Scope != nil { + scope = fmt.Sprintf("%d", *r.Scope) + } + + return fmt.Sprintf("{Dst:%+v GW:%v MTU:%d AdvMSS:%d Priority:%d Table:%s Scope:%s}", r.Dst, r.GW, r.MTU, r.AdvMSS, r.Priority, table, scope) } func (r *Route) Copy() *Route { @@ -148,14 +204,30 @@ func (r *Route) Copy() *Route { return nil } - return &Route{ - Dst: r.Dst, - GW: r.GW, + route := &Route{ + Dst: r.Dst, + GW: r.GW, + MTU: r.MTU, + AdvMSS: r.AdvMSS, + Priority: r.Priority, + Scope: r.Scope, + } + + if r.Table != nil { + table := *r.Table + route.Table = &table } + + if r.Scope != nil { + scope := *r.Scope + route.Scope = &scope + } + + return route } // Well known error codes -// see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes +// see https://github.com/containernetworking/cni/blob/main/SPEC.md#well-known-error-codes const ( ErrUnknown uint = iota // 0 ErrIncompatibleCNIVersion // 1 @@ -165,6 +237,7 @@ const ( ErrIOFailure // 5 ErrDecodingFailure // 6 ErrInvalidNetworkConfig // 7 + ErrInvalidNetNS // 8 ErrTryAgainLater uint = 11 ErrInternal uint = 999 ) @@ -200,8 +273,13 @@ func (e *Error) Print() error { // JSON (un)marshallable types type route struct { - Dst IPNet `json:"dst"` - GW net.IP `json:"gw,omitempty"` + Dst IPNet `json:"dst"` + GW net.IP `json:"gw,omitempty"` + MTU int `json:"mtu,omitempty"` + AdvMSS int `json:"advmss,omitempty"` + Priority int `json:"priority,omitempty"` + Table *int `json:"table,omitempty"` + Scope *int `json:"scope,omitempty"` } func (r *Route) UnmarshalJSON(data []byte) error { @@ -212,13 +290,24 @@ func (r *Route) UnmarshalJSON(data []byte) error { r.Dst = net.IPNet(rt.Dst) r.GW = rt.GW + r.MTU = rt.MTU + r.AdvMSS = rt.AdvMSS + r.Priority = rt.Priority + r.Table = rt.Table + r.Scope = rt.Scope + return nil } func (r Route) MarshalJSON() ([]byte, error) { rt := route{ - Dst: IPNet(r.Dst), - GW: r.GW, + Dst: IPNet(r.Dst), + GW: r.GW, + MTU: r.MTU, + AdvMSS: r.AdvMSS, + Priority: r.Priority, + Table: r.Table, + Scope: r.Scope, } return json.Marshal(rt) diff --git a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go index b8ec38874..1981d2556 100644 --- a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go +++ b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go @@ -36,7 +36,6 @@ var 
cniReg = regexp.MustCompile(`^` + cniValidNameChars + `*$`) // ValidateContainerID will validate that the supplied containerID is not empty does not contain invalid characters func ValidateContainerID(containerID string) *types.Error { - if containerID == "" { return types.NewError(types.ErrUnknownContainer, "missing containerID", "") } @@ -48,7 +47,6 @@ func ValidateContainerID(containerID string) *types.Error { // ValidateNetworkName will validate that the supplied networkName does not contain invalid characters func ValidateNetworkName(networkName string) *types.Error { - if networkName == "" { return types.NewError(types.ErrInvalidNetworkConfig, "missing network name:", "") } @@ -58,11 +56,11 @@ func ValidateNetworkName(networkName string) *types.Error { return nil } -// ValidateInterfaceName will validate the interface name based on the three rules below +// ValidateInterfaceName will validate the interface name based on the four rules below // 1. The name must not be empty // 2. The name must be less than 16 characters // 3. The name must not be "." or ".." -// 3. The name must not contain / or : or any whitespace characters +// 4. The name must not contain / or : or any whitespace characters // ref to https://github.com/torvalds/linux/blob/master/net/core/dev.c#L1024 func ValidateInterfaceName(ifName string) *types.Error { if len(ifName) == 0 { diff --git a/vendor/github.com/containernetworking/cni/pkg/version/version.go b/vendor/github.com/containernetworking/cni/pkg/version/version.go index 1326f8038..a4d442c8e 100644 --- a/vendor/github.com/containernetworking/cni/pkg/version/version.go +++ b/vendor/github.com/containernetworking/cni/pkg/version/version.go @@ -19,13 +19,12 @@ import ( "fmt" "github.com/containernetworking/cni/pkg/types" - types100 "github.com/containernetworking/cni/pkg/types/100" "github.com/containernetworking/cni/pkg/types/create" ) // Current reports the version of the CNI spec implemented by this library func Current() string { - return types100.ImplementedSpecVersion + return "1.1.0" } // Legacy PluginInfo describes a plugin that is backwards compatible with the @@ -35,8 +34,10 @@ func Current() string { // // Any future CNI spec versions which meet this definition should be added to // this list. -var Legacy = PluginSupports("0.1.0", "0.2.0") -var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0") +var ( + Legacy = PluginSupports("0.1.0", "0.2.0") + All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0", "1.1.0") +) // VersionsFrom returns a list of versions starting from min, inclusive func VersionsStartingFrom(min string) PluginInfo { diff --git a/vendor/github.com/d2g/dhcp4server/LICENSE b/vendor/github.com/d2g/dhcp4server/LICENSE deleted file mode 100644 index c33dcc7c9..000000000 --- a/vendor/github.com/d2g/dhcp4server/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. 
“Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. 
Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. 
If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/d2g/dhcp4server/README.md b/vendor/github.com/d2g/dhcp4server/README.md deleted file mode 100644 index 92230542b..000000000 --- a/vendor/github.com/d2g/dhcp4server/README.md +++ /dev/null @@ -1,4 +0,0 @@ -dhcp4server [![GoDoc](https://godoc.org/github.com/d2g/dhcp4server?status.svg)](http://godoc.org/github.com/d2g/dhcp4server) [![Coverage Status](https://coveralls.io/repos/d2g/dhcp4server/badge.svg)](https://coveralls.io/r/d2g/dhcp4server) [![Codeship Status for d2g/dhcp4server](https://codeship.com/projects/ff96ded0-89cb-0132-8209-4635861fb902/status?branch=master)](https://codeship.com/projects/59804) -=========== - -DHCP Server diff --git a/vendor/github.com/d2g/dhcp4server/leasepool/lease.go b/vendor/github.com/d2g/dhcp4server/leasepool/lease.go deleted file mode 100644 index 98306eca1..000000000 --- a/vendor/github.com/d2g/dhcp4server/leasepool/lease.go +++ /dev/null @@ -1,102 +0,0 @@ -package leasepool - -import ( - "bytes" - "encoding/hex" - "encoding/json" - "fmt" - "net" - "time" -) - -type LeaseStatus int - -const ( - Free LeaseStatus = 0 - Reserved LeaseStatus = 1 - Active LeaseStatus = 2 -) - -type Lease struct { - IP net.IP //The IP of the Lease - Status LeaseStatus //Are Reserved, Active or Free - MACAddress net.HardwareAddr //Mac Address of the Device - ClientID []byte //ClientID of the request - Hostname string //Hostname From option 12 - Expiry time.Time //Expiry Time -} - -//leaseMarshal is a mirror of Lease used for marshalling, since -//net.HardwareAddr has no native marshalling capability. 
-type leaseMarshal struct { - IP string - Status int - MACAddress string - ClientID string - Hostname string - Expiry time.Time -} - -func (this Lease) MarshalJSON() ([]byte, error) { - return json.Marshal(leaseMarshal{ - IP: this.IP.String(), - Status: int(this.Status), - MACAddress: this.MACAddress.String(), - ClientID: hex.EncodeToString(this.ClientID), - Hostname: this.Hostname, - Expiry: this.Expiry, - }) -} - -func (this *Lease) UnmarshalJSON(data []byte) error { - stringUnMarshal := leaseMarshal{} - err := json.Unmarshal(data, &stringUnMarshal) - if err != nil { - return err - } - - this.IP = net.ParseIP(stringUnMarshal.IP) - this.Status = LeaseStatus(stringUnMarshal.Status) - if stringUnMarshal.MACAddress != "" { - this.MACAddress, err = net.ParseMAC(stringUnMarshal.MACAddress) - if err != nil { - return fmt.Errorf("error parsing MAC address: %v", err) - } - } - this.ClientID, err = hex.DecodeString(stringUnMarshal.ClientID) - if err != nil { - return fmt.Errorf("error decoding clientID: %v", err) - } - this.Hostname = stringUnMarshal.Hostname - this.Expiry = stringUnMarshal.Expiry - - return nil -} - -func (this Lease) Equal(other Lease) bool { - if !this.IP.Equal(other.IP) { - return false - } - - if int(this.Status) != int(other.Status) { - return false - } - - if this.MACAddress.String() != other.MACAddress.String() { - return false - } - - if !bytes.Equal(this.ClientID, other.ClientID) { - return false - } - - if this.Hostname != other.Hostname { - return false - } - - if !this.Expiry.Equal(other.Expiry) { - return false - } - - return true -} diff --git a/vendor/github.com/tidwall/gjson/LICENSE b/vendor/github.com/tidwall/gjson/LICENSE new file mode 100644 index 000000000..58f5819a4 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tidwall/gjson/README.md b/vendor/github.com/tidwall/gjson/README.md new file mode 100644 index 000000000..96b2e4dc3 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/README.md @@ -0,0 +1,488 @@ +

+GJSON
+[GoDoc] [GJSON Playground] [GJSON Syntax]
+
+get json values quickly
+ +GJSON is a Go package that provides a [fast](#performance) and [simple](#get-a-value) way to get values from a json document. +It has features such as [one line retrieval](#get-a-value), [dot notation paths](#path-syntax), [iteration](#iterate-through-an-object-or-array), and [parsing json lines](#json-lines). + +Also check out [SJSON](https://github.com/tidwall/sjson) for modifying json, and the [JJ](https://github.com/tidwall/jj) command line tool. + +This README is a quick overview of how to use GJSON, for more information check out [GJSON Syntax](SYNTAX.md). + +GJSON is also available for [Python](https://github.com/volans-/gjson-py) and [Rust](https://github.com/tidwall/gjson.rs) + +Getting Started +=============== + +## Installing + +To start using GJSON, install Go and run `go get`: + +```sh +$ go get -u github.com/tidwall/gjson +``` + +This will retrieve the library. + +## Get a value +Get searches json for the specified path. A path is in dot syntax, such as "name.last" or "age". When the value is found it's returned immediately. + +```go +package main + +import "github.com/tidwall/gjson" + +const json = `{"name":{"first":"Janet","last":"Prichard"},"age":47}` + +func main() { + value := gjson.Get(json, "name.last") + println(value.String()) +} +``` + +This will print: + +``` +Prichard +``` +*There's also the [GetMany](#get-multiple-values-at-once) function to get multiple values at once, and [GetBytes](#working-with-bytes) for working with JSON byte slices.* + +## Path Syntax + +Below is a quick overview of the path syntax, for more complete information please +check out [GJSON Syntax](SYNTAX.md). + +A path is a series of keys separated by a dot. +A key may contain special wildcard characters '\*' and '?'. +To access an array value use the index as the key. +To get the number of elements in an array or to access a child path, use the '#' character. +The dot and wildcard characters can be escaped with '\\'. + +```json +{ + "name": {"first": "Tom", "last": "Anderson"}, + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"first": "Dale", "last": "Murphy", "age": 44, "nets": ["ig", "fb", "tw"]}, + {"first": "Roger", "last": "Craig", "age": 68, "nets": ["fb", "tw"]}, + {"first": "Jane", "last": "Murphy", "age": 47, "nets": ["ig", "tw"]} + ] +} +``` +``` +"name.last" >> "Anderson" +"age" >> 37 +"children" >> ["Sara","Alex","Jack"] +"children.#" >> 3 +"children.1" >> "Alex" +"child*.2" >> "Jack" +"c?ildren.0" >> "Sara" +"fav\.movie" >> "Deer Hunter" +"friends.#.first" >> ["Dale","Roger","Jane"] +"friends.1.last" >> "Craig" +``` + +You can also query an array for the first match by using `#(...)`, or find all +matches with `#(...)#`. Queries support the `==`, `!=`, `<`, `<=`, `>`, `>=` +comparison operators and the simple pattern matching `%` (like) and `!%` +(not like) operators. + +``` +friends.#(last=="Murphy").first >> "Dale" +friends.#(last=="Murphy")#.first >> ["Dale","Jane"] +friends.#(age>45)#.last >> ["Craig","Murphy"] +friends.#(first%"D*").last >> "Murphy" +friends.#(first!%"D*").last >> "Craig" +friends.#(nets.#(=="fb"))#.first >> ["Dale","Roger"] +``` + +*Please note that prior to v1.3.0, queries used the `#[...]` brackets. This was +changed in v1.3.0 as to avoid confusion with the new +[multipath](SYNTAX.md#multipaths) syntax. For backwards compatibility, +`#[...]` will continue to work until the next major release.* + +## Result Type + +GJSON supports the json types `string`, `number`, `bool`, and `null`. 
+Arrays and Objects are returned as their raw json types. + +The `Result` type holds one of these: + +``` +bool, for JSON booleans +float64, for JSON numbers +string, for JSON string literals +nil, for JSON null +``` + +To directly access the value: + +```go +result.Type // can be String, Number, True, False, Null, or JSON +result.Str // holds the string +result.Num // holds the float64 number +result.Raw // holds the raw json +result.Index // index of raw value in original json, zero means index unknown +result.Indexes // indexes of all the elements that match on a path containing the '#' query character. +``` + +There are a variety of handy functions that work on a result: + +```go +result.Exists() bool +result.Value() interface{} +result.Int() int64 +result.Uint() uint64 +result.Float() float64 +result.String() string +result.Bool() bool +result.Time() time.Time +result.Array() []gjson.Result +result.Map() map[string]gjson.Result +result.Get(path string) Result +result.ForEach(iterator func(key, value Result) bool) +result.Less(token Result, caseSensitive bool) bool +``` + +The `result.Value()` function returns an `interface{}` which requires type assertion and is one of the following Go types: + +```go +boolean >> bool +number >> float64 +string >> string +null >> nil +array >> []interface{} +object >> map[string]interface{} +``` + +The `result.Array()` function returns back an array of values. +If the result represents a non-existent value, then an empty array will be returned. +If the result is not a JSON array, the return value will be an array containing one result. + +### 64-bit integers + +The `result.Int()` and `result.Uint()` calls are capable of reading all 64 bits, allowing for large JSON integers. + +```go +result.Int() int64 // -9223372036854775808 to 9223372036854775807 +result.Uint() uint64 // 0 to 18446744073709551615 +``` + +## Modifiers and path chaining + +New in version 1.2 is support for modifier functions and path chaining. + +A modifier is a path component that performs custom processing on the +json. + +Multiple paths can be "chained" together using the pipe character. +This is useful for getting results from a modified query. + +For example, using the built-in `@reverse` modifier on the above json document, +we'll get `children` array and reverse the order: + +``` +"children|@reverse" >> ["Jack","Alex","Sara"] +"children|@reverse|0" >> "Jack" +``` + +There are currently the following built-in modifiers: + +- `@reverse`: Reverse an array or the members of an object. +- `@ugly`: Remove all whitespace from a json document. +- `@pretty`: Make the json document more human readable. +- `@this`: Returns the current element. It can be used to retrieve the root element. +- `@valid`: Ensure the json document is valid. +- `@flatten`: Flattens an array. +- `@join`: Joins multiple objects into a single object. +- `@keys`: Returns an array of keys for an object. +- `@values`: Returns an array of values for an object. +- `@tostr`: Converts json to a string. Wraps a json string. +- `@fromstr`: Converts a string from json. Unwraps a json string. +- `@group`: Groups arrays of objects. See [e4fc67c](https://github.com/tidwall/gjson/commit/e4fc67c92aeebf2089fabc7872f010e340d105db). +- `@dig`: Search for a value without providing its entire path. See [e8e87f2](https://github.com/tidwall/gjson/commit/e8e87f2a00dc41f3aba5631094e21f59a8cf8cbf). + +### Modifier arguments + +A modifier may accept an optional argument. The argument can be a valid JSON +document or just characters. 
+ +For example, the `@pretty` modifier takes a json object as its argument. + +``` +@pretty:{"sortKeys":true} +``` + +Which makes the json pretty and orders all of its keys. + +```json +{ + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"age": 44, "first": "Dale", "last": "Murphy"}, + {"age": 68, "first": "Roger", "last": "Craig"}, + {"age": 47, "first": "Jane", "last": "Murphy"} + ], + "name": {"first": "Tom", "last": "Anderson"} +} +``` + +*The full list of `@pretty` options are `sortKeys`, `indent`, `prefix`, and `width`. +Please see [Pretty Options](https://github.com/tidwall/pretty#customized-output) for more information.* + +### Custom modifiers + +You can also add custom modifiers. + +For example, here we create a modifier that makes the entire json document upper +or lower case. + +```go +gjson.AddModifier("case", func(json, arg string) string { + if arg == "upper" { + return strings.ToUpper(json) + } + if arg == "lower" { + return strings.ToLower(json) + } + return json +}) +``` + +``` +"children|@case:upper" >> ["SARA","ALEX","JACK"] +"children|@case:lower|@reverse" >> ["jack","alex","sara"] +``` + +## JSON Lines + +There's support for [JSON Lines](http://jsonlines.org/) using the `..` prefix, which treats a multilined document as an array. + +For example: + +``` +{"name": "Gilbert", "age": 61} +{"name": "Alexa", "age": 34} +{"name": "May", "age": 57} +{"name": "Deloise", "age": 44} +``` + +``` +..# >> 4 +..1 >> {"name": "Alexa", "age": 34} +..3 >> {"name": "Deloise", "age": 44} +..#.name >> ["Gilbert","Alexa","May","Deloise"] +..#(name="May").age >> 57 +``` + +The `ForEachLines` function will iterate through JSON lines. + +```go +gjson.ForEachLine(json, func(line gjson.Result) bool{ + println(line.String()) + return true +}) +``` + +## Get nested array values + +Suppose you want all the last names from the following json: + +```json +{ + "programmers": [ + { + "firstName": "Janet", + "lastName": "McLaughlin", + }, { + "firstName": "Elliotte", + "lastName": "Hunter", + }, { + "firstName": "Jason", + "lastName": "Harold", + } + ] +} +``` + +You would use the path "programmers.#.lastName" like such: + +```go +result := gjson.Get(json, "programmers.#.lastName") +for _, name := range result.Array() { + println(name.String()) +} +``` + +You can also query an object inside an array: + +```go +name := gjson.Get(json, `programmers.#(lastName="Hunter").firstName`) +println(name.String()) // prints "Elliotte" +``` + +## Iterate through an object or array + +The `ForEach` function allows for quickly iterating through an object or array. +The key and value are passed to the iterator function for objects. +Only the value is passed for arrays. +Returning `false` from an iterator will stop iteration. + +```go +result := gjson.Get(json, "programmers") +result.ForEach(func(key, value gjson.Result) bool { + println(value.String()) + return true // keep iterating +}) +``` + +## Simple Parse and Get + +There's a `Parse(json)` function that will do a simple parse, and `result.Get(path)` that will search a result. + +For example, all of these will return the same result: + +```go +gjson.Parse(json).Get("name").Get("last") +gjson.Get(json, "name").Get("last") +gjson.Get(json, "name.last") +``` + +## Check for the existence of a value + +Sometimes you just want to know if a value exists. 
+ +```go +value := gjson.Get(json, "name.last") +if !value.Exists() { + println("no last name") +} else { + println(value.String()) +} + +// Or as one step +if gjson.Get(json, "name.last").Exists() { + println("has a last name") +} +``` + +## Validate JSON + +The `Get*` and `Parse*` functions expects that the json is well-formed. Bad json will not panic, but it may return back unexpected results. + +If you are consuming JSON from an unpredictable source then you may want to validate prior to using GJSON. + +```go +if !gjson.Valid(json) { + return errors.New("invalid json") +} +value := gjson.Get(json, "name.last") +``` + +## Unmarshal to a map + +To unmarshal to a `map[string]interface{}`: + +```go +m, ok := gjson.Parse(json).Value().(map[string]interface{}) +if !ok { + // not a map +} +``` + +## Working with Bytes + +If your JSON is contained in a `[]byte` slice, there's the [GetBytes](https://godoc.org/github.com/tidwall/gjson#GetBytes) function. This is preferred over `Get(string(data), path)`. + +```go +var json []byte = ... +result := gjson.GetBytes(json, path) +``` + +If you are using the `gjson.GetBytes(json, path)` function and you want to avoid converting `result.Raw` to a `[]byte`, then you can use this pattern: + +```go +var json []byte = ... +result := gjson.GetBytes(json, path) +var raw []byte +if result.Index > 0 { + raw = json[result.Index:result.Index+len(result.Raw)] +} else { + raw = []byte(result.Raw) +} +``` + +This is a best-effort no allocation sub slice of the original json. This method utilizes the `result.Index` field, which is the position of the raw data in the original json. It's possible that the value of `result.Index` equals zero, in which case the `result.Raw` is converted to a `[]byte`. + +## Performance + +Benchmarks of GJSON alongside [encoding/json](https://golang.org/pkg/encoding/json/), +[ffjson](https://github.com/pquerna/ffjson), +[EasyJSON](https://github.com/mailru/easyjson), +[jsonparser](https://github.com/buger/jsonparser), +and [json-iterator](https://github.com/json-iterator/go) + +``` +BenchmarkGJSONGet-16 11644512 311 ns/op 0 B/op 0 allocs/op +BenchmarkGJSONUnmarshalMap-16 1122678 3094 ns/op 1920 B/op 26 allocs/op +BenchmarkJSONUnmarshalMap-16 516681 6810 ns/op 2944 B/op 69 allocs/op +BenchmarkJSONUnmarshalStruct-16 697053 5400 ns/op 928 B/op 13 allocs/op +BenchmarkJSONDecoder-16 330450 10217 ns/op 3845 B/op 160 allocs/op +BenchmarkFFJSONLexer-16 1424979 2585 ns/op 880 B/op 8 allocs/op +BenchmarkEasyJSONLexer-16 3000000 729 ns/op 501 B/op 5 allocs/op +BenchmarkJSONParserGet-16 3000000 366 ns/op 21 B/op 0 allocs/op +BenchmarkJSONIterator-16 3000000 869 ns/op 693 B/op 14 allocs/op +``` + +JSON document used: + +```json +{ + "widget": { + "debug": "on", + "window": { + "title": "Sample Konfabulator Widget", + "name": "main_window", + "width": 500, + "height": 500 + }, + "image": { + "src": "Images/Sun.png", + "hOffset": 250, + "vOffset": 250, + "alignment": "center" + }, + "text": { + "data": "Click Here", + "size": 36, + "style": "bold", + "vOffset": 100, + "alignment": "center", + "onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;" + } + } +} +``` + +Each operation was rotated through one of the following search paths: + +``` +widget.window.name +widget.image.hOffset +widget.text.onMouseUp +``` + +*These benchmarks were run on a MacBook Pro 16" 2.4 GHz Intel Core i9 using Go 1.17 and can be found [here](https://github.com/tidwall/gjson-benchmarks).* diff --git a/vendor/github.com/tidwall/gjson/SYNTAX.md 
b/vendor/github.com/tidwall/gjson/SYNTAX.md new file mode 100644 index 000000000..6721d7f51 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/SYNTAX.md @@ -0,0 +1,360 @@ +# GJSON Path Syntax + +A GJSON Path is a text string syntax that describes a search pattern for quickly retreiving values from a JSON payload. + +This document is designed to explain the structure of a GJSON Path through examples. + +- [Path structure](#path-structure) +- [Basic](#basic) +- [Wildcards](#wildcards) +- [Escape Character](#escape-character) +- [Arrays](#arrays) +- [Queries](#queries) +- [Dot vs Pipe](#dot-vs-pipe) +- [Modifiers](#modifiers) +- [Multipaths](#multipaths) +- [Literals](#literals) + +The definitive implemenation is [github.com/tidwall/gjson](https://github.com/tidwall/gjson). +Use the [GJSON Playground](https://gjson.dev) to experiment with the syntax online. + +## Path structure + +A GJSON Path is intended to be easily expressed as a series of components seperated by a `.` character. + +Along with `.` character, there are a few more that have special meaning, including `|`, `#`, `@`, `\`, `*`, `!`, and `?`. + +## Example + +Given this JSON + +```json +{ + "name": {"first": "Tom", "last": "Anderson"}, + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"first": "Dale", "last": "Murphy", "age": 44, "nets": ["ig", "fb", "tw"]}, + {"first": "Roger", "last": "Craig", "age": 68, "nets": ["fb", "tw"]}, + {"first": "Jane", "last": "Murphy", "age": 47, "nets": ["ig", "tw"]} + ] +} +``` + +The following GJSON Paths evaluate to the accompanying values. + +### Basic + +In many cases you'll just want to retreive values by object name or array index. + +```go +name.last "Anderson" +name.first "Tom" +age 37 +children ["Sara","Alex","Jack"] +children.0 "Sara" +children.1 "Alex" +friends.1 {"first": "Roger", "last": "Craig", "age": 68} +friends.1.first "Roger" +``` + +### Wildcards + +A key may contain the special wildcard characters `*` and `?`. +The `*` will match on any zero+ characters, and `?` matches on any one character. + +```go +child*.2 "Jack" +c?ildren.0 "Sara" +``` + +### Escape character + +Special purpose characters, such as `.`, `*`, and `?` can be escaped with `\`. + +```go +fav\.movie "Deer Hunter" +``` + +You'll also need to make sure that the `\` character is correctly escaped when hardcoding a path in your source code. + +```go +// Go +val := gjson.Get(json, "fav\\.movie") // must escape the slash +val := gjson.Get(json, `fav\.movie`) // no need to escape the slash +``` + +```rust +// Rust +let val = gjson::get(json, "fav\\.movie") // must escape the slash +let val = gjson::get(json, r#"fav\.movie"#) // no need to escape the slash +``` + + +### Arrays + +The `#` character allows for digging into JSON Arrays. + +To get the length of an array you'll just use the `#` all by itself. + +```go +friends.# 3 +friends.#.age [44,68,47] +``` + +### Queries + +You can also query an array for the first match by using `#(...)`, or find all matches with `#(...)#`. +Queries support the `==`, `!=`, `<`, `<=`, `>`, `>=` comparison operators, +and the simple pattern matching `%` (like) and `!%` (not like) operators. + +```go +friends.#(last=="Murphy").first "Dale" +friends.#(last=="Murphy")#.first ["Dale","Jane"] +friends.#(age>45)#.last ["Craig","Murphy"] +friends.#(first%"D*").last "Murphy" +friends.#(first!%"D*").last "Craig" +``` + +To query for a non-object value in an array, you can forgo the string to the right of the operator. 
+ +```go +children.#(!%"*a*") "Alex" +children.#(%"*a*")# ["Sara","Jack"] +``` + +Nested queries are allowed. + +```go +friends.#(nets.#(=="fb"))#.first >> ["Dale","Roger"] +``` + +*Please note that prior to v1.3.0, queries used the `#[...]` brackets. This was +changed in v1.3.0 as to avoid confusion with the new [multipath](#multipaths) +syntax. For backwards compatibility, `#[...]` will continue to work until the +next major release.* + +The `~` (tilde) operator will convert a value to a boolean before comparison. + +Supported tilde comparison type are: + +``` +~true Converts true-ish values to true +~false Converts false-ish and non-existent values to true +~null Converts null and non-existent values to true +~* Converts any existing value to true +``` + +For example, using the following JSON: + +```json +{ + "vals": [ + { "a": 1, "b": "data" }, + { "a": 2, "b": true }, + { "a": 3, "b": false }, + { "a": 4, "b": "0" }, + { "a": 5, "b": 0 }, + { "a": 6, "b": "1" }, + { "a": 7, "b": 1 }, + { "a": 8, "b": "true" }, + { "a": 9, "b": false }, + { "a": 10, "b": null }, + { "a": 11 } + ] +} +``` + +To query for all true-ish or false-ish values: + +``` +vals.#(b==~true)#.a >> [2,6,7,8] +vals.#(b==~false)#.a >> [3,4,5,9,10,11] +``` + +The last value which was non-existent is treated as `false` + +To query for null and explicit value existence: + +``` +vals.#(b==~null)#.a >> [10,11] +vals.#(b==~*)#.a >> [1,2,3,4,5,6,7,8,9,10] +vals.#(b!=~*)#.a >> [11] +``` + +### Dot vs Pipe + +The `.` is standard separator, but it's also possible to use a `|`. +In most cases they both end up returning the same results. +The cases where`|` differs from `.` is when it's used after the `#` for [Arrays](#arrays) and [Queries](#queries). + +Here are some examples + +```go +friends.0.first "Dale" +friends|0.first "Dale" +friends.0|first "Dale" +friends|0|first "Dale" +friends|# 3 +friends.# 3 +friends.#(last="Murphy")# [{"first": "Dale", "last": "Murphy", "age": 44},{"first": "Jane", "last": "Murphy", "age": 47}] +friends.#(last="Murphy")#.first ["Dale","Jane"] +friends.#(last="Murphy")#|first +friends.#(last="Murphy")#.0 [] +friends.#(last="Murphy")#|0 {"first": "Dale", "last": "Murphy", "age": 44} +friends.#(last="Murphy")#.# [] +friends.#(last="Murphy")#|# 2 +``` + +Let's break down a few of these. + +The path `friends.#(last="Murphy")#` all by itself results in + +```json +[{"first": "Dale", "last": "Murphy", "age": 44},{"first": "Jane", "last": "Murphy", "age": 47}] +``` + +The `.first` suffix will process the `first` path on each array element *before* returning the results. Which becomes + +```json +["Dale","Jane"] +``` + +But the `|first` suffix actually processes the `first` path *after* the previous result. +Since the previous result is an array, not an object, it's not possible to process +because `first` does not exist. + +Yet, `|0` suffix returns + +```json +{"first": "Dale", "last": "Murphy", "age": 44} +``` + +Because `0` is the first index of the previous result. + +### Modifiers + +A modifier is a path component that performs custom processing on the JSON. + +For example, using the built-in `@reverse` modifier on the above JSON payload will reverse the `children` array: + +```go +children.@reverse ["Jack","Alex","Sara"] +children.@reverse.0 "Jack" +``` + +There are currently the following built-in modifiers: + +- `@reverse`: Reverse an array or the members of an object. +- `@ugly`: Remove all whitespace from JSON. +- `@pretty`: Make the JSON more human readable. 
+- `@this`: Returns the current element. It can be used to retrieve the root element. +- `@valid`: Ensure the json document is valid. +- `@flatten`: Flattens an array. +- `@join`: Joins multiple objects into a single object. +- `@keys`: Returns an array of keys for an object. +- `@values`: Returns an array of values for an object. +- `@tostr`: Converts json to a string. Wraps a json string. +- `@fromstr`: Converts a string from json. Unwraps a json string. +- `@group`: Groups arrays of objects. See [e4fc67c](https://github.com/tidwall/gjson/commit/e4fc67c92aeebf2089fabc7872f010e340d105db). +- `@dig`: Search for a value without providing its entire path. See [e8e87f2](https://github.com/tidwall/gjson/commit/e8e87f2a00dc41f3aba5631094e21f59a8cf8cbf). + +#### Modifier arguments + +A modifier may accept an optional argument. The argument can be a valid JSON payload or just characters. + +For example, the `@pretty` modifier takes a json object as its argument. + +``` +@pretty:{"sortKeys":true} +``` + +Which makes the json pretty and orders all of its keys. + +```json +{ + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"age": 44, "first": "Dale", "last": "Murphy"}, + {"age": 68, "first": "Roger", "last": "Craig"}, + {"age": 47, "first": "Jane", "last": "Murphy"} + ], + "name": {"first": "Tom", "last": "Anderson"} +} +``` + +*The full list of `@pretty` options are `sortKeys`, `indent`, `prefix`, and `width`. +Please see [Pretty Options](https://github.com/tidwall/pretty#customized-output) for more information.* + +#### Custom modifiers + +You can also add custom modifiers. + +For example, here we create a modifier which makes the entire JSON payload upper or lower case. + +```go +gjson.AddModifier("case", func(json, arg string) string { + if arg == "upper" { + return strings.ToUpper(json) + } + if arg == "lower" { + return strings.ToLower(json) + } + return json +}) +"children.@case:upper" ["SARA","ALEX","JACK"] +"children.@case:lower.@reverse" ["jack","alex","sara"] +``` + +*Note: Custom modifiers are not yet available in the Rust version* + +### Multipaths + +Starting with v1.3.0, GJSON added the ability to join multiple paths together +to form new documents. Wrapping comma-separated paths between `[...]` or +`{...}` will result in a new array or object, respectively. + +For example, using the given multipath: + +``` +{name.first,age,"the_murphys":friends.#(last="Murphy")#.first} +``` + +Here we selected the first name, age, and the first name for friends with the +last name "Murphy". + +You'll notice that an optional key can be provided, in this case +"the_murphys", to force assign a key to a value. Otherwise, the name of the +actual field will be used, in this case "first". If a name cannot be +determined, then "_" is used. + +This results in + +```json +{"first":"Tom","age":37,"the_murphys":["Dale","Jane"]} +``` + +### Literals + +Starting with v1.12.0, GJSON added support of json literals, which provides a way for constructing static blocks of json. This is can be particularly useful when constructing a new json document using [multipaths](#multipaths). + +A json literal begins with the '!' declaration character. + +For example, using the given multipath: + +``` +{name.first,age,"company":!"Happysoft","employed":!true} +``` + +Here we selected the first name and age. Then add two new fields, "company" and "employed". 
+ +This results in + +```json +{"first":"Tom","age":37,"company":"Happysoft","employed":true} +``` + +*See issue [#249](https://github.com/tidwall/gjson/issues/249) for additional context on JSON Literals.* diff --git a/vendor/github.com/tidwall/gjson/gjson.go b/vendor/github.com/tidwall/gjson/gjson.go new file mode 100644 index 000000000..4acd087c0 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/gjson.go @@ -0,0 +1,3494 @@ +// Package gjson provides searching for json strings. +package gjson + +import ( + "strconv" + "strings" + "time" + "unicode/utf16" + "unicode/utf8" + "unsafe" + + "github.com/tidwall/match" + "github.com/tidwall/pretty" +) + +// Type is Result type +type Type int + +const ( + // Null is a null json value + Null Type = iota + // False is a json false boolean + False + // Number is json number + Number + // String is a json string + String + // True is a json true boolean + True + // JSON is a raw block of JSON + JSON +) + +// String returns a string representation of the type. +func (t Type) String() string { + switch t { + default: + return "" + case Null: + return "Null" + case False: + return "False" + case Number: + return "Number" + case String: + return "String" + case True: + return "True" + case JSON: + return "JSON" + } +} + +// Result represents a json value that is returned from Get(). +type Result struct { + // Type is the json type + Type Type + // Raw is the raw json + Raw string + // Str is the json string + Str string + // Num is the json number + Num float64 + // Index of raw value in original json, zero means index unknown + Index int + // Indexes of all the elements that match on a path containing the '#' + // query character. + Indexes []int +} + +// String returns a string representation of the value. +func (t Result) String() string { + switch t.Type { + default: + return "" + case False: + return "false" + case Number: + if len(t.Raw) == 0 { + // calculated result + return strconv.FormatFloat(t.Num, 'f', -1, 64) + } + var i int + if t.Raw[0] == '-' { + i++ + } + for ; i < len(t.Raw); i++ { + if t.Raw[i] < '0' || t.Raw[i] > '9' { + return strconv.FormatFloat(t.Num, 'f', -1, 64) + } + } + return t.Raw + case String: + return t.Str + case JSON: + return t.Raw + case True: + return "true" + } +} + +// Bool returns an boolean representation. +func (t Result) Bool() bool { + switch t.Type { + default: + return false + case True: + return true + case String: + b, _ := strconv.ParseBool(strings.ToLower(t.Str)) + return b + case Number: + return t.Num != 0 + } +} + +// Int returns an integer representation. +func (t Result) Int() int64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := parseInt(t.Str) + return n + case Number: + // try to directly convert the float64 to int64 + i, ok := safeInt(t.Num) + if ok { + return i + } + // now try to parse the raw string + i, ok = parseInt(t.Raw) + if ok { + return i + } + // fallback to a standard conversion + return int64(t.Num) + } +} + +// Uint returns an unsigned integer representation. 
+func (t Result) Uint() uint64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := parseUint(t.Str) + return n + case Number: + // try to directly convert the float64 to uint64 + i, ok := safeInt(t.Num) + if ok && i >= 0 { + return uint64(i) + } + // now try to parse the raw string + u, ok := parseUint(t.Raw) + if ok { + return u + } + // fallback to a standard conversion + return uint64(t.Num) + } +} + +// Float returns an float64 representation. +func (t Result) Float() float64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := strconv.ParseFloat(t.Str, 64) + return n + case Number: + return t.Num + } +} + +// Time returns a time.Time representation. +func (t Result) Time() time.Time { + res, _ := time.Parse(time.RFC3339, t.String()) + return res +} + +// Array returns back an array of values. +// If the result represents a null value or is non-existent, then an empty +// array will be returned. +// If the result is not a JSON array, the return value will be an +// array containing one result. +func (t Result) Array() []Result { + if t.Type == Null { + return []Result{} + } + if !t.IsArray() { + return []Result{t} + } + r := t.arrayOrMap('[', false) + return r.a +} + +// IsObject returns true if the result value is a JSON object. +func (t Result) IsObject() bool { + return t.Type == JSON && len(t.Raw) > 0 && t.Raw[0] == '{' +} + +// IsArray returns true if the result value is a JSON array. +func (t Result) IsArray() bool { + return t.Type == JSON && len(t.Raw) > 0 && t.Raw[0] == '[' +} + +// IsBool returns true if the result value is a JSON boolean. +func (t Result) IsBool() bool { + return t.Type == True || t.Type == False +} + +// ForEach iterates through values. +// If the result represents a non-existent value, then no values will be +// iterated. If the result is an Object, the iterator will pass the key and +// value of each item. If the result is an Array, the iterator will only pass +// the value of each item. If the result is not a JSON array or object, the +// iterator will pass back one value equal to the result. +func (t Result) ForEach(iterator func(key, value Result) bool) { + if !t.Exists() { + return + } + if t.Type != JSON { + iterator(Result{}, t) + return + } + json := t.Raw + var obj bool + var i int + var key, value Result + for ; i < len(json); i++ { + if json[i] == '{' { + i++ + key.Type = String + obj = true + break + } else if json[i] == '[' { + i++ + key.Type = Number + key.Num = -1 + break + } + if json[i] > ' ' { + return + } + } + var str string + var vesc bool + var ok bool + var idx int + for ; i < len(json); i++ { + if obj { + if json[i] != '"' { + continue + } + s := i + i, str, vesc, ok = parseString(json, i+1) + if !ok { + return + } + if vesc { + key.Str = unescape(str[1 : len(str)-1]) + } else { + key.Str = str[1 : len(str)-1] + } + key.Raw = str + key.Index = s + t.Index + } else { + key.Num += 1 + } + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' || json[i] == ':' { + continue + } + break + } + s := i + i, value, ok = parseAny(json, i, true) + if !ok { + return + } + if t.Indexes != nil { + if idx < len(t.Indexes) { + value.Index = t.Indexes[idx] + } + } else { + value.Index = s + t.Index + } + if !iterator(key, value) { + return + } + idx++ + } +} + +// Map returns back a map of values. The result should be a JSON object. +// If the result is not a JSON object, the return value will be an empty map. 
+func (t Result) Map() map[string]Result { + if t.Type != JSON { + return map[string]Result{} + } + r := t.arrayOrMap('{', false) + return r.o +} + +// Get searches result for the specified path. +// The result should be a JSON array or object. +func (t Result) Get(path string) Result { + r := Get(t.Raw, path) + if r.Indexes != nil { + for i := 0; i < len(r.Indexes); i++ { + r.Indexes[i] += t.Index + } + } else { + r.Index += t.Index + } + return r +} + +type arrayOrMapResult struct { + a []Result + ai []interface{} + o map[string]Result + oi map[string]interface{} + vc byte +} + +func (t Result) arrayOrMap(vc byte, valueize bool) (r arrayOrMapResult) { + var json = t.Raw + var i int + var value Result + var count int + var key Result + if vc == 0 { + for ; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + r.vc = json[i] + i++ + break + } + if json[i] > ' ' { + goto end + } + } + } else { + for ; i < len(json); i++ { + if json[i] == vc { + i++ + break + } + if json[i] > ' ' { + goto end + } + } + r.vc = vc + } + if r.vc == '{' { + if valueize { + r.oi = make(map[string]interface{}) + } else { + r.o = make(map[string]Result) + } + } else { + if valueize { + r.ai = make([]interface{}, 0) + } else { + r.a = make([]Result, 0) + } + } + for ; i < len(json); i++ { + if json[i] <= ' ' { + continue + } + // get next value + if json[i] == ']' || json[i] == '}' { + break + } + switch json[i] { + default: + if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' { + value.Type = Number + value.Raw, value.Num = tonum(json[i:]) + value.Str = "" + } else { + continue + } + case '{', '[': + value.Type = JSON + value.Raw = squash(json[i:]) + value.Str, value.Num = "", 0 + case 'n': + value.Type = Null + value.Raw = tolit(json[i:]) + value.Str, value.Num = "", 0 + case 't': + value.Type = True + value.Raw = tolit(json[i:]) + value.Str, value.Num = "", 0 + case 'f': + value.Type = False + value.Raw = tolit(json[i:]) + value.Str, value.Num = "", 0 + case '"': + value.Type = String + value.Raw, value.Str = tostr(json[i:]) + value.Num = 0 + } + value.Index = i + t.Index + + i += len(value.Raw) - 1 + + if r.vc == '{' { + if count%2 == 0 { + key = value + } else { + if valueize { + if _, ok := r.oi[key.Str]; !ok { + r.oi[key.Str] = value.Value() + } + } else { + if _, ok := r.o[key.Str]; !ok { + r.o[key.Str] = value + } + } + } + count++ + } else { + if valueize { + r.ai = append(r.ai, value.Value()) + } else { + r.a = append(r.a, value) + } + } + } +end: + if t.Indexes != nil { + if len(t.Indexes) != len(r.a) { + for i := 0; i < len(r.a); i++ { + r.a[i].Index = 0 + } + } else { + for i := 0; i < len(r.a); i++ { + r.a[i].Index = t.Indexes[i] + } + } + } + return +} + +// Parse parses the json and returns a result. +// +// This function expects that the json is well-formed, and does not validate. +// Invalid json will not panic, but it may return back unexpected results. +// If you are consuming JSON from an unpredictable source then you may want to +// use the Valid function first. 
+func Parse(json string) Result { + var value Result + i := 0 + for ; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + value.Type = JSON + value.Raw = json[i:] // just take the entire raw + break + } + if json[i] <= ' ' { + continue + } + switch json[i] { + case '+', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'i', 'I', 'N': + value.Type = Number + value.Raw, value.Num = tonum(json[i:]) + case 'n': + if i+1 < len(json) && json[i+1] != 'u' { + // nan + value.Type = Number + value.Raw, value.Num = tonum(json[i:]) + } else { + // null + value.Type = Null + value.Raw = tolit(json[i:]) + } + case 't': + value.Type = True + value.Raw = tolit(json[i:]) + case 'f': + value.Type = False + value.Raw = tolit(json[i:]) + case '"': + value.Type = String + value.Raw, value.Str = tostr(json[i:]) + default: + return Result{} + } + break + } + if value.Exists() { + value.Index = i + } + return value +} + +// ParseBytes parses the json and returns a result. +// If working with bytes, this method preferred over Parse(string(data)) +func ParseBytes(json []byte) Result { + return Parse(string(json)) +} + +func squash(json string) string { + // expects that the lead character is a '[' or '{' or '(' or '"' + // squash the value, ignoring all nested arrays and objects. + var i, depth int + if json[0] != '"' { + i, depth = 1, 1 + } + for ; i < len(json); i++ { + if json[i] >= '"' && json[i] <= '}' { + switch json[i] { + case '"': + i++ + s2 := i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > s2-1; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + if depth == 0 { + if i >= len(json) { + return json + } + return json[:i+1] + } + case '{', '[', '(': + depth++ + case '}', ']', ')': + depth-- + if depth == 0 { + return json[:i+1] + } + } + } + } + return json +} + +func tonum(json string) (raw string, num float64) { + for i := 1; i < len(json); i++ { + // less than dash might have valid characters + if json[i] <= '-' { + if json[i] <= ' ' || json[i] == ',' { + // break on whitespace and comma + raw = json[:i] + num, _ = strconv.ParseFloat(raw, 64) + return + } + // could be a '+' or '-'. let's assume so. + } else if json[i] == ']' || json[i] == '}' { + // break on ']' or '}' + raw = json[:i] + num, _ = strconv.ParseFloat(raw, 64) + return + } + } + raw = json + num, _ = strconv.ParseFloat(raw, 64) + return +} + +func tolit(json string) (raw string) { + for i := 1; i < len(json); i++ { + if json[i] < 'a' || json[i] > 'z' { + return json[:i] + } + } + return json +} + +func tostr(json string) (raw string, str string) { + // expects that the lead character is a '"' + for i := 1; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + return json[:i+1], json[1:i] + } + if json[i] == '\\' { + i++ + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + return json[:i+1], unescape(json[1:i]) + } + } + var ret string + if i+1 < len(json) { + ret = json[:i+1] + } else { + ret = json[:i] + } + return ret, unescape(json[1:i]) + } + } + return json, json[1:] +} + +// Exists returns true if value exists. 
+// +// if gjson.Get(json, "name.last").Exists(){ +// println("value exists") +// } +func (t Result) Exists() bool { + return t.Type != Null || len(t.Raw) != 0 +} + +// Value returns one of these types: +// +// bool, for JSON booleans +// float64, for JSON numbers +// Number, for JSON numbers +// string, for JSON string literals +// nil, for JSON null +// map[string]interface{}, for JSON objects +// []interface{}, for JSON arrays +func (t Result) Value() interface{} { + if t.Type == String { + return t.Str + } + switch t.Type { + default: + return nil + case False: + return false + case Number: + return t.Num + case JSON: + r := t.arrayOrMap(0, true) + if r.vc == '{' { + return r.oi + } else if r.vc == '[' { + return r.ai + } + return nil + case True: + return true + } +} + +func parseString(json string, i int) (int, string, bool, bool) { + var s = i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + return i + 1, json[s-1 : i+1], false, true + } + if json[i] == '\\' { + i++ + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + return i + 1, json[s-1 : i+1], true, true + } + } + break + } + } + return i, json[s-1:], false, false +} + +func parseNumber(json string, i int) (int, string) { + var s = i + i++ + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' || json[i] == ']' || + json[i] == '}' { + return i, json[s:i] + } + } + return i, json[s:] +} + +func parseLiteral(json string, i int) (int, string) { + var s = i + i++ + for ; i < len(json); i++ { + if json[i] < 'a' || json[i] > 'z' { + return i, json[s:i] + } + } + return i, json[s:] +} + +type arrayPathResult struct { + part string + path string + pipe string + piped bool + more bool + alogok bool + arrch bool + alogkey string + query struct { + on bool + all bool + path string + op string + value string + } +} + +func parseArrayPath(path string) (r arrayPathResult) { + for i := 0; i < len(path); i++ { + if path[i] == '|' { + r.part = path[:i] + r.pipe = path[i+1:] + r.piped = true + return + } + if path[i] == '.' { + r.part = path[:i] + if !r.arrch && i < len(path)-1 && isDotPiperChar(path[i+1:]) { + r.pipe = path[i+1:] + r.piped = true + } else { + r.path = path[i+1:] + r.more = true + } + return + } + if path[i] == '#' { + r.arrch = true + if i == 0 && len(path) > 1 { + if path[1] == '.' { + r.alogok = true + r.alogkey = path[2:] + r.path = path[:1] + } else if path[1] == '[' || path[1] == '(' { + // query + r.query.on = true + qpath, op, value, _, fi, vesc, ok := + parseQuery(path[i:]) + if !ok { + // bad query, end now + break + } + if len(value) >= 2 && value[0] == '"' && + value[len(value)-1] == '"' { + value = value[1 : len(value)-1] + if vesc { + value = unescape(value) + } + } + r.query.path = qpath + r.query.op = op + r.query.value = value + + i = fi - 1 + if i+1 < len(path) && path[i+1] == '#' { + r.query.all = true + } + } + } + continue + } + } + r.part = path + r.path = "" + return +} + +// splitQuery takes a query and splits it into three parts: +// +// path, op, middle, and right. 
+// +// So for this query: +// +// #(first_name=="Murphy").last +// +// Becomes +// +// first_name # path +// =="Murphy" # middle +// .last # right +// +// Or, +// +// #(service_roles.#(=="one")).cap +// +// Becomes +// +// service_roles.#(=="one") # path +// # middle +// .cap # right +func parseQuery(query string) ( + path, op, value, remain string, i int, vesc, ok bool, +) { + if len(query) < 2 || query[0] != '#' || + (query[1] != '(' && query[1] != '[') { + return "", "", "", "", i, false, false + } + i = 2 + j := 0 // start of value part + depth := 1 + for ; i < len(query); i++ { + if depth == 1 && j == 0 { + switch query[i] { + case '!', '=', '<', '>', '%': + // start of the value part + j = i + continue + } + } + if query[i] == '\\' { + i++ + } else if query[i] == '[' || query[i] == '(' { + depth++ + } else if query[i] == ']' || query[i] == ')' { + depth-- + if depth == 0 { + break + } + } else if query[i] == '"' { + // inside selector string, balance quotes + i++ + for ; i < len(query); i++ { + if query[i] == '\\' { + vesc = true + i++ + } else if query[i] == '"' { + break + } + } + } + } + if depth > 0 { + return "", "", "", "", i, false, false + } + if j > 0 { + path = trim(query[2:j]) + value = trim(query[j:i]) + remain = query[i+1:] + // parse the compare op from the value + var opsz int + switch { + case len(value) == 1: + opsz = 1 + case value[0] == '!' && value[1] == '=': + opsz = 2 + case value[0] == '!' && value[1] == '%': + opsz = 2 + case value[0] == '<' && value[1] == '=': + opsz = 2 + case value[0] == '>' && value[1] == '=': + opsz = 2 + case value[0] == '=' && value[1] == '=': + value = value[1:] + opsz = 1 + case value[0] == '<': + opsz = 1 + case value[0] == '>': + opsz = 1 + case value[0] == '=': + opsz = 1 + case value[0] == '%': + opsz = 1 + } + op = value[:opsz] + value = trim(value[opsz:]) + } else { + path = trim(query[2:i]) + remain = query[i+1:] + } + return path, op, value, remain, i + 1, vesc, true +} + +func trim(s string) string { +left: + if len(s) > 0 && s[0] <= ' ' { + s = s[1:] + goto left + } +right: + if len(s) > 0 && s[len(s)-1] <= ' ' { + s = s[:len(s)-1] + goto right + } + return s +} + +// peek at the next byte and see if it's a '@', '[', or '{'. +func isDotPiperChar(s string) bool { + if DisableModifiers { + return false + } + c := s[0] + if c == '@' { + // check that the next component is *not* a modifier. + i := 1 + for ; i < len(s); i++ { + if s[i] == '.' || s[i] == '|' || s[i] == ':' { + break + } + } + _, ok := modifiers[s[1:i]] + return ok + } + return c == '[' || c == '{' +} + +type objectPathResult struct { + part string + path string + pipe string + piped bool + wild bool + more bool +} + +func parseObjectPath(path string) (r objectPathResult) { + for i := 0; i < len(path); i++ { + if path[i] == '|' { + r.part = path[:i] + r.pipe = path[i+1:] + r.piped = true + return + } + if path[i] == '.' { + r.part = path[:i] + if i < len(path)-1 && isDotPiperChar(path[i+1:]) { + r.pipe = path[i+1:] + r.piped = true + } else { + r.path = path[i+1:] + r.more = true + } + return + } + if path[i] == '*' || path[i] == '?' { + r.wild = true + continue + } + if path[i] == '\\' { + // go into escape mode. this is a slower path that + // strips off the escape character from the part. + epart := []byte(path[:i]) + i++ + if i < len(path) { + epart = append(epart, path[i]) + i++ + for ; i < len(path); i++ { + if path[i] == '\\' { + i++ + if i < len(path) { + epart = append(epart, path[i]) + } + continue + } else if path[i] == '.' 
{ + r.part = string(epart) + if i < len(path)-1 && isDotPiperChar(path[i+1:]) { + r.pipe = path[i+1:] + r.piped = true + } else { + r.path = path[i+1:] + r.more = true + } + return + } else if path[i] == '|' { + r.part = string(epart) + r.pipe = path[i+1:] + r.piped = true + return + } else if path[i] == '*' || path[i] == '?' { + r.wild = true + } + epart = append(epart, path[i]) + } + } + // append the last part + r.part = string(epart) + return + } + } + r.part = path + return +} + +func parseSquash(json string, i int) (int, string) { + // expects that the lead character is a '[' or '{' or '(' + // squash the value, ignoring all nested arrays and objects. + // the first '[' or '{' or '(' has already been read + s := i + i++ + depth := 1 + for ; i < len(json); i++ { + if json[i] >= '"' && json[i] <= '}' { + switch json[i] { + case '"': + i++ + s2 := i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > s2-1; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + case '{', '[', '(': + depth++ + case '}', ']', ')': + depth-- + if depth == 0 { + i++ + return i, json[s:i] + } + } + } + } + return i, json[s:] +} + +func parseObject(c *parseContext, i int, path string) (int, bool) { + var pmatch, kesc, vesc, ok, hit bool + var key, val string + rp := parseObjectPath(path) + if !rp.more && rp.piped { + c.pipe = rp.pipe + c.piped = true + } + for i < len(c.json) { + for ; i < len(c.json); i++ { + if c.json[i] == '"' { + // parse_key_string + // this is slightly different from getting s string value + // because we don't need the outer quotes. + i++ + var s = i + for ; i < len(c.json); i++ { + if c.json[i] > '\\' { + continue + } + if c.json[i] == '"' { + i, key, kesc, ok = i+1, c.json[s:i], false, true + goto parse_key_string_done + } + if c.json[i] == '\\' { + i++ + for ; i < len(c.json); i++ { + if c.json[i] > '\\' { + continue + } + if c.json[i] == '"' { + // look for an escaped slash + if c.json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if c.json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + i, key, kesc, ok = i+1, c.json[s:i], true, true + goto parse_key_string_done + } + } + break + } + } + key, kesc, ok = c.json[s:], false, false + parse_key_string_done: + break + } + if c.json[i] == '}' { + return i + 1, false + } + } + if !ok { + return i, false + } + if rp.wild { + if kesc { + pmatch = matchLimit(unescape(key), rp.part) + } else { + pmatch = matchLimit(key, rp.part) + } + } else { + if kesc { + pmatch = rp.part == unescape(key) + } else { + pmatch = rp.part == key + } + } + hit = pmatch && !rp.more + for ; i < len(c.json); i++ { + var num bool + switch c.json[i] { + default: + continue + case '"': + i++ + i, val, vesc, ok = parseString(c.json, i) + if !ok { + return i, false + } + if hit { + if vesc { + c.value.Str = unescape(val[1 : len(val)-1]) + } else { + c.value.Str = val[1 : len(val)-1] + } + c.value.Raw = val + c.value.Type = String + return i, true + } + case '{': + if pmatch && !hit { + i, hit = parseObject(c, i+1, rp.path) + if hit { + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if hit { + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case '[': + if pmatch && !hit { + i, hit = parseArray(c, i+1, rp.path) + if hit { + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if hit { + c.value.Raw = val + c.value.Type 
= JSON + return i, true + } + } + case 'n': + if i+1 < len(c.json) && c.json[i+1] != 'u' { + num = true + break + } + fallthrough + case 't', 'f': + vc := c.json[i] + i, val = parseLiteral(c.json, i) + if hit { + c.value.Raw = val + switch vc { + case 't': + c.value.Type = True + case 'f': + c.value.Type = False + } + return i, true + } + case '+', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'i', 'I', 'N': + num = true + } + if num { + i, val = parseNumber(c.json, i) + if hit { + c.value.Raw = val + c.value.Type = Number + c.value.Num, _ = strconv.ParseFloat(val, 64) + return i, true + } + } + break + } + } + return i, false +} + +// matchLimit will limit the complexity of the match operation to avoid ReDos +// attacks from arbritary inputs. +// See the github.com/tidwall/match.MatchLimit function for more information. +func matchLimit(str, pattern string) bool { + matched, _ := match.MatchLimit(str, pattern, 10000) + return matched +} + +func falseish(t Result) bool { + switch t.Type { + case Null: + return true + case False: + return true + case String: + b, err := strconv.ParseBool(strings.ToLower(t.Str)) + if err != nil { + return false + } + return !b + case Number: + return t.Num == 0 + default: + return false + } +} + +func trueish(t Result) bool { + switch t.Type { + case True: + return true + case String: + b, err := strconv.ParseBool(strings.ToLower(t.Str)) + if err != nil { + return false + } + return b + case Number: + return t.Num != 0 + default: + return false + } +} + +func nullish(t Result) bool { + return t.Type == Null +} + +func queryMatches(rp *arrayPathResult, value Result) bool { + rpv := rp.query.value + if len(rpv) > 0 { + if rpv[0] == '~' { + // convert to bool + rpv = rpv[1:] + var ish, ok bool + switch rpv { + case "*": + ish, ok = value.Exists(), true + case "null": + ish, ok = nullish(value), true + case "true": + ish, ok = trueish(value), true + case "false": + ish, ok = falseish(value), true + } + if ok { + rpv = "true" + if ish { + value = Result{Type: True} + } else { + value = Result{Type: False} + } + } else { + rpv = "" + value = Result{} + } + } + } + if !value.Exists() { + return false + } + if rp.query.op == "" { + // the query is only looking for existence, such as: + // friends.#(name) + // which makes sure that the array "friends" has an element of + // "name" that exists + return true + } + switch value.Type { + case String: + switch rp.query.op { + case "=": + return value.Str == rpv + case "!=": + return value.Str != rpv + case "<": + return value.Str < rpv + case "<=": + return value.Str <= rpv + case ">": + return value.Str > rpv + case ">=": + return value.Str >= rpv + case "%": + return matchLimit(value.Str, rpv) + case "!%": + return !matchLimit(value.Str, rpv) + } + case Number: + rpvn, _ := strconv.ParseFloat(rpv, 64) + switch rp.query.op { + case "=": + return value.Num == rpvn + case "!=": + return value.Num != rpvn + case "<": + return value.Num < rpvn + case "<=": + return value.Num <= rpvn + case ">": + return value.Num > rpvn + case ">=": + return value.Num >= rpvn + } + case True: + switch rp.query.op { + case "=": + return rpv == "true" + case "!=": + return rpv != "true" + case ">": + return rpv == "false" + case ">=": + return true + } + case False: + switch rp.query.op { + case "=": + return rpv == "false" + case "!=": + return rpv != "false" + case "<": + return rpv == "true" + case "<=": + return true + } + } + return false +} +func parseArray(c *parseContext, i int, path string) (int, bool) { + var pmatch, vesc, 
ok, hit bool + var val string + var h int + var alog []int + var partidx int + var multires []byte + var queryIndexes []int + rp := parseArrayPath(path) + if !rp.arrch { + n, ok := parseUint(rp.part) + if !ok { + partidx = -1 + } else { + partidx = int(n) + } + } + if !rp.more && rp.piped { + c.pipe = rp.pipe + c.piped = true + } + + procQuery := func(qval Result) bool { + if rp.query.all { + if len(multires) == 0 { + multires = append(multires, '[') + } + } + var tmp parseContext + tmp.value = qval + fillIndex(c.json, &tmp) + parentIndex := tmp.value.Index + var res Result + if qval.Type == JSON { + res = qval.Get(rp.query.path) + } else { + if rp.query.path != "" { + return false + } + res = qval + } + if queryMatches(&rp, res) { + if rp.more { + left, right, ok := splitPossiblePipe(rp.path) + if ok { + rp.path = left + c.pipe = right + c.piped = true + } + res = qval.Get(rp.path) + } else { + res = qval + } + if rp.query.all { + raw := res.Raw + if len(raw) == 0 { + raw = res.String() + } + if raw != "" { + if len(multires) > 1 { + multires = append(multires, ',') + } + multires = append(multires, raw...) + queryIndexes = append(queryIndexes, res.Index+parentIndex) + } + } else { + c.value = res + return true + } + } + return false + } + for i < len(c.json)+1 { + if !rp.arrch { + pmatch = partidx == h + hit = pmatch && !rp.more + } + h++ + if rp.alogok { + alog = append(alog, i) + } + for ; ; i++ { + var ch byte + if i > len(c.json) { + break + } else if i == len(c.json) { + ch = ']' + } else { + ch = c.json[i] + } + var num bool + switch ch { + default: + continue + case '"': + i++ + i, val, vesc, ok = parseString(c.json, i) + if !ok { + return i, false + } + if rp.query.on { + var qval Result + if vesc { + qval.Str = unescape(val[1 : len(val)-1]) + } else { + qval.Str = val[1 : len(val)-1] + } + qval.Raw = val + qval.Type = String + if procQuery(qval) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + if vesc { + c.value.Str = unescape(val[1 : len(val)-1]) + } else { + c.value.Str = val[1 : len(val)-1] + } + c.value.Raw = val + c.value.Type = String + return i, true + } + case '{': + if pmatch && !hit { + i, hit = parseObject(c, i+1, rp.path) + if hit { + if rp.alogok { + break + } + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if rp.query.on { + if procQuery(Result{Raw: val, Type: JSON}) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case '[': + if pmatch && !hit { + i, hit = parseArray(c, i+1, rp.path) + if hit { + if rp.alogok { + break + } + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if rp.query.on { + if procQuery(Result{Raw: val, Type: JSON}) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case 'n': + if i+1 < len(c.json) && c.json[i+1] != 'u' { + num = true + break + } + fallthrough + case 't', 'f': + vc := c.json[i] + i, val = parseLiteral(c.json, i) + if rp.query.on { + var qval Result + qval.Raw = val + switch vc { + case 't': + qval.Type = True + case 'f': + qval.Type = False + } + if procQuery(qval) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + switch vc { + case 't': + c.value.Type = True + case 'f': + c.value.Type = False + } + return i, true + } + case '+', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'i', 'I', 'N': + num = true + case ']': + if rp.arrch && rp.part == 
"#" { + if rp.alogok { + left, right, ok := splitPossiblePipe(rp.alogkey) + if ok { + rp.alogkey = left + c.pipe = right + c.piped = true + } + var indexes = make([]int, 0, 64) + var jsons = make([]byte, 0, 64) + jsons = append(jsons, '[') + for j, k := 0, 0; j < len(alog); j++ { + idx := alog[j] + for idx < len(c.json) { + switch c.json[idx] { + case ' ', '\t', '\r', '\n': + idx++ + continue + } + break + } + if idx < len(c.json) && c.json[idx] != ']' { + _, res, ok := parseAny(c.json, idx, true) + if ok { + res := res.Get(rp.alogkey) + if res.Exists() { + if k > 0 { + jsons = append(jsons, ',') + } + raw := res.Raw + if len(raw) == 0 { + raw = res.String() + } + jsons = append(jsons, []byte(raw)...) + indexes = append(indexes, res.Index) + k++ + } + } + } + } + jsons = append(jsons, ']') + c.value.Type = JSON + c.value.Raw = string(jsons) + c.value.Indexes = indexes + return i + 1, true + } + if rp.alogok { + break + } + + c.value.Type = Number + c.value.Num = float64(h - 1) + c.value.Raw = strconv.Itoa(h - 1) + c.calcd = true + return i + 1, true + } + if !c.value.Exists() { + if len(multires) > 0 { + c.value = Result{ + Raw: string(append(multires, ']')), + Type: JSON, + Indexes: queryIndexes, + } + } else if rp.query.all { + c.value = Result{ + Raw: "[]", + Type: JSON, + } + } + } + return i + 1, false + } + if num { + i, val = parseNumber(c.json, i) + if rp.query.on { + var qval Result + qval.Raw = val + qval.Type = Number + qval.Num, _ = strconv.ParseFloat(val, 64) + if procQuery(qval) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = Number + c.value.Num, _ = strconv.ParseFloat(val, 64) + return i, true + } + } + break + } + } + return i, false +} + +func splitPossiblePipe(path string) (left, right string, ok bool) { + // take a quick peek for the pipe character. If found we'll split the piped + // part of the path into the c.pipe field and shorten the rp. + var possible bool + for i := 0; i < len(path); i++ { + if path[i] == '|' { + possible = true + break + } + } + if !possible { + return + } + + if len(path) > 0 && path[0] == '{' { + squashed := squash(path[1:]) + if len(squashed) < len(path)-1 { + squashed = path[:len(squashed)+1] + remain := path[len(squashed):] + if remain[0] == '|' { + return squashed, remain[1:], true + } + } + return + } + + // split the left and right side of the path with the pipe character as + // the delimiter. This is a little tricky because we'll need to basically + // parse the entire path. + for i := 0; i < len(path); i++ { + if path[i] == '\\' { + i++ + } else if path[i] == '.' { + if i == len(path)-1 { + return + } + if path[i+1] == '#' { + i += 2 + if i == len(path) { + return + } + if path[i] == '[' || path[i] == '(' { + var start, end byte + if path[i] == '[' { + start, end = '[', ']' + } else { + start, end = '(', ')' + } + // inside selector, balance brackets + i++ + depth := 1 + for ; i < len(path); i++ { + if path[i] == '\\' { + i++ + } else if path[i] == start { + depth++ + } else if path[i] == end { + depth-- + if depth == 0 { + break + } + } else if path[i] == '"' { + // inside selector string, balance quotes + i++ + for ; i < len(path); i++ { + if path[i] == '\\' { + i++ + } else if path[i] == '"' { + break + } + } + } + } + } + } + } else if path[i] == '|' { + return path[:i], path[i+1:], true + } + } + return +} + +// ForEachLine iterates through lines of JSON as specified by the JSON Lines +// format (http://jsonlines.org/). +// Each line is returned as a GJSON Result. 
+func ForEachLine(json string, iterator func(line Result) bool) { + var res Result + var i int + for { + i, res, _ = parseAny(json, i, true) + if !res.Exists() { + break + } + if !iterator(res) { + return + } + } +} + +type subSelector struct { + name string + path string +} + +// parseSubSelectors returns the subselectors belonging to a '[path1,path2]' or +// '{"field1":path1,"field2":path2}' type subSelection. It's expected that the +// first character in path is either '[' or '{', and has already been checked +// prior to calling this function. +func parseSubSelectors(path string) (sels []subSelector, out string, ok bool) { + modifier := 0 + depth := 1 + colon := 0 + start := 1 + i := 1 + pushSel := func() { + var sel subSelector + if colon == 0 { + sel.path = path[start:i] + } else { + sel.name = path[start:colon] + sel.path = path[colon+1 : i] + } + sels = append(sels, sel) + colon = 0 + modifier = 0 + start = i + 1 + } + for ; i < len(path); i++ { + switch path[i] { + case '\\': + i++ + case '@': + if modifier == 0 && i > 0 && (path[i-1] == '.' || path[i-1] == '|') { + modifier = i + } + case ':': + if modifier == 0 && colon == 0 && depth == 1 { + colon = i + } + case ',': + if depth == 1 { + pushSel() + } + case '"': + i++ + loop: + for ; i < len(path); i++ { + switch path[i] { + case '\\': + i++ + case '"': + break loop + } + } + case '[', '(', '{': + depth++ + case ']', ')', '}': + depth-- + if depth == 0 { + pushSel() + path = path[i+1:] + return sels, path, true + } + } + } + return +} + +// nameOfLast returns the name of the last component +func nameOfLast(path string) string { + for i := len(path) - 1; i >= 0; i-- { + if path[i] == '|' || path[i] == '.' { + if i > 0 { + if path[i-1] == '\\' { + continue + } + } + return path[i+1:] + } + } + return path +} + +func isSimpleName(component string) bool { + for i := 0; i < len(component); i++ { + if component[i] < ' ' { + return false + } + switch component[i] { + case '[', ']', '{', '}', '(', ')', '#', '|', '!': + return false + } + } + return true +} + +var hexchars = [...]byte{ + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'a', 'b', 'c', 'd', 'e', 'f', +} + +func appendHex16(dst []byte, x uint16) []byte { + return append(dst, + hexchars[x>>12&0xF], hexchars[x>>8&0xF], + hexchars[x>>4&0xF], hexchars[x>>0&0xF], + ) +} + +// AppendJSONString is a convenience function that converts the provided string +// to a valid JSON string and appends it to dst. +func AppendJSONString(dst []byte, s string) []byte { + dst = append(dst, make([]byte, len(s)+2)...) + dst = append(dst[:len(dst)-len(s)-2], '"') + for i := 0; i < len(s); i++ { + if s[i] < ' ' { + dst = append(dst, '\\') + switch s[i] { + case '\b': + dst = append(dst, 'b') + case '\f': + dst = append(dst, 'f') + case '\n': + dst = append(dst, 'n') + case '\r': + dst = append(dst, 'r') + case '\t': + dst = append(dst, 't') + default: + dst = append(dst, 'u') + dst = appendHex16(dst, uint16(s[i])) + } + } else if s[i] == '>' || s[i] == '<' || s[i] == '&' { + dst = append(dst, '\\', 'u') + dst = appendHex16(dst, uint16(s[i])) + } else if s[i] == '\\' { + dst = append(dst, '\\', '\\') + } else if s[i] == '"' { + dst = append(dst, '\\', '"') + } else if s[i] > 127 { + // read utf8 character + r, n := utf8.DecodeRuneInString(s[i:]) + if n == 0 { + break + } + if r == utf8.RuneError && n == 1 { + dst = append(dst, `\ufffd`...) + } else if r == '\u2028' || r == '\u2029' { + dst = append(dst, `\u202`...) + dst = append(dst, hexchars[r&0xF]) + } else { + dst = append(dst, s[i:i+n]...) 
+ } + i = i + n - 1 + } else { + dst = append(dst, s[i]) + } + } + return append(dst, '"') +} + +type parseContext struct { + json string + value Result + pipe string + piped bool + calcd bool + lines bool +} + +// Get searches json for the specified path. +// A path is in dot syntax, such as "name.last" or "age". +// When the value is found it's returned immediately. +// +// A path is a series of keys separated by a dot. +// A key may contain special wildcard characters '*' and '?'. +// To access an array value use the index as the key. +// To get the number of elements in an array or to access a child path, use +// the '#' character. +// The dot and wildcard character can be escaped with '\'. +// +// { +// "name": {"first": "Tom", "last": "Anderson"}, +// "age":37, +// "children": ["Sara","Alex","Jack"], +// "friends": [ +// {"first": "James", "last": "Murphy"}, +// {"first": "Roger", "last": "Craig"} +// ] +// } +// "name.last" >> "Anderson" +// "age" >> 37 +// "children" >> ["Sara","Alex","Jack"] +// "children.#" >> 3 +// "children.1" >> "Alex" +// "child*.2" >> "Jack" +// "c?ildren.0" >> "Sara" +// "friends.#.first" >> ["James","Roger"] +// +// This function expects that the json is well-formed, and does not validate. +// Invalid json will not panic, but it may return back unexpected results. +// If you are consuming JSON from an unpredictable source then you may want to +// use the Valid function first. +func Get(json, path string) Result { + if len(path) > 1 { + if (path[0] == '@' && !DisableModifiers) || path[0] == '!' { + // possible modifier + var ok bool + var npath string + var rjson string + if path[0] == '@' && !DisableModifiers { + npath, rjson, ok = execModifier(json, path) + } else if path[0] == '!' { + npath, rjson, ok = execStatic(json, path) + } + if ok { + path = npath + if len(path) > 0 && (path[0] == '|' || path[0] == '.') { + res := Get(rjson, path[1:]) + res.Index = 0 + res.Indexes = nil + return res + } + return Parse(rjson) + } + } + if path[0] == '[' || path[0] == '{' { + // using a subselector path + kind := path[0] + var ok bool + var subs []subSelector + subs, path, ok = parseSubSelectors(path) + if ok { + if len(path) == 0 || (path[0] == '|' || path[0] == '.') { + var b []byte + b = append(b, kind) + var i int + for _, sub := range subs { + res := Get(json, sub.path) + if res.Exists() { + if i > 0 { + b = append(b, ',') + } + if kind == '{' { + if len(sub.name) > 0 { + if sub.name[0] == '"' && Valid(sub.name) { + b = append(b, sub.name...) + } else { + b = AppendJSONString(b, sub.name) + } + } else { + last := nameOfLast(sub.path) + if isSimpleName(last) { + b = AppendJSONString(b, last) + } else { + b = AppendJSONString(b, "_") + } + } + b = append(b, ':') + } + var raw string + if len(res.Raw) == 0 { + raw = res.String() + if len(raw) == 0 { + raw = "null" + } + } else { + raw = res.Raw + } + b = append(b, raw...) + i++ + } + } + b = append(b, kind+2) + var res Result + res.Raw = string(b) + res.Type = JSON + if len(path) > 0 { + res = res.Get(path[1:]) + } + res.Index = 0 + return res + } + } + } + } + var i int + var c = &parseContext{json: json} + if len(path) >= 2 && path[0] == '.' && path[1] == '.' 
{ + c.lines = true + parseArray(c, 0, path[2:]) + } else { + for ; i < len(c.json); i++ { + if c.json[i] == '{' { + i++ + parseObject(c, i, path) + break + } + if c.json[i] == '[' { + i++ + parseArray(c, i, path) + break + } + } + } + if c.piped { + res := c.value.Get(c.pipe) + res.Index = 0 + return res + } + fillIndex(json, c) + return c.value +} + +// GetBytes searches json for the specified path. +// If working with bytes, this method preferred over Get(string(data), path) +func GetBytes(json []byte, path string) Result { + return getBytes(json, path) +} + +// runeit returns the rune from the the \uXXXX +func runeit(json string) rune { + n, _ := strconv.ParseUint(json[:4], 16, 64) + return rune(n) +} + +// unescape unescapes a string +func unescape(json string) string { + var str = make([]byte, 0, len(json)) + for i := 0; i < len(json); i++ { + switch { + default: + str = append(str, json[i]) + case json[i] < ' ': + return string(str) + case json[i] == '\\': + i++ + if i >= len(json) { + return string(str) + } + switch json[i] { + default: + return string(str) + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + case '"': + str = append(str, '"') + case 'u': + if i+5 > len(json) { + return string(str) + } + r := runeit(json[i+1:]) + i += 5 + if utf16.IsSurrogate(r) { + // need another code + if len(json[i:]) >= 6 && json[i] == '\\' && + json[i+1] == 'u' { + // we expect it to be correct so just consume it + r = utf16.DecodeRune(r, runeit(json[i+2:])) + i += 6 + } + } + // provide enough space to encode the largest utf8 possible + str = append(str, 0, 0, 0, 0, 0, 0, 0, 0) + n := utf8.EncodeRune(str[len(str)-8:], r) + str = str[:len(str)-8+n] + i-- // backtrack index by one + } + } + } + return string(str) +} + +// Less return true if a token is less than another token. +// The caseSensitive paramater is used when the tokens are Strings. +// The order when comparing two different type is: +// +// Null < False < Number < String < True < JSON +func (t Result) Less(token Result, caseSensitive bool) bool { + if t.Type < token.Type { + return true + } + if t.Type > token.Type { + return false + } + if t.Type == String { + if caseSensitive { + return t.Str < token.Str + } + return stringLessInsensitive(t.Str, token.Str) + } + if t.Type == Number { + return t.Num < token.Num + } + return t.Raw < token.Raw +} + +func stringLessInsensitive(a, b string) bool { + for i := 0; i < len(a) && i < len(b); i++ { + if a[i] >= 'A' && a[i] <= 'Z' { + if b[i] >= 'A' && b[i] <= 'Z' { + // both are uppercase, do nothing + if a[i] < b[i] { + return true + } else if a[i] > b[i] { + return false + } + } else { + // a is uppercase, convert a to lowercase + if a[i]+32 < b[i] { + return true + } else if a[i]+32 > b[i] { + return false + } + } + } else if b[i] >= 'A' && b[i] <= 'Z' { + // b is uppercase, convert b to lowercase + if a[i] < b[i]+32 { + return true + } else if a[i] > b[i]+32 { + return false + } + } else { + // neither are uppercase + if a[i] < b[i] { + return true + } else if a[i] > b[i] { + return false + } + } + } + return len(a) < len(b) +} + +// parseAny parses the next value from a json string. +// A Result is returned when the hit param is set. 
+// The return values are (i int, res Result, ok bool) +func parseAny(json string, i int, hit bool) (int, Result, bool) { + var res Result + var val string + for ; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + i, val = parseSquash(json, i) + if hit { + res.Raw = val + res.Type = JSON + } + var tmp parseContext + tmp.value = res + fillIndex(json, &tmp) + return i, tmp.value, true + } + if json[i] <= ' ' { + continue + } + var num bool + switch json[i] { + case '"': + i++ + var vesc bool + var ok bool + i, val, vesc, ok = parseString(json, i) + if !ok { + return i, res, false + } + if hit { + res.Type = String + res.Raw = val + if vesc { + res.Str = unescape(val[1 : len(val)-1]) + } else { + res.Str = val[1 : len(val)-1] + } + } + return i, res, true + case 'n': + if i+1 < len(json) && json[i+1] != 'u' { + num = true + break + } + fallthrough + case 't', 'f': + vc := json[i] + i, val = parseLiteral(json, i) + if hit { + res.Raw = val + switch vc { + case 't': + res.Type = True + case 'f': + res.Type = False + } + return i, res, true + } + case '+', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'i', 'I', 'N': + num = true + } + if num { + i, val = parseNumber(json, i) + if hit { + res.Raw = val + res.Type = Number + res.Num, _ = strconv.ParseFloat(val, 64) + } + return i, res, true + } + + } + return i, res, false +} + +// GetMany searches json for the multiple paths. +// The return value is a Result array where the number of items +// will be equal to the number of input paths. +func GetMany(json string, path ...string) []Result { + res := make([]Result, len(path)) + for i, path := range path { + res[i] = Get(json, path) + } + return res +} + +// GetManyBytes searches json for the multiple paths. +// The return value is a Result array where the number of items +// will be equal to the number of input paths. 
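+//
+// A brief usage sketch (sample document made up for the example):
+//
+//	data := []byte(`{"name":{"first":"Tom","last":"Smith"},"age":37}`)
+//	results := gjson.GetManyBytes(data, "name.first", "age", "missing")
+//	// results[0].String() == "Tom"
+//	// results[1].Int() == 37
+//	// results[2].Exists() == false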
+func GetManyBytes(json []byte, path ...string) []Result { + res := make([]Result, len(path)) + for i, path := range path { + res[i] = GetBytes(json, path) + } + return res +} + +func validpayload(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + i, ok = validany(data, i) + if !ok { + return i, false + } + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + } + } + return i, true + case ' ', '\t', '\n', '\r': + continue + } + } + return i, false +} +func validany(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case '{': + return validobject(data, i+1) + case '[': + return validarray(data, i+1) + case '"': + return validstring(data, i+1) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return validnumber(data, i+1) + case 't': + return validtrue(data, i+1) + case 'f': + return validfalse(data, i+1) + case 'n': + return validnull(data, i+1) + } + } + return i, false +} +func validobject(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case '}': + return i + 1, true + case '"': + key: + if i, ok = validstring(data, i+1); !ok { + return i, false + } + if i, ok = validcolon(data, i); !ok { + return i, false + } + if i, ok = validany(data, i); !ok { + return i, false + } + if i, ok = validcomma(data, i, '}'); !ok { + return i, false + } + if data[i] == '}' { + return i + 1, true + } + i++ + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case '"': + goto key + } + } + return i, false + } + } + return i, false +} +func validcolon(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case ':': + return i + 1, true + } + } + return i, false +} +func validcomma(data []byte, i int, end byte) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case ',': + return i, true + case end: + return i, true + } + } + return i, false +} +func validarray(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + for ; i < len(data); i++ { + if i, ok = validany(data, i); !ok { + return i, false + } + if i, ok = validcomma(data, i, ']'); !ok { + return i, false + } + if data[i] == ']' { + return i + 1, true + } + } + case ' ', '\t', '\n', '\r': + continue + case ']': + return i + 1, true + } + } + return i, false +} +func validstring(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + if data[i] < ' ' { + return i, false + } else if data[i] == '\\' { + i++ + if i == len(data) { + return i, false + } + switch data[i] { + default: + return i, false + case '"', '\\', '/', 'b', 'f', 'n', 'r', 't': + case 'u': + for j := 0; j < 4; j++ { + i++ + if i >= len(data) { + return i, false + } + if !((data[i] >= '0' && data[i] <= '9') || + (data[i] >= 'a' && data[i] <= 'f') || + (data[i] >= 'A' && data[i] <= 'F')) { + return i, false + } + } + } + } else if data[i] == '"' { + return i + 1, true + } + } + return i, false +} +func validnumber(data []byte, i int) (outi int, ok bool) { + i-- + // sign + if data[i] == '-' { + i++ + if i == len(data) { 
+ return i, false + } + if data[i] < '0' || data[i] > '9' { + return i, false + } + } + // int + if i == len(data) { + return i, false + } + if data[i] == '0' { + i++ + } else { + for ; i < len(data); i++ { + if data[i] >= '0' && data[i] <= '9' { + continue + } + break + } + } + // frac + if i == len(data) { + return i, true + } + if data[i] == '.' { + i++ + if i == len(data) { + return i, false + } + if data[i] < '0' || data[i] > '9' { + return i, false + } + i++ + for ; i < len(data); i++ { + if data[i] >= '0' && data[i] <= '9' { + continue + } + break + } + } + // exp + if i == len(data) { + return i, true + } + if data[i] == 'e' || data[i] == 'E' { + i++ + if i == len(data) { + return i, false + } + if data[i] == '+' || data[i] == '-' { + i++ + } + if i == len(data) { + return i, false + } + if data[i] < '0' || data[i] > '9' { + return i, false + } + i++ + for ; i < len(data); i++ { + if data[i] >= '0' && data[i] <= '9' { + continue + } + break + } + } + return i, true +} + +func validtrue(data []byte, i int) (outi int, ok bool) { + if i+3 <= len(data) && data[i] == 'r' && data[i+1] == 'u' && + data[i+2] == 'e' { + return i + 3, true + } + return i, false +} +func validfalse(data []byte, i int) (outi int, ok bool) { + if i+4 <= len(data) && data[i] == 'a' && data[i+1] == 'l' && + data[i+2] == 's' && data[i+3] == 'e' { + return i + 4, true + } + return i, false +} +func validnull(data []byte, i int) (outi int, ok bool) { + if i+3 <= len(data) && data[i] == 'u' && data[i+1] == 'l' && + data[i+2] == 'l' { + return i + 3, true + } + return i, false +} + +// Valid returns true if the input is valid json. +// +// if !gjson.Valid(json) { +// return errors.New("invalid json") +// } +// value := gjson.Get(json, "name.last") +func Valid(json string) bool { + _, ok := validpayload(stringBytes(json), 0) + return ok +} + +// ValidBytes returns true if the input is valid json. +// +// if !gjson.Valid(json) { +// return errors.New("invalid json") +// } +// value := gjson.Get(json, "name.last") +// +// If working with bytes, this method preferred over ValidBytes(string(data)) +func ValidBytes(json []byte) bool { + _, ok := validpayload(json, 0) + return ok +} + +func parseUint(s string) (n uint64, ok bool) { + var i int + if i == len(s) { + return 0, false + } + for ; i < len(s); i++ { + if s[i] >= '0' && s[i] <= '9' { + n = n*10 + uint64(s[i]-'0') + } else { + return 0, false + } + } + return n, true +} + +func parseInt(s string) (n int64, ok bool) { + var i int + var sign bool + if len(s) > 0 && s[0] == '-' { + sign = true + i++ + } + if i == len(s) { + return 0, false + } + for ; i < len(s); i++ { + if s[i] >= '0' && s[i] <= '9' { + n = n*10 + int64(s[i]-'0') + } else { + return 0, false + } + } + if sign { + return n * -1, true + } + return n, true +} + +// safeInt validates a given JSON number +// ensures it lies within the minimum and maximum representable JSON numbers +func safeInt(f float64) (n int64, ok bool) { + // https://tc39.es/ecma262/#sec-number.min_safe_integer + // https://tc39.es/ecma262/#sec-number.max_safe_integer + if f < -9007199254740991 || f > 9007199254740991 { + return 0, false + } + return int64(f), true +} + +// execStatic parses the path to find a static value. +// The input expects that the path already starts with a '!' 
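+//
+// For illustration, static values of this kind back literal components in
+// paths such as (sample values only):
+//
+//	gjson.Get(json, `!true`)                             // the literal true
+//	gjson.Get(json, `{"name":name.first,"extra":!"ok"}`) // a literal inside a multipath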
+func execStatic(json, path string) (pathOut, res string, ok bool) { + name := path[1:] + if len(name) > 0 { + switch name[0] { + case '{', '[', '"', '+', '-', '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9': + _, res = parseSquash(name, 0) + pathOut = name[len(res):] + return pathOut, res, true + } + } + for i := 1; i < len(path); i++ { + if path[i] == '|' { + pathOut = path[i:] + name = path[1:i] + break + } + if path[i] == '.' { + pathOut = path[i:] + name = path[1:i] + break + } + } + switch strings.ToLower(name) { + case "true", "false", "null", "nan", "inf": + return pathOut, name, true + } + return pathOut, res, false +} + +// execModifier parses the path to find a matching modifier function. +// The input expects that the path already starts with a '@' +func execModifier(json, path string) (pathOut, res string, ok bool) { + name := path[1:] + var hasArgs bool + for i := 1; i < len(path); i++ { + if path[i] == ':' { + pathOut = path[i+1:] + name = path[1:i] + hasArgs = len(pathOut) > 0 + break + } + if path[i] == '|' { + pathOut = path[i:] + name = path[1:i] + break + } + if path[i] == '.' { + pathOut = path[i:] + name = path[1:i] + break + } + } + if fn, ok := modifiers[name]; ok { + var args string + if hasArgs { + var parsedArgs bool + switch pathOut[0] { + case '{', '[', '"': + // json arg + res := Parse(pathOut) + if res.Exists() { + args = squash(pathOut) + pathOut = pathOut[len(args):] + parsedArgs = true + } + } + if !parsedArgs { + // simple arg + i := 0 + for ; i < len(pathOut); i++ { + if pathOut[i] == '|' { + break + } + switch pathOut[i] { + case '{', '[', '"', '(': + s := squash(pathOut[i:]) + i += len(s) - 1 + } + } + args = pathOut[:i] + pathOut = pathOut[i:] + } + } + return pathOut, fn(json, args), true + } + return pathOut, res, false +} + +// unwrap removes the '[]' or '{}' characters around json +func unwrap(json string) string { + json = trim(json) + if len(json) >= 2 && (json[0] == '[' || json[0] == '{') { + json = json[1 : len(json)-1] + } + return json +} + +// DisableModifiers will disable the modifier syntax +var DisableModifiers = false + +var modifiers map[string]func(json, arg string) string + +func init() { + modifiers = map[string]func(json, arg string) string{ + "pretty": modPretty, + "ugly": modUgly, + "reverse": modReverse, + "this": modThis, + "flatten": modFlatten, + "join": modJoin, + "valid": modValid, + "keys": modKeys, + "values": modValues, + "tostr": modToStr, + "fromstr": modFromStr, + "group": modGroup, + "dig": modDig, + } +} + +// AddModifier binds a custom modifier command to the GJSON syntax. +// This operation is not thread safe and should be executed prior to +// using all other gjson function. +func AddModifier(name string, fn func(json, arg string) string) { + modifiers[name] = fn +} + +// ModifierExists returns true when the specified modifier exists. +func ModifierExists(name string, fn func(json, arg string) string) bool { + _, ok := modifiers[name] + return ok +} + +// cleanWS remove any non-whitespace from string +func cleanWS(s string) string { + for i := 0; i < len(s); i++ { + switch s[i] { + case ' ', '\t', '\n', '\r': + continue + default: + var s2 []byte + for i := 0; i < len(s); i++ { + switch s[i] { + case ' ', '\t', '\n', '\r': + s2 = append(s2, s[i]) + } + } + return string(s2) + } + } + return s +} + +// @pretty modifier makes the json look nice. 
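+//
+// Usage is by path, for example:
+//
+//	gjson.Get(json, "@pretty")                    // default formatting
+//	gjson.Get(json, `@pretty:{"sortKeys":true}`)  // with an options argument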
+func modPretty(json, arg string) string { + if len(arg) > 0 { + opts := *pretty.DefaultOptions + Parse(arg).ForEach(func(key, value Result) bool { + switch key.String() { + case "sortKeys": + opts.SortKeys = value.Bool() + case "indent": + opts.Indent = cleanWS(value.String()) + case "prefix": + opts.Prefix = cleanWS(value.String()) + case "width": + opts.Width = int(value.Int()) + } + return true + }) + return bytesString(pretty.PrettyOptions(stringBytes(json), &opts)) + } + return bytesString(pretty.Pretty(stringBytes(json))) +} + +// @this returns the current element. Can be used to retrieve the root element. +func modThis(json, arg string) string { + return json +} + +// @ugly modifier removes all whitespace. +func modUgly(json, arg string) string { + return bytesString(pretty.Ugly(stringBytes(json))) +} + +// @reverse reverses array elements or root object members. +func modReverse(json, arg string) string { + res := Parse(json) + if res.IsArray() { + var values []Result + res.ForEach(func(_, value Result) bool { + values = append(values, value) + return true + }) + out := make([]byte, 0, len(json)) + out = append(out, '[') + for i, j := len(values)-1, 0; i >= 0; i, j = i-1, j+1 { + if j > 0 { + out = append(out, ',') + } + out = append(out, values[i].Raw...) + } + out = append(out, ']') + return bytesString(out) + } + if res.IsObject() { + var keyValues []Result + res.ForEach(func(key, value Result) bool { + keyValues = append(keyValues, key, value) + return true + }) + out := make([]byte, 0, len(json)) + out = append(out, '{') + for i, j := len(keyValues)-2, 0; i >= 0; i, j = i-2, j+1 { + if j > 0 { + out = append(out, ',') + } + out = append(out, keyValues[i+0].Raw...) + out = append(out, ':') + out = append(out, keyValues[i+1].Raw...) + } + out = append(out, '}') + return bytesString(out) + } + return json +} + +// @flatten an array with child arrays. +// +// [1,[2],[3,4],[5,[6,7]]] -> [1,2,3,4,5,[6,7]] +// +// The {"deep":true} arg can be provide for deep flattening. +// +// [1,[2],[3,4],[5,[6,7]]] -> [1,2,3,4,5,6,7] +// +// The original json is returned when the json is not an array. +func modFlatten(json, arg string) string { + res := Parse(json) + if !res.IsArray() { + return json + } + var deep bool + if arg != "" { + Parse(arg).ForEach(func(key, value Result) bool { + if key.String() == "deep" { + deep = value.Bool() + } + return true + }) + } + var out []byte + out = append(out, '[') + var idx int + res.ForEach(func(_, value Result) bool { + var raw string + if value.IsArray() { + if deep { + raw = unwrap(modFlatten(value.Raw, arg)) + } else { + raw = unwrap(value.Raw) + } + } else { + raw = value.Raw + } + raw = strings.TrimSpace(raw) + if len(raw) > 0 { + if idx > 0 { + out = append(out, ',') + } + out = append(out, raw...) + idx++ + } + return true + }) + out = append(out, ']') + return bytesString(out) +} + +// @keys extracts the keys from an object. +// +// {"first":"Tom","last":"Smith"} -> ["first","last"] +func modKeys(json, arg string) string { + v := Parse(json) + if !v.Exists() { + return "[]" + } + obj := v.IsObject() + var out strings.Builder + out.WriteByte('[') + var i int + v.ForEach(func(key, _ Result) bool { + if i > 0 { + out.WriteByte(',') + } + if obj { + out.WriteString(key.Raw) + } else { + out.WriteString("null") + } + i++ + return true + }) + out.WriteByte(']') + return out.String() +} + +// @values extracts the values from an object. 
+// +// {"first":"Tom","last":"Smith"} -> ["Tom","Smith"] +func modValues(json, arg string) string { + v := Parse(json) + if !v.Exists() { + return "[]" + } + if v.IsArray() { + return json + } + var out strings.Builder + out.WriteByte('[') + var i int + v.ForEach(func(_, value Result) bool { + if i > 0 { + out.WriteByte(',') + } + out.WriteString(value.Raw) + i++ + return true + }) + out.WriteByte(']') + return out.String() +} + +// @join multiple objects into a single object. +// +// [{"first":"Tom"},{"last":"Smith"}] -> {"first","Tom","last":"Smith"} +// +// The arg can be "true" to specify that duplicate keys should be preserved. +// +// [{"first":"Tom","age":37},{"age":41}] -> {"first","Tom","age":37,"age":41} +// +// Without preserved keys: +// +// [{"first":"Tom","age":37},{"age":41}] -> {"first","Tom","age":41} +// +// The original json is returned when the json is not an object. +func modJoin(json, arg string) string { + res := Parse(json) + if !res.IsArray() { + return json + } + var preserve bool + if arg != "" { + Parse(arg).ForEach(func(key, value Result) bool { + if key.String() == "preserve" { + preserve = value.Bool() + } + return true + }) + } + var out []byte + out = append(out, '{') + if preserve { + // Preserve duplicate keys. + var idx int + res.ForEach(func(_, value Result) bool { + if !value.IsObject() { + return true + } + if idx > 0 { + out = append(out, ',') + } + out = append(out, unwrap(value.Raw)...) + idx++ + return true + }) + } else { + // Deduplicate keys and generate an object with stable ordering. + var keys []Result + kvals := make(map[string]Result) + res.ForEach(func(_, value Result) bool { + if !value.IsObject() { + return true + } + value.ForEach(func(key, value Result) bool { + k := key.String() + if _, ok := kvals[k]; !ok { + keys = append(keys, key) + } + kvals[k] = value + return true + }) + return true + }) + for i := 0; i < len(keys); i++ { + if i > 0 { + out = append(out, ',') + } + out = append(out, keys[i].Raw...) + out = append(out, ':') + out = append(out, kvals[keys[i].String()].Raw...) + } + } + out = append(out, '}') + return bytesString(out) +} + +// @valid ensures that the json is valid before moving on. An empty string is +// returned when the json is not valid, otherwise it returns the original json. +func modValid(json, arg string) string { + if !Valid(json) { + return "" + } + return json +} + +// @fromstr converts a string to json +// +// "{\"id\":1023,\"name\":\"alert\"}" -> {"id":1023,"name":"alert"} +func modFromStr(json, arg string) string { + if !Valid(json) { + return "" + } + return Parse(json).String() +} + +// @tostr converts a string to json +// +// {"id":1023,"name":"alert"} -> "{\"id\":1023,\"name\":\"alert\"}" +func modToStr(str, arg string) string { + return string(AppendJSONString(nil, str)) +} + +func modGroup(json, arg string) string { + res := Parse(json) + if !res.IsObject() { + return "" + } + var all [][]byte + res.ForEach(func(key, value Result) bool { + if !value.IsArray() { + return true + } + var idx int + value.ForEach(func(_, value Result) bool { + if idx == len(all) { + all = append(all, []byte{}) + } + all[idx] = append(all[idx], ("," + key.Raw + ":" + value.Raw)...) + idx++ + return true + }) + return true + }) + var data []byte + data = append(data, '[') + for i, item := range all { + if i > 0 { + data = append(data, ',') + } + data = append(data, '{') + data = append(data, item[1:]...) 
+ data = append(data, '}') + } + data = append(data, ']') + return string(data) +} + +// stringHeader instead of reflect.StringHeader +type stringHeader struct { + data unsafe.Pointer + len int +} + +// sliceHeader instead of reflect.SliceHeader +type sliceHeader struct { + data unsafe.Pointer + len int + cap int +} + +// getBytes casts the input json bytes to a string and safely returns the +// results as uniquely allocated data. This operation is intended to minimize +// copies and allocations for the large json string->[]byte. +func getBytes(json []byte, path string) Result { + var result Result + if json != nil { + // unsafe cast to string + result = Get(*(*string)(unsafe.Pointer(&json)), path) + // safely get the string headers + rawhi := *(*stringHeader)(unsafe.Pointer(&result.Raw)) + strhi := *(*stringHeader)(unsafe.Pointer(&result.Str)) + // create byte slice headers + rawh := sliceHeader{data: rawhi.data, len: rawhi.len, cap: rawhi.len} + strh := sliceHeader{data: strhi.data, len: strhi.len, cap: rawhi.len} + if strh.data == nil { + // str is nil + if rawh.data == nil { + // raw is nil + result.Raw = "" + } else { + // raw has data, safely copy the slice header to a string + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + } + result.Str = "" + } else if rawh.data == nil { + // raw is nil + result.Raw = "" + // str has data, safely copy the slice header to a string + result.Str = string(*(*[]byte)(unsafe.Pointer(&strh))) + } else if uintptr(strh.data) >= uintptr(rawh.data) && + uintptr(strh.data)+uintptr(strh.len) <= + uintptr(rawh.data)+uintptr(rawh.len) { + // Str is a substring of Raw. + start := uintptr(strh.data) - uintptr(rawh.data) + // safely copy the raw slice header + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + // substring the raw + result.Str = result.Raw[start : start+uintptr(strh.len)] + } else { + // safely copy both the raw and str slice headers to strings + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + result.Str = string(*(*[]byte)(unsafe.Pointer(&strh))) + } + } + return result +} + +// fillIndex finds the position of Raw data and assigns it to the Index field +// of the resulting value. If the position cannot be found then Index zero is +// used instead. +func fillIndex(json string, c *parseContext) { + if len(c.value.Raw) > 0 && !c.calcd { + jhdr := *(*stringHeader)(unsafe.Pointer(&json)) + rhdr := *(*stringHeader)(unsafe.Pointer(&(c.value.Raw))) + c.value.Index = int(uintptr(rhdr.data) - uintptr(jhdr.data)) + if c.value.Index < 0 || c.value.Index >= len(json) { + c.value.Index = 0 + } + } +} + +func stringBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer(&sliceHeader{ + data: (*stringHeader)(unsafe.Pointer(&s)).data, + len: len(s), + cap: len(s), + })) +} + +func bytesString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +func revSquash(json string) string { + // reverse squash + // expects that the tail character is a ']' or '}' or ')' or '"' + // squash the value, ignoring all nested arrays and objects. 
+ i := len(json) - 1 + var depth int + if json[i] != '"' { + depth++ + } + if json[i] == '}' || json[i] == ']' || json[i] == ')' { + i-- + } + for ; i >= 0; i-- { + switch json[i] { + case '"': + i-- + for ; i >= 0; i-- { + if json[i] == '"' { + esc := 0 + for i > 0 && json[i-1] == '\\' { + i-- + esc++ + } + if esc%2 == 1 { + continue + } + i += esc + break + } + } + if depth == 0 { + if i < 0 { + i = 0 + } + return json[i:] + } + case '}', ']', ')': + depth++ + case '{', '[', '(': + depth-- + if depth == 0 { + return json[i:] + } + } + } + return json +} + +// Paths returns the original GJSON paths for a Result where the Result came +// from a simple query path that returns an array, like: +// +// gjson.Get(json, "friends.#.first") +// +// The returned value will be in the form of a JSON array: +// +// ["friends.0.first","friends.1.first","friends.2.first"] +// +// The param 'json' must be the original JSON used when calling Get. +// +// Returns an empty string if the paths cannot be determined, which can happen +// when the Result came from a path that contained a multipath, modifier, +// or a nested query. +func (t Result) Paths(json string) []string { + if t.Indexes == nil { + return nil + } + paths := make([]string, 0, len(t.Indexes)) + t.ForEach(func(_, value Result) bool { + paths = append(paths, value.Path(json)) + return true + }) + if len(paths) != len(t.Indexes) { + return nil + } + return paths +} + +// Path returns the original GJSON path for a Result where the Result came +// from a simple path that returns a single value, like: +// +// gjson.Get(json, "friends.#(last=Murphy)") +// +// The returned value will be in the form of a JSON string: +// +// "friends.0" +// +// The param 'json' must be the original JSON used when calling Get. +// +// Returns an empty string if the paths cannot be determined, which can happen +// when the Result came from a path that contained a multipath, modifier, +// or a nested query. +func (t Result) Path(json string) string { + var path []byte + var comps []string // raw components + i := t.Index - 1 + if t.Index+len(t.Raw) > len(json) { + // JSON cannot safely contain Result. + goto fail + } + if !strings.HasPrefix(json[t.Index:], t.Raw) { + // Result is not at the JSON index as exepcted. + goto fail + } + for ; i >= 0; i-- { + if json[i] <= ' ' { + continue + } + if json[i] == ':' { + // inside of object, get the key + for ; i >= 0; i-- { + if json[i] != '"' { + continue + } + break + } + raw := revSquash(json[:i+1]) + i = i - len(raw) + comps = append(comps, raw) + // key gotten, now squash the rest + raw = revSquash(json[:i+1]) + i = i - len(raw) + i++ // increment the index for next loop step + } else if json[i] == '{' { + // Encountered an open object. The original result was probably an + // object key. + goto fail + } else if json[i] == ',' || json[i] == '[' { + // inside of an array, count the position + var arrIdx int + if json[i] == ',' { + arrIdx++ + i-- + } + for ; i >= 0; i-- { + if json[i] == ':' { + // Encountered an unexpected colon. The original result was + // probably an object key. 
+ goto fail + } else if json[i] == ',' { + arrIdx++ + } else if json[i] == '[' { + comps = append(comps, strconv.Itoa(arrIdx)) + break + } else if json[i] == ']' || json[i] == '}' || json[i] == '"' { + raw := revSquash(json[:i+1]) + i = i - len(raw) + 1 + } + } + } + } + if len(comps) == 0 { + if DisableModifiers { + goto fail + } + return "@this" + } + for i := len(comps) - 1; i >= 0; i-- { + rcomp := Parse(comps[i]) + if !rcomp.Exists() { + goto fail + } + comp := Escape(rcomp.String()) + path = append(path, '.') + path = append(path, comp...) + } + if len(path) > 0 { + path = path[1:] + } + return string(path) +fail: + return "" +} + +// isSafePathKeyChar returns true if the input character is safe for not +// needing escaping. +func isSafePathKeyChar(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || + (c >= '0' && c <= '9') || c <= ' ' || c > '~' || c == '_' || + c == '-' || c == ':' +} + +// Escape returns an escaped path component. +// +// json := `{ +// "user":{ +// "first.name": "Janet", +// "last.name": "Prichard" +// } +// }` +// user := gjson.Get(json, "user") +// println(user.Get(gjson.Escape("first.name")) +// println(user.Get(gjson.Escape("last.name")) +// // Output: +// // Janet +// // Prichard +func Escape(comp string) string { + for i := 0; i < len(comp); i++ { + if !isSafePathKeyChar(comp[i]) { + ncomp := make([]byte, len(comp)+1) + copy(ncomp, comp[:i]) + ncomp = ncomp[:i] + for ; i < len(comp); i++ { + if !isSafePathKeyChar(comp[i]) { + ncomp = append(ncomp, '\\') + } + ncomp = append(ncomp, comp[i]) + } + return string(ncomp) + } + } + return comp +} + +func parseRecursiveDescent(all []Result, parent Result, path string) []Result { + if res := parent.Get(path); res.Exists() { + all = append(all, res) + } + if parent.IsArray() || parent.IsObject() { + parent.ForEach(func(_, val Result) bool { + all = parseRecursiveDescent(all, val, path) + return true + }) + } + return all +} + +func modDig(json, arg string) string { + all := parseRecursiveDescent(nil, Parse(json), arg) + var out []byte + out = append(out, '[') + for i, res := range all { + if i > 0 { + out = append(out, ',') + } + out = append(out, res.Raw...) + } + out = append(out, ']') + return string(out) +} diff --git a/vendor/github.com/tidwall/gjson/logo.png b/vendor/github.com/tidwall/gjson/logo.png new file mode 100644 index 000000000..17a8bbe9d Binary files /dev/null and b/vendor/github.com/tidwall/gjson/logo.png differ diff --git a/vendor/github.com/tidwall/match/LICENSE b/vendor/github.com/tidwall/match/LICENSE new file mode 100644 index 000000000..58f5819a4 --- /dev/null +++ b/vendor/github.com/tidwall/match/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tidwall/match/README.md b/vendor/github.com/tidwall/match/README.md new file mode 100644 index 000000000..5fdd4cf63 --- /dev/null +++ b/vendor/github.com/tidwall/match/README.md @@ -0,0 +1,29 @@ +# Match + +[![GoDoc](https://godoc.org/github.com/tidwall/match?status.svg)](https://godoc.org/github.com/tidwall/match) + +Match is a very simple pattern matcher where '*' matches on any +number characters and '?' matches on any one character. + +## Installing + +``` +go get -u github.com/tidwall/match +``` + +## Example + +```go +match.Match("hello", "*llo") +match.Match("jello", "?ello") +match.Match("hello", "h*o") +``` + + +## Contact + +Josh Baker [@tidwall](http://twitter.com/tidwall) + +## License + +Redcon source code is available under the MIT [License](/LICENSE). diff --git a/vendor/github.com/tidwall/match/match.go b/vendor/github.com/tidwall/match/match.go new file mode 100644 index 000000000..11da28f1b --- /dev/null +++ b/vendor/github.com/tidwall/match/match.go @@ -0,0 +1,237 @@ +// Package match provides a simple pattern matcher with unicode support. +package match + +import ( + "unicode/utf8" +) + +// Match returns true if str matches pattern. This is a very +// simple wildcard match where '*' matches on any number characters +// and '?' matches on any one character. +// +// pattern: +// { term } +// term: +// '*' matches any sequence of non-Separator characters +// '?' matches any single non-Separator character +// c matches character c (c != '*', '?', '\\') +// '\\' c matches character c +// +func Match(str, pattern string) bool { + if pattern == "*" { + return true + } + return match(str, pattern, 0, nil, -1) == rMatch +} + +// MatchLimit is the same as Match but will limit the complexity of the match +// operation. This is to avoid long running matches, specifically to avoid ReDos +// attacks from arbritary inputs. +// +// How it works: +// The underlying match routine is recursive and may call itself when it +// encounters a sandwiched wildcard pattern, such as: `user:*:name`. +// Everytime it calls itself a counter is incremented. +// The operation is stopped when counter > maxcomp*len(str). +func MatchLimit(str, pattern string, maxcomp int) (matched, stopped bool) { + if pattern == "*" { + return true, false + } + counter := 0 + r := match(str, pattern, len(str), &counter, maxcomp) + if r == rStop { + return false, true + } + return r == rMatch, false +} + +type result int + +const ( + rNoMatch result = iota + rMatch + rStop +) + +func match(str, pat string, slen int, counter *int, maxcomp int) result { + // check complexity limit + if maxcomp > -1 { + if *counter > slen*maxcomp { + return rStop + } + *counter++ + } + + for len(pat) > 0 { + var wild bool + pc, ps := rune(pat[0]), 1 + if pc > 0x7f { + pc, ps = utf8.DecodeRuneInString(pat) + } + var sc rune + var ss int + if len(str) > 0 { + sc, ss = rune(str[0]), 1 + if sc > 0x7f { + sc, ss = utf8.DecodeRuneInString(str) + } + } + switch pc { + case '?': + if ss == 0 { + return rNoMatch + } + case '*': + // Ignore repeating stars. + for len(pat) > 1 && pat[1] == '*' { + pat = pat[1:] + } + + // If this star is the last character then it must be a match. 
+ if len(pat) == 1 { + return rMatch + } + + // Match and trim any non-wildcard suffix characters. + var ok bool + str, pat, ok = matchTrimSuffix(str, pat) + if !ok { + return rNoMatch + } + + // Check for single star again. + if len(pat) == 1 { + return rMatch + } + + // Perform recursive wildcard search. + r := match(str, pat[1:], slen, counter, maxcomp) + if r != rNoMatch { + return r + } + if len(str) == 0 { + return rNoMatch + } + wild = true + default: + if ss == 0 { + return rNoMatch + } + if pc == '\\' { + pat = pat[ps:] + pc, ps = utf8.DecodeRuneInString(pat) + if ps == 0 { + return rNoMatch + } + } + if sc != pc { + return rNoMatch + } + } + str = str[ss:] + if !wild { + pat = pat[ps:] + } + } + if len(str) == 0 { + return rMatch + } + return rNoMatch +} + +// matchTrimSuffix matches and trims any non-wildcard suffix characters. +// Returns the trimed string and pattern. +// +// This is called because the pattern contains extra data after the wildcard +// star. Here we compare any suffix characters in the pattern to the suffix of +// the target string. Basically a reverse match that stops when a wildcard +// character is reached. This is a little trickier than a forward match because +// we need to evaluate an escaped character in reverse. +// +// Any matched characters will be trimmed from both the target +// string and the pattern. +func matchTrimSuffix(str, pat string) (string, string, bool) { + // It's expected that the pattern has at least two bytes and the first byte + // is a wildcard star '*' + match := true + for len(str) > 0 && len(pat) > 1 { + pc, ps := utf8.DecodeLastRuneInString(pat) + var esc bool + for i := 0; ; i++ { + if pat[len(pat)-ps-i-1] != '\\' { + if i&1 == 1 { + esc = true + ps++ + } + break + } + } + if pc == '*' && !esc { + match = true + break + } + sc, ss := utf8.DecodeLastRuneInString(str) + if !((pc == '?' && !esc) || pc == sc) { + match = false + break + } + str = str[:len(str)-ss] + pat = pat[:len(pat)-ps] + } + return str, pat, match +} + +var maxRuneBytes = [...]byte{244, 143, 191, 191} + +// Allowable parses the pattern and determines the minimum and maximum allowable +// values that the pattern can represent. +// When the max cannot be determined, 'true' will be returned +// for infinite. +func Allowable(pattern string) (min, max string) { + if pattern == "" || pattern[0] == '*' { + return "", "" + } + + minb := make([]byte, 0, len(pattern)) + maxb := make([]byte, 0, len(pattern)) + var wild bool + for i := 0; i < len(pattern); i++ { + if pattern[i] == '*' { + wild = true + break + } + if pattern[i] == '?' { + minb = append(minb, 0) + maxb = append(maxb, maxRuneBytes[:]...) + } else { + minb = append(minb, pattern[i]) + maxb = append(maxb, pattern[i]) + } + } + if wild { + r, n := utf8.DecodeLastRune(maxb) + if r != utf8.RuneError { + if r < utf8.MaxRune { + r++ + if r > 0x7f { + b := make([]byte, 4) + nn := utf8.EncodeRune(b, r) + maxb = append(maxb[:len(maxb)-n], b[:nn]...) + } else { + maxb = append(maxb[:len(maxb)-n], byte(r)) + } + } + } + } + return string(minb), string(maxb) +} + +// IsPattern returns true if the string is a pattern. +func IsPattern(str string) bool { + for i := 0; i < len(str); i++ { + if str[i] == '*' || str[i] == '?' 
{ + return true + } + } + return false +} diff --git a/vendor/github.com/tidwall/pretty/LICENSE b/vendor/github.com/tidwall/pretty/LICENSE new file mode 100644 index 000000000..993b83f23 --- /dev/null +++ b/vendor/github.com/tidwall/pretty/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2017 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tidwall/pretty/README.md b/vendor/github.com/tidwall/pretty/README.md new file mode 100644 index 000000000..d3be5e54e --- /dev/null +++ b/vendor/github.com/tidwall/pretty/README.md @@ -0,0 +1,122 @@ +# Pretty + +[![GoDoc](https://img.shields.io/badge/api-reference-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/tidwall/pretty) + +Pretty is a Go package that provides [fast](#performance) methods for formatting JSON for human readability, or to compact JSON for smaller payloads. + +Getting Started +=============== + +## Installing + +To start using Pretty, install Go and run `go get`: + +```sh +$ go get -u github.com/tidwall/pretty +``` + +This will retrieve the library. + +## Pretty + +Using this example: + +```json +{"name": {"first":"Tom","last":"Anderson"}, "age":37, +"children": ["Sara","Alex","Jack"], +"fav.movie": "Deer Hunter", "friends": [ + {"first": "Janet", "last": "Murphy", "age": 44} + ]} +``` + +The following code: +```go +result = pretty.Pretty(example) +``` + +Will format the json to: + +```json +{ + "name": { + "first": "Tom", + "last": "Anderson" + }, + "age": 37, + "children": ["Sara", "Alex", "Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + { + "first": "Janet", + "last": "Murphy", + "age": 44 + } + ] +} +``` + +## Color + +Color will colorize the json for outputing to the screen. + +```json +result = pretty.Color(json, nil) +``` + +Will add color to the result for printing to the terminal. +The second param is used for a customizing the style, and passing nil will use the default `pretty.TerminalStyle`. 
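+
+For example, to write colorized, pretty-formatted JSON to a terminal (a
+minimal sketch; the sample document below is illustrative and not part of
+the library):
+
+```go
+package main
+
+import (
+	"os"
+
+	"github.com/tidwall/pretty"
+)
+
+func main() {
+	json := []byte(`{"name":{"first":"Tom","last":"Anderson"},"age":37}`)
+	// Format first, then colorize using the default TerminalStyle (nil).
+	os.Stdout.Write(pretty.Color(pretty.Pretty(json), nil))
+}
+```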
+ +## Ugly + +The following code: +```go +result = pretty.Ugly(example) +``` + +Will format the json to: + +```json +{"name":{"first":"Tom","last":"Anderson"},"age":37,"children":["Sara","Alex","Jack"],"fav.movie":"Deer Hunter","friends":[{"first":"Janet","last":"Murphy","age":44}]}``` +``` + +## Customized output + +There's a `PrettyOptions(json, opts)` function which allows for customizing the output with the following options: + +```go +type Options struct { + // Width is an max column width for single line arrays + // Default is 80 + Width int + // Prefix is a prefix for all lines + // Default is an empty string + Prefix string + // Indent is the nested indentation + // Default is two spaces + Indent string + // SortKeys will sort the keys alphabetically + // Default is false + SortKeys bool +} +``` +## Performance + +Benchmarks of Pretty alongside the builtin `encoding/json` Indent/Compact methods. +``` +BenchmarkPretty-16 1000000 1034 ns/op 720 B/op 2 allocs/op +BenchmarkPrettySortKeys-16 586797 1983 ns/op 2848 B/op 14 allocs/op +BenchmarkUgly-16 4652365 254 ns/op 240 B/op 1 allocs/op +BenchmarkUglyInPlace-16 6481233 183 ns/op 0 B/op 0 allocs/op +BenchmarkJSONIndent-16 450654 2687 ns/op 1221 B/op 0 allocs/op +BenchmarkJSONCompact-16 685111 1699 ns/op 442 B/op 0 allocs/op +``` + +*These benchmarks were run on a MacBook Pro 2.4 GHz 8-Core Intel Core i9.* + +## Contact +Josh Baker [@tidwall](http://twitter.com/tidwall) + +## License + +Pretty source code is available under the MIT [License](/LICENSE). + diff --git a/vendor/github.com/tidwall/pretty/pretty.go b/vendor/github.com/tidwall/pretty/pretty.go new file mode 100644 index 000000000..f3f756aad --- /dev/null +++ b/vendor/github.com/tidwall/pretty/pretty.go @@ -0,0 +1,674 @@ +package pretty + +import ( + "bytes" + "encoding/json" + "sort" + "strconv" +) + +// Options is Pretty options +type Options struct { + // Width is an max column width for single line arrays + // Default is 80 + Width int + // Prefix is a prefix for all lines + // Default is an empty string + Prefix string + // Indent is the nested indentation + // Default is two spaces + Indent string + // SortKeys will sort the keys alphabetically + // Default is false + SortKeys bool +} + +// DefaultOptions is the default options for pretty formats. +var DefaultOptions = &Options{Width: 80, Prefix: "", Indent: " ", SortKeys: false} + +// Pretty converts the input json into a more human readable format where each +// element is on it's own line with clear indentation. +func Pretty(json []byte) []byte { return PrettyOptions(json, nil) } + +// PrettyOptions is like Pretty but with customized options. +func PrettyOptions(json []byte, opts *Options) []byte { + if opts == nil { + opts = DefaultOptions + } + buf := make([]byte, 0, len(json)) + if len(opts.Prefix) != 0 { + buf = append(buf, opts.Prefix...) + } + buf, _, _, _ = appendPrettyAny(buf, json, 0, true, + opts.Width, opts.Prefix, opts.Indent, opts.SortKeys, + 0, 0, -1) + if len(buf) > 0 { + buf = append(buf, '\n') + } + return buf +} + +// Ugly removes insignificant space characters from the input json byte slice +// and returns the compacted result. +func Ugly(json []byte) []byte { + buf := make([]byte, 0, len(json)) + return ugly(buf, json) +} + +// UglyInPlace removes insignificant space characters from the input json +// byte slice and returns the compacted result. This method reuses the +// input json buffer to avoid allocations. Do not use the original bytes +// slice upon return. 
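+//
+// A minimal usage sketch (illustrative, not from the upstream docs):
+//
+//	buf := []byte("{ \"age\": 37 }")
+//	buf = UglyInPlace(buf) // buf is now {"age":37}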
+func UglyInPlace(json []byte) []byte { return ugly(json, json) } + +func ugly(dst, src []byte) []byte { + dst = dst[:0] + for i := 0; i < len(src); i++ { + if src[i] > ' ' { + dst = append(dst, src[i]) + if src[i] == '"' { + for i = i + 1; i < len(src); i++ { + dst = append(dst, src[i]) + if src[i] == '"' { + j := i - 1 + for ; ; j-- { + if src[j] != '\\' { + break + } + } + if (j-i)%2 != 0 { + break + } + } + } + } + } + } + return dst +} + +func isNaNOrInf(src []byte) bool { + return src[0] == 'i' || //Inf + src[0] == 'I' || // inf + src[0] == '+' || // +Inf + src[0] == 'N' || // Nan + (src[0] == 'n' && len(src) > 1 && src[1] != 'u') // nan +} + +func appendPrettyAny(buf, json []byte, i int, pretty bool, width int, prefix, indent string, sortkeys bool, tabs, nl, max int) ([]byte, int, int, bool) { + for ; i < len(json); i++ { + if json[i] <= ' ' { + continue + } + if json[i] == '"' { + return appendPrettyString(buf, json, i, nl) + } + + if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' || isNaNOrInf(json[i:]) { + return appendPrettyNumber(buf, json, i, nl) + } + if json[i] == '{' { + return appendPrettyObject(buf, json, i, '{', '}', pretty, width, prefix, indent, sortkeys, tabs, nl, max) + } + if json[i] == '[' { + return appendPrettyObject(buf, json, i, '[', ']', pretty, width, prefix, indent, sortkeys, tabs, nl, max) + } + switch json[i] { + case 't': + return append(buf, 't', 'r', 'u', 'e'), i + 4, nl, true + case 'f': + return append(buf, 'f', 'a', 'l', 's', 'e'), i + 5, nl, true + case 'n': + return append(buf, 'n', 'u', 'l', 'l'), i + 4, nl, true + } + } + return buf, i, nl, true +} + +type pair struct { + kstart, kend int + vstart, vend int +} + +type byKeyVal struct { + sorted bool + json []byte + buf []byte + pairs []pair +} + +func (arr *byKeyVal) Len() int { + return len(arr.pairs) +} +func (arr *byKeyVal) Less(i, j int) bool { + if arr.isLess(i, j, byKey) { + return true + } + if arr.isLess(j, i, byKey) { + return false + } + return arr.isLess(i, j, byVal) +} +func (arr *byKeyVal) Swap(i, j int) { + arr.pairs[i], arr.pairs[j] = arr.pairs[j], arr.pairs[i] + arr.sorted = true +} + +type byKind int + +const ( + byKey byKind = 0 + byVal byKind = 1 +) + +type jtype int + +const ( + jnull jtype = iota + jfalse + jnumber + jstring + jtrue + jjson +) + +func getjtype(v []byte) jtype { + if len(v) == 0 { + return jnull + } + switch v[0] { + case '"': + return jstring + case 'f': + return jfalse + case 't': + return jtrue + case 'n': + return jnull + case '[', '{': + return jjson + default: + return jnumber + } +} + +func (arr *byKeyVal) isLess(i, j int, kind byKind) bool { + k1 := arr.json[arr.pairs[i].kstart:arr.pairs[i].kend] + k2 := arr.json[arr.pairs[j].kstart:arr.pairs[j].kend] + var v1, v2 []byte + if kind == byKey { + v1 = k1 + v2 = k2 + } else { + v1 = bytes.TrimSpace(arr.buf[arr.pairs[i].vstart:arr.pairs[i].vend]) + v2 = bytes.TrimSpace(arr.buf[arr.pairs[j].vstart:arr.pairs[j].vend]) + if len(v1) >= len(k1)+1 { + v1 = bytes.TrimSpace(v1[len(k1)+1:]) + } + if len(v2) >= len(k2)+1 { + v2 = bytes.TrimSpace(v2[len(k2)+1:]) + } + } + t1 := getjtype(v1) + t2 := getjtype(v2) + if t1 < t2 { + return true + } + if t1 > t2 { + return false + } + if t1 == jstring { + s1 := parsestr(v1) + s2 := parsestr(v2) + return string(s1) < string(s2) + } + if t1 == jnumber { + n1, _ := strconv.ParseFloat(string(v1), 64) + n2, _ := strconv.ParseFloat(string(v2), 64) + return n1 < n2 + } + return string(v1) < string(v2) + +} + +func parsestr(s []byte) []byte { + for i := 1; i < len(s); i++ { + 
if s[i] == '\\' { + var str string + json.Unmarshal(s, &str) + return []byte(str) + } + if s[i] == '"' { + return s[1:i] + } + } + return nil +} + +func appendPrettyObject(buf, json []byte, i int, open, close byte, pretty bool, width int, prefix, indent string, sortkeys bool, tabs, nl, max int) ([]byte, int, int, bool) { + var ok bool + if width > 0 { + if pretty && open == '[' && max == -1 { + // here we try to create a single line array + max := width - (len(buf) - nl) + if max > 3 { + s1, s2 := len(buf), i + buf, i, _, ok = appendPrettyObject(buf, json, i, '[', ']', false, width, prefix, "", sortkeys, 0, 0, max) + if ok && len(buf)-s1 <= max { + return buf, i, nl, true + } + buf = buf[:s1] + i = s2 + } + } else if max != -1 && open == '{' { + return buf, i, nl, false + } + } + buf = append(buf, open) + i++ + var pairs []pair + if open == '{' && sortkeys { + pairs = make([]pair, 0, 8) + } + var n int + for ; i < len(json); i++ { + if json[i] <= ' ' { + continue + } + if json[i] == close { + if pretty { + if open == '{' && sortkeys { + buf = sortPairs(json, buf, pairs) + } + if n > 0 { + nl = len(buf) + if buf[nl-1] == ' ' { + buf[nl-1] = '\n' + } else { + buf = append(buf, '\n') + } + } + if buf[len(buf)-1] != open { + buf = appendTabs(buf, prefix, indent, tabs) + } + } + buf = append(buf, close) + return buf, i + 1, nl, open != '{' + } + if open == '[' || json[i] == '"' { + if n > 0 { + buf = append(buf, ',') + if width != -1 && open == '[' { + buf = append(buf, ' ') + } + } + var p pair + if pretty { + nl = len(buf) + if buf[nl-1] == ' ' { + buf[nl-1] = '\n' + } else { + buf = append(buf, '\n') + } + if open == '{' && sortkeys { + p.kstart = i + p.vstart = len(buf) + } + buf = appendTabs(buf, prefix, indent, tabs+1) + } + if open == '{' { + buf, i, nl, _ = appendPrettyString(buf, json, i, nl) + if sortkeys { + p.kend = i + } + buf = append(buf, ':') + if pretty { + buf = append(buf, ' ') + } + } + buf, i, nl, ok = appendPrettyAny(buf, json, i, pretty, width, prefix, indent, sortkeys, tabs+1, nl, max) + if max != -1 && !ok { + return buf, i, nl, false + } + if pretty && open == '{' && sortkeys { + p.vend = len(buf) + if p.kstart > p.kend || p.vstart > p.vend { + // bad data. disable sorting + sortkeys = false + } else { + pairs = append(pairs, p) + } + } + i-- + n++ + } + } + return buf, i, nl, open != '{' +} +func sortPairs(json, buf []byte, pairs []pair) []byte { + if len(pairs) == 0 { + return buf + } + vstart := pairs[0].vstart + vend := pairs[len(pairs)-1].vend + arr := byKeyVal{false, json, buf, pairs} + sort.Stable(&arr) + if !arr.sorted { + return buf + } + nbuf := make([]byte, 0, vend-vstart) + for i, p := range pairs { + nbuf = append(nbuf, buf[p.vstart:p.vend]...) + if i < len(pairs)-1 { + nbuf = append(nbuf, ',') + nbuf = append(nbuf, '\n') + } + } + return append(buf[:vstart], nbuf...) 
+} + +func appendPrettyString(buf, json []byte, i, nl int) ([]byte, int, int, bool) { + s := i + i++ + for ; i < len(json); i++ { + if json[i] == '"' { + var sc int + for j := i - 1; j > s; j-- { + if json[j] == '\\' { + sc++ + } else { + break + } + } + if sc%2 == 1 { + continue + } + i++ + break + } + } + return append(buf, json[s:i]...), i, nl, true +} + +func appendPrettyNumber(buf, json []byte, i, nl int) ([]byte, int, int, bool) { + s := i + i++ + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' || json[i] == ':' || json[i] == ']' || json[i] == '}' { + break + } + } + return append(buf, json[s:i]...), i, nl, true +} + +func appendTabs(buf []byte, prefix, indent string, tabs int) []byte { + if len(prefix) != 0 { + buf = append(buf, prefix...) + } + if len(indent) == 2 && indent[0] == ' ' && indent[1] == ' ' { + for i := 0; i < tabs; i++ { + buf = append(buf, ' ', ' ') + } + } else { + for i := 0; i < tabs; i++ { + buf = append(buf, indent...) + } + } + return buf +} + +// Style is the color style +type Style struct { + Key, String, Number [2]string + True, False, Null [2]string + Escape [2]string + Append func(dst []byte, c byte) []byte +} + +func hexp(p byte) byte { + switch { + case p < 10: + return p + '0' + default: + return (p - 10) + 'a' + } +} + +// TerminalStyle is for terminals +var TerminalStyle *Style + +func init() { + TerminalStyle = &Style{ + Key: [2]string{"\x1B[94m", "\x1B[0m"}, + String: [2]string{"\x1B[92m", "\x1B[0m"}, + Number: [2]string{"\x1B[93m", "\x1B[0m"}, + True: [2]string{"\x1B[96m", "\x1B[0m"}, + False: [2]string{"\x1B[96m", "\x1B[0m"}, + Null: [2]string{"\x1B[91m", "\x1B[0m"}, + Escape: [2]string{"\x1B[35m", "\x1B[0m"}, + Append: func(dst []byte, c byte) []byte { + if c < ' ' && (c != '\r' && c != '\n' && c != '\t' && c != '\v') { + dst = append(dst, "\\u00"...) + dst = append(dst, hexp((c>>4)&0xF)) + return append(dst, hexp((c)&0xF)) + } + return append(dst, c) + }, + } +} + +// Color will colorize the json. The style parma is used for customizing +// the colors. Passing nil to the style param will use the default +// TerminalStyle. +func Color(src []byte, style *Style) []byte { + if style == nil { + style = TerminalStyle + } + apnd := style.Append + if apnd == nil { + apnd = func(dst []byte, c byte) []byte { + return append(dst, c) + } + } + type stackt struct { + kind byte + key bool + } + var dst []byte + var stack []stackt + for i := 0; i < len(src); i++ { + if src[i] == '"' { + key := len(stack) > 0 && stack[len(stack)-1].key + if key { + dst = append(dst, style.Key[0]...) + } else { + dst = append(dst, style.String[0]...) + } + dst = apnd(dst, '"') + esc := false + uesc := 0 + for i = i + 1; i < len(src); i++ { + if src[i] == '\\' { + if key { + dst = append(dst, style.Key[1]...) + } else { + dst = append(dst, style.String[1]...) + } + dst = append(dst, style.Escape[0]...) + dst = apnd(dst, src[i]) + esc = true + if i+1 < len(src) && src[i+1] == 'u' { + uesc = 5 + } else { + uesc = 1 + } + } else if esc { + dst = apnd(dst, src[i]) + if uesc == 1 { + esc = false + dst = append(dst, style.Escape[1]...) + if key { + dst = append(dst, style.Key[0]...) + } else { + dst = append(dst, style.String[0]...) + } + } else { + uesc-- + } + } else { + dst = apnd(dst, src[i]) + } + if src[i] == '"' { + j := i - 1 + for ; ; j-- { + if src[j] != '\\' { + break + } + } + if (j-i)%2 != 0 { + break + } + } + } + if esc { + dst = append(dst, style.Escape[1]...) + } else if key { + dst = append(dst, style.Key[1]...) 
+ } else { + dst = append(dst, style.String[1]...) + } + } else if src[i] == '{' || src[i] == '[' { + stack = append(stack, stackt{src[i], src[i] == '{'}) + dst = apnd(dst, src[i]) + } else if (src[i] == '}' || src[i] == ']') && len(stack) > 0 { + stack = stack[:len(stack)-1] + dst = apnd(dst, src[i]) + } else if (src[i] == ':' || src[i] == ',') && len(stack) > 0 && stack[len(stack)-1].kind == '{' { + stack[len(stack)-1].key = !stack[len(stack)-1].key + dst = apnd(dst, src[i]) + } else { + var kind byte + if (src[i] >= '0' && src[i] <= '9') || src[i] == '-' || isNaNOrInf(src[i:]) { + kind = '0' + dst = append(dst, style.Number[0]...) + } else if src[i] == 't' { + kind = 't' + dst = append(dst, style.True[0]...) + } else if src[i] == 'f' { + kind = 'f' + dst = append(dst, style.False[0]...) + } else if src[i] == 'n' { + kind = 'n' + dst = append(dst, style.Null[0]...) + } else { + dst = apnd(dst, src[i]) + } + if kind != 0 { + for ; i < len(src); i++ { + if src[i] <= ' ' || src[i] == ',' || src[i] == ':' || src[i] == ']' || src[i] == '}' { + i-- + break + } + dst = apnd(dst, src[i]) + } + if kind == '0' { + dst = append(dst, style.Number[1]...) + } else if kind == 't' { + dst = append(dst, style.True[1]...) + } else if kind == 'f' { + dst = append(dst, style.False[1]...) + } else if kind == 'n' { + dst = append(dst, style.Null[1]...) + } + } + } + } + return dst +} + +// Spec strips out comments and trailing commas and convert the input to a +// valid JSON per the official spec: https://tools.ietf.org/html/rfc8259 +// +// The resulting JSON will always be the same length as the input and it will +// include all of the same line breaks at matching offsets. This is to ensure +// the result can be later processed by a external parser and that that +// parser will report messages or errors with the correct offsets. +func Spec(src []byte) []byte { + return spec(src, nil) +} + +// SpecInPlace is the same as Spec, but this method reuses the input json +// buffer to avoid allocations. Do not use the original bytes slice upon return. 
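+//
+// A minimal usage sketch (illustrative, not from the upstream docs); the
+// input literal is hypothetical:
+//
+//	src := []byte("{\n  \"age\": 37, // comment\n}")
+//	out := SpecInPlace(src)
+//	// out is valid JSON of the same length as the input: the comment and
+//	// the trailing comma are overwritten with spaces, line breaks are kept.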
+func SpecInPlace(src []byte) []byte { + return spec(src, src) +} + +func spec(src, dst []byte) []byte { + dst = dst[:0] + for i := 0; i < len(src); i++ { + if src[i] == '/' { + if i < len(src)-1 { + if src[i+1] == '/' { + dst = append(dst, ' ', ' ') + i += 2 + for ; i < len(src); i++ { + if src[i] == '\n' { + dst = append(dst, '\n') + break + } else if src[i] == '\t' || src[i] == '\r' { + dst = append(dst, src[i]) + } else { + dst = append(dst, ' ') + } + } + continue + } + if src[i+1] == '*' { + dst = append(dst, ' ', ' ') + i += 2 + for ; i < len(src)-1; i++ { + if src[i] == '*' && src[i+1] == '/' { + dst = append(dst, ' ', ' ') + i++ + break + } else if src[i] == '\n' || src[i] == '\t' || + src[i] == '\r' { + dst = append(dst, src[i]) + } else { + dst = append(dst, ' ') + } + } + continue + } + } + } + dst = append(dst, src[i]) + if src[i] == '"' { + for i = i + 1; i < len(src); i++ { + dst = append(dst, src[i]) + if src[i] == '"' { + j := i - 1 + for ; ; j-- { + if src[j] != '\\' { + break + } + } + if (j-i)%2 != 0 { + break + } + } + } + } else if src[i] == '}' || src[i] == ']' { + for j := len(dst) - 2; j >= 0; j-- { + if dst[j] <= ' ' { + continue + } + if dst[j] == ',' { + dst[j] = ' ' + } + break + } + } + } + return dst +} diff --git a/vendor/github.com/tidwall/sjson/LICENSE b/vendor/github.com/tidwall/sjson/LICENSE new file mode 100644 index 000000000..89593c7c8 --- /dev/null +++ b/vendor/github.com/tidwall/sjson/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/tidwall/sjson/README.md b/vendor/github.com/tidwall/sjson/README.md new file mode 100644 index 000000000..4598424ef --- /dev/null +++ b/vendor/github.com/tidwall/sjson/README.md @@ -0,0 +1,278 @@ +

+SJSON
+=====
+
+[GoDoc](https://godoc.org/github.com/tidwall/sjson)
+
+*set a json value quickly*

+ +SJSON is a Go package that provides a [very fast](#performance) and simple way to set a value in a json document. +For quickly retrieving json values check out [GJSON](https://github.com/tidwall/gjson). + +For a command line interface check out [JJ](https://github.com/tidwall/jj). + +Getting Started +=============== + +Installing +---------- + +To start using SJSON, install Go and run `go get`: + +```sh +$ go get -u github.com/tidwall/sjson +``` + +This will retrieve the library. + +Set a value +----------- +Set sets the value for the specified path. +A path is in dot syntax, such as "name.last" or "age". +This function expects that the json is well-formed and validated. +Invalid json will not panic, but it may return back unexpected results. +Invalid paths may return an error. + +```go +package main + +import "github.com/tidwall/sjson" + +const json = `{"name":{"first":"Janet","last":"Prichard"},"age":47}` + +func main() { + value, _ := sjson.Set(json, "name.last", "Anderson") + println(value) +} +``` + +This will print: + +```json +{"name":{"first":"Janet","last":"Anderson"},"age":47} +``` + +Path syntax +----------- + +A path is a series of keys separated by a dot. +The dot and colon characters can be escaped with ``\``. + +```json +{ + "name": {"first": "Tom", "last": "Anderson"}, + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"first": "James", "last": "Murphy"}, + {"first": "Roger", "last": "Craig"} + ] +} +``` +``` +"name.last" >> "Anderson" +"age" >> 37 +"children.1" >> "Alex" +"friends.1.last" >> "Craig" +``` + +The `-1` key can be used to append a value to an existing array: + +``` +"children.-1" >> appends a new value to the end of the children array +``` + +Normally number keys are used to modify arrays, but it's possible to force a numeric object key by using the colon character: + +```json +{ + "users":{ + "2313":{"name":"Sara"}, + "7839":{"name":"Andy"} + } +} +``` + +A colon path would look like: + +``` +"users.:2313.name" >> "Sara" +``` + +Supported types +--------------- + +Pretty much any type is supported: + +```go +sjson.Set(`{"key":true}`, "key", nil) +sjson.Set(`{"key":true}`, "key", false) +sjson.Set(`{"key":true}`, "key", 1) +sjson.Set(`{"key":true}`, "key", 10.5) +sjson.Set(`{"key":true}`, "key", "hello") +sjson.Set(`{"key":true}`, "key", []string{"hello", "world"}) +sjson.Set(`{"key":true}`, "key", map[string]interface{}{"hello":"world"}) +``` + +When a type is not recognized, SJSON will fallback to the `encoding/json` Marshaller. 
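+
+For example, a struct (not one of the recognized primitive types) is
+marshalled with `encoding/json` before being set; this is a minimal sketch
+and the `User` type is illustrative:
+
+```go
+type User struct {
+	Name string `json:"name"`
+	Age  int    `json:"age"`
+}
+
+value, _ := sjson.Set(`{}`, "user", User{Name: "Sara", Age: 44})
+// value == `{"user":{"name":"Sara","age":44}}`
+```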
+ + +Examples +-------- + +Set a value from empty document: +```go +value, _ := sjson.Set("", "name", "Tom") +println(value) + +// Output: +// {"name":"Tom"} +``` + +Set a nested value from empty document: +```go +value, _ := sjson.Set("", "name.last", "Anderson") +println(value) + +// Output: +// {"name":{"last":"Anderson"}} +``` + +Set a new value: +```go +value, _ := sjson.Set(`{"name":{"last":"Anderson"}}`, "name.first", "Sara") +println(value) + +// Output: +// {"name":{"first":"Sara","last":"Anderson"}} +``` + +Update an existing value: +```go +value, _ := sjson.Set(`{"name":{"last":"Anderson"}}`, "name.last", "Smith") +println(value) + +// Output: +// {"name":{"last":"Smith"}} +``` + +Set a new array value: +```go +value, _ := sjson.Set(`{"friends":["Andy","Carol"]}`, "friends.2", "Sara") +println(value) + +// Output: +// {"friends":["Andy","Carol","Sara"] +``` + +Append an array value by using the `-1` key in a path: +```go +value, _ := sjson.Set(`{"friends":["Andy","Carol"]}`, "friends.-1", "Sara") +println(value) + +// Output: +// {"friends":["Andy","Carol","Sara"] +``` + +Append an array value that is past the end: +```go +value, _ := sjson.Set(`{"friends":["Andy","Carol"]}`, "friends.4", "Sara") +println(value) + +// Output: +// {"friends":["Andy","Carol",null,null,"Sara"] +``` + +Delete a value: +```go +value, _ := sjson.Delete(`{"name":{"first":"Sara","last":"Anderson"}}`, "name.first") +println(value) + +// Output: +// {"name":{"last":"Anderson"}} +``` + +Delete an array value: +```go +value, _ := sjson.Delete(`{"friends":["Andy","Carol"]}`, "friends.1") +println(value) + +// Output: +// {"friends":["Andy"]} +``` + +Delete the last array value: +```go +value, _ := sjson.Delete(`{"friends":["Andy","Carol"]}`, "friends.-1") +println(value) + +// Output: +// {"friends":["Andy"]} +``` + +## Performance + +Benchmarks of SJSON alongside [encoding/json](https://golang.org/pkg/encoding/json/), +[ffjson](https://github.com/pquerna/ffjson), +[EasyJSON](https://github.com/mailru/easyjson), +and [Gabs](https://github.com/Jeffail/gabs) + +``` +Benchmark_SJSON-8 3000000 805 ns/op 1077 B/op 3 allocs/op +Benchmark_SJSON_ReplaceInPlace-8 3000000 449 ns/op 0 B/op 0 allocs/op +Benchmark_JSON_Map-8 300000 21236 ns/op 6392 B/op 150 allocs/op +Benchmark_JSON_Struct-8 300000 14691 ns/op 1789 B/op 24 allocs/op +Benchmark_Gabs-8 300000 21311 ns/op 6752 B/op 150 allocs/op +Benchmark_FFJSON-8 300000 17673 ns/op 3589 B/op 47 allocs/op +Benchmark_EasyJSON-8 1500000 3119 ns/op 1061 B/op 13 allocs/op +``` + +JSON document used: + +```json +{ + "widget": { + "debug": "on", + "window": { + "title": "Sample Konfabulator Widget", + "name": "main_window", + "width": 500, + "height": 500 + }, + "image": { + "src": "Images/Sun.png", + "hOffset": 250, + "vOffset": 250, + "alignment": "center" + }, + "text": { + "data": "Click Here", + "size": 36, + "style": "bold", + "vOffset": 100, + "alignment": "center", + "onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;" + } + } +} +``` + +Each operation was rotated though one of the following search paths: + +``` +widget.window.name +widget.image.hOffset +widget.text.onMouseUp +``` + +*These benchmarks were run on a MacBook Pro 15" 2.8 GHz Intel Core i7 using Go 1.7 and can be be found [here](https://github.com/tidwall/sjson-benchmarks)*. + +## Contact +Josh Baker [@tidwall](http://twitter.com/tidwall) + +## License + +SJSON source code is available under the MIT [License](/LICENSE). 
diff --git a/vendor/github.com/tidwall/sjson/logo.png b/vendor/github.com/tidwall/sjson/logo.png new file mode 100644 index 000000000..b5aa257b6 Binary files /dev/null and b/vendor/github.com/tidwall/sjson/logo.png differ diff --git a/vendor/github.com/tidwall/sjson/sjson.go b/vendor/github.com/tidwall/sjson/sjson.go new file mode 100644 index 000000000..a55eef3fd --- /dev/null +++ b/vendor/github.com/tidwall/sjson/sjson.go @@ -0,0 +1,737 @@ +// Package sjson provides setting json values. +package sjson + +import ( + jsongo "encoding/json" + "sort" + "strconv" + "unsafe" + + "github.com/tidwall/gjson" +) + +type errorType struct { + msg string +} + +func (err *errorType) Error() string { + return err.msg +} + +// Options represents additional options for the Set and Delete functions. +type Options struct { + // Optimistic is a hint that the value likely exists which + // allows for the sjson to perform a fast-track search and replace. + Optimistic bool + // ReplaceInPlace is a hint to replace the input json rather than + // allocate a new json byte slice. When this field is specified + // the input json will not longer be valid and it should not be used + // In the case when the destination slice doesn't have enough free + // bytes to replace the data in place, a new bytes slice will be + // created under the hood. + // The Optimistic flag must be set to true and the input must be a + // byte slice in order to use this field. + ReplaceInPlace bool +} + +type pathResult struct { + part string // current key part + gpart string // gjson get part + path string // remaining path + force bool // force a string key + more bool // there is more path to parse +} + +func isSimpleChar(ch byte) bool { + switch ch { + case '|', '#', '@', '*', '?': + return false + default: + return true + } +} + +func parsePath(path string) (res pathResult, simple bool) { + var r pathResult + if len(path) > 0 && path[0] == ':' { + r.force = true + path = path[1:] + } + for i := 0; i < len(path); i++ { + if path[i] == '.' { + r.part = path[:i] + r.gpart = path[:i] + r.path = path[i+1:] + r.more = true + return r, true + } + if !isSimpleChar(path[i]) { + return r, false + } + if path[i] == '\\' { + // go into escape mode. this is a slower path that + // strips off the escape character from the part. + epart := []byte(path[:i]) + gpart := []byte(path[:i+1]) + i++ + if i < len(path) { + epart = append(epart, path[i]) + gpart = append(gpart, path[i]) + i++ + for ; i < len(path); i++ { + if path[i] == '\\' { + gpart = append(gpart, '\\') + i++ + if i < len(path) { + epart = append(epart, path[i]) + gpart = append(gpart, path[i]) + } + continue + } else if path[i] == '.' { + r.part = string(epart) + r.gpart = string(gpart) + r.path = path[i+1:] + r.more = true + return r, true + } else if !isSimpleChar(path[i]) { + return r, false + } + epart = append(epart, path[i]) + gpart = append(gpart, path[i]) + } + } + // append the last part + r.part = string(epart) + r.gpart = string(gpart) + return r, true + } + } + r.part = path + r.gpart = path + return r, true +} + +func mustMarshalString(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] < ' ' || s[i] > 0x7f || s[i] == '"' || s[i] == '\\' { + return true + } + } + return false +} + +// appendStringify makes a json string and appends to buf. +func appendStringify(buf []byte, s string) []byte { + if mustMarshalString(s) { + b, _ := jsongo.Marshal(s) + return append(buf, b...) + } + buf = append(buf, '"') + buf = append(buf, s...) 
+ buf = append(buf, '"') + return buf +} + +// appendBuild builds a json block from a json path. +func appendBuild(buf []byte, array bool, paths []pathResult, raw string, + stringify bool) []byte { + if !array { + buf = appendStringify(buf, paths[0].part) + buf = append(buf, ':') + } + if len(paths) > 1 { + n, numeric := atoui(paths[1]) + if numeric || (!paths[1].force && paths[1].part == "-1") { + buf = append(buf, '[') + buf = appendRepeat(buf, "null,", n) + buf = appendBuild(buf, true, paths[1:], raw, stringify) + buf = append(buf, ']') + } else { + buf = append(buf, '{') + buf = appendBuild(buf, false, paths[1:], raw, stringify) + buf = append(buf, '}') + } + } else { + if stringify { + buf = appendStringify(buf, raw) + } else { + buf = append(buf, raw...) + } + } + return buf +} + +// atoui does a rip conversion of string -> unigned int. +func atoui(r pathResult) (n int, ok bool) { + if r.force { + return 0, false + } + for i := 0; i < len(r.part); i++ { + if r.part[i] < '0' || r.part[i] > '9' { + return 0, false + } + n = n*10 + int(r.part[i]-'0') + } + return n, true +} + +// appendRepeat repeats string "n" times and appends to buf. +func appendRepeat(buf []byte, s string, n int) []byte { + for i := 0; i < n; i++ { + buf = append(buf, s...) + } + return buf +} + +// trim does a rip trim +func trim(s string) string { + for len(s) > 0 { + if s[0] <= ' ' { + s = s[1:] + continue + } + break + } + for len(s) > 0 { + if s[len(s)-1] <= ' ' { + s = s[:len(s)-1] + continue + } + break + } + return s +} + +// deleteTailItem deletes the previous key or comma. +func deleteTailItem(buf []byte) ([]byte, bool) { +loop: + for i := len(buf) - 1; i >= 0; i-- { + // look for either a ',',':','[' + switch buf[i] { + case '[': + return buf, true + case ',': + return buf[:i], false + case ':': + // delete tail string + i-- + for ; i >= 0; i-- { + if buf[i] == '"' { + i-- + for ; i >= 0; i-- { + if buf[i] == '"' { + i-- + if i >= 0 && buf[i] == '\\' { + i-- + continue + } + for ; i >= 0; i-- { + // look for either a ',','{' + switch buf[i] { + case '{': + return buf[:i+1], true + case ',': + return buf[:i], false + } + } + } + } + break + } + } + break loop + } + } + return buf, false +} + +var errNoChange = &errorType{"no change"} + +func appendRawPaths(buf []byte, jstr string, paths []pathResult, raw string, + stringify, del bool) ([]byte, error) { + var err error + var res gjson.Result + var found bool + if del { + if paths[0].part == "-1" && !paths[0].force { + res = gjson.Get(jstr, "#") + if res.Int() > 0 { + res = gjson.Get(jstr, strconv.FormatInt(int64(res.Int()-1), 10)) + found = true + } + } + } + if !found { + res = gjson.Get(jstr, paths[0].gpart) + } + if res.Index > 0 { + if len(paths) > 1 { + buf = append(buf, jstr[:res.Index]...) + buf, err = appendRawPaths(buf, res.Raw, paths[1:], raw, + stringify, del) + if err != nil { + return nil, err + } + buf = append(buf, jstr[res.Index+len(res.Raw):]...) + return buf, nil + } + buf = append(buf, jstr[:res.Index]...) + var exidx int // additional forward stripping + if del { + var delNextComma bool + buf, delNextComma = deleteTailItem(buf) + if delNextComma { + i, j := res.Index+len(res.Raw), 0 + for ; i < len(jstr); i, j = i+1, j+1 { + if jstr[i] <= ' ' { + continue + } + if jstr[i] == ',' { + exidx = j + 1 + } + break + } + } + } else { + if stringify { + buf = appendStringify(buf, raw) + } else { + buf = append(buf, raw...) + } + } + buf = append(buf, jstr[res.Index+len(res.Raw)+exidx:]...) 
+ return buf, nil + } + if del { + return nil, errNoChange + } + n, numeric := atoui(paths[0]) + isempty := true + for i := 0; i < len(jstr); i++ { + if jstr[i] > ' ' { + isempty = false + break + } + } + if isempty { + if numeric { + jstr = "[]" + } else { + jstr = "{}" + } + } + jsres := gjson.Parse(jstr) + if jsres.Type != gjson.JSON { + if numeric { + jstr = "[]" + } else { + jstr = "{}" + } + jsres = gjson.Parse(jstr) + } + var comma bool + for i := 1; i < len(jsres.Raw); i++ { + if jsres.Raw[i] <= ' ' { + continue + } + if jsres.Raw[i] == '}' || jsres.Raw[i] == ']' { + break + } + comma = true + break + } + switch jsres.Raw[0] { + default: + return nil, &errorType{"json must be an object or array"} + case '{': + end := len(jsres.Raw) - 1 + for ; end > 0; end-- { + if jsres.Raw[end] == '}' { + break + } + } + buf = append(buf, jsres.Raw[:end]...) + if comma { + buf = append(buf, ',') + } + buf = appendBuild(buf, false, paths, raw, stringify) + buf = append(buf, '}') + return buf, nil + case '[': + var appendit bool + if !numeric { + if paths[0].part == "-1" && !paths[0].force { + appendit = true + } else { + return nil, &errorType{ + "cannot set array element for non-numeric key '" + + paths[0].part + "'"} + } + } + if appendit { + njson := trim(jsres.Raw) + if njson[len(njson)-1] == ']' { + njson = njson[:len(njson)-1] + } + buf = append(buf, njson...) + if comma { + buf = append(buf, ',') + } + + buf = appendBuild(buf, true, paths, raw, stringify) + buf = append(buf, ']') + return buf, nil + } + buf = append(buf, '[') + ress := jsres.Array() + for i := 0; i < len(ress); i++ { + if i > 0 { + buf = append(buf, ',') + } + buf = append(buf, ress[i].Raw...) + } + if len(ress) == 0 { + buf = appendRepeat(buf, "null,", n-len(ress)) + } else { + buf = appendRepeat(buf, ",null", n-len(ress)) + if comma { + buf = append(buf, ',') + } + } + buf = appendBuild(buf, true, paths, raw, stringify) + buf = append(buf, ']') + return buf, nil + } +} + +func isOptimisticPath(path string) bool { + for i := 0; i < len(path); i++ { + if path[i] < '.' || path[i] > 'z' { + return false + } + if path[i] > '9' && path[i] < 'A' { + return false + } + if path[i] > 'z' { + return false + } + } + return true +} + +// Set sets a json value for the specified path. +// A path is in dot syntax, such as "name.last" or "age". +// This function expects that the json is well-formed, and does not validate. +// Invalid json will not panic, but it may return back unexpected results. +// An error is returned if the path is not valid. +// +// A path is a series of keys separated by a dot. +// +// { +// "name": {"first": "Tom", "last": "Anderson"}, +// "age":37, +// "children": ["Sara","Alex","Jack"], +// "friends": [ +// {"first": "James", "last": "Murphy"}, +// {"first": "Roger", "last": "Craig"} +// ] +// } +// "name.last" >> "Anderson" +// "age" >> 37 +// "children.1" >> "Alex" +// +func Set(json, path string, value interface{}) (string, error) { + return SetOptions(json, path, value, nil) +} + +// SetBytes sets a json value for the specified path. +// If working with bytes, this method preferred over +// Set(string(data), path, value) +func SetBytes(json []byte, path string, value interface{}) ([]byte, error) { + return SetBytesOptions(json, path, value, nil) +} + +// SetRaw sets a raw json value for the specified path. +// This function works the same as Set except that the value is set as a +// raw block of json. This allows for setting premarshalled json objects. 
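+//
+// A minimal usage sketch (illustrative, not from the upstream docs):
+//
+//	value, _ := SetRaw(`{"name":"Tom"}`, "stats", `{"age":37}`)
+//	// value == `{"name":"Tom","stats":{"age":37}}`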
+func SetRaw(json, path, value string) (string, error) { + return SetRawOptions(json, path, value, nil) +} + +// SetRawOptions sets a raw json value for the specified path with options. +// This furnction works the same as SetOptions except that the value is set +// as a raw block of json. This allows for setting premarshalled json objects. +func SetRawOptions(json, path, value string, opts *Options) (string, error) { + var optimistic bool + if opts != nil { + optimistic = opts.Optimistic + } + res, err := set(json, path, value, false, false, optimistic, false) + if err == errNoChange { + return json, nil + } + return string(res), err +} + +// SetRawBytes sets a raw json value for the specified path. +// If working with bytes, this method preferred over +// SetRaw(string(data), path, value) +func SetRawBytes(json []byte, path string, value []byte) ([]byte, error) { + return SetRawBytesOptions(json, path, value, nil) +} + +type dtype struct{} + +// Delete deletes a value from json for the specified path. +func Delete(json, path string) (string, error) { + return Set(json, path, dtype{}) +} + +// DeleteBytes deletes a value from json for the specified path. +func DeleteBytes(json []byte, path string) ([]byte, error) { + return SetBytes(json, path, dtype{}) +} + +type stringHeader struct { + data unsafe.Pointer + len int +} + +type sliceHeader struct { + data unsafe.Pointer + len int + cap int +} + +func set(jstr, path, raw string, + stringify, del, optimistic, inplace bool) ([]byte, error) { + if path == "" { + return []byte(jstr), &errorType{"path cannot be empty"} + } + if !del && optimistic && isOptimisticPath(path) { + res := gjson.Get(jstr, path) + if res.Exists() && res.Index > 0 { + sz := len(jstr) - len(res.Raw) + len(raw) + if stringify { + sz += 2 + } + if inplace && sz <= len(jstr) { + if !stringify || !mustMarshalString(raw) { + jsonh := *(*stringHeader)(unsafe.Pointer(&jstr)) + jsonbh := sliceHeader{ + data: jsonh.data, len: jsonh.len, cap: jsonh.len} + jbytes := *(*[]byte)(unsafe.Pointer(&jsonbh)) + if stringify { + jbytes[res.Index] = '"' + copy(jbytes[res.Index+1:], []byte(raw)) + jbytes[res.Index+1+len(raw)] = '"' + copy(jbytes[res.Index+1+len(raw)+1:], + jbytes[res.Index+len(res.Raw):]) + } else { + copy(jbytes[res.Index:], []byte(raw)) + copy(jbytes[res.Index+len(raw):], + jbytes[res.Index+len(res.Raw):]) + } + return jbytes[:sz], nil + } + return []byte(jstr), nil + } + buf := make([]byte, 0, sz) + buf = append(buf, jstr[:res.Index]...) + if stringify { + buf = appendStringify(buf, raw) + } else { + buf = append(buf, raw...) + } + buf = append(buf, jstr[res.Index+len(res.Raw):]...) 
+ return buf, nil + } + } + var paths []pathResult + r, simple := parsePath(path) + if simple { + paths = append(paths, r) + for r.more { + r, simple = parsePath(r.path) + if !simple { + break + } + paths = append(paths, r) + } + } + if !simple { + if del { + return []byte(jstr), + &errorType{"cannot delete value from a complex path"} + } + return setComplexPath(jstr, path, raw, stringify) + } + njson, err := appendRawPaths(nil, jstr, paths, raw, stringify, del) + if err != nil { + return []byte(jstr), err + } + return njson, nil +} + +func setComplexPath(jstr, path, raw string, stringify bool) ([]byte, error) { + res := gjson.Get(jstr, path) + if !res.Exists() || !(res.Index != 0 || len(res.Indexes) != 0) { + return []byte(jstr), errNoChange + } + if res.Index != 0 { + njson := []byte(jstr[:res.Index]) + if stringify { + njson = appendStringify(njson, raw) + } else { + njson = append(njson, raw...) + } + njson = append(njson, jstr[res.Index+len(res.Raw):]...) + jstr = string(njson) + } + if len(res.Indexes) > 0 { + type val struct { + index int + res gjson.Result + } + vals := make([]val, 0, len(res.Indexes)) + res.ForEach(func(_, vres gjson.Result) bool { + vals = append(vals, val{res: vres}) + return true + }) + if len(res.Indexes) != len(vals) { + return []byte(jstr), errNoChange + } + for i := 0; i < len(res.Indexes); i++ { + vals[i].index = res.Indexes[i] + } + sort.SliceStable(vals, func(i, j int) bool { + return vals[i].index > vals[j].index + }) + for _, val := range vals { + vres := val.res + index := val.index + njson := []byte(jstr[:index]) + if stringify { + njson = appendStringify(njson, raw) + } else { + njson = append(njson, raw...) + } + njson = append(njson, jstr[index+len(vres.Raw):]...) + jstr = string(njson) + } + } + return []byte(jstr), nil +} + +// SetOptions sets a json value for the specified path with options. +// A path is in dot syntax, such as "name.last" or "age". +// This function expects that the json is well-formed, and does not validate. +// Invalid json will not panic, but it may return back unexpected results. +// An error is returned if the path is not valid. +func SetOptions(json, path string, value interface{}, + opts *Options) (string, error) { + if opts != nil { + if opts.ReplaceInPlace { + // it's not safe to replace bytes in-place for strings + // copy the Options and set options.ReplaceInPlace to false. + nopts := *opts + opts = &nopts + opts.ReplaceInPlace = false + } + } + jsonh := *(*stringHeader)(unsafe.Pointer(&json)) + jsonbh := sliceHeader{data: jsonh.data, len: jsonh.len, cap: jsonh.len} + jsonb := *(*[]byte)(unsafe.Pointer(&jsonbh)) + res, err := SetBytesOptions(jsonb, path, value, opts) + return string(res), err +} + +// SetBytesOptions sets a json value for the specified path with options. 
+// If working with bytes, this method preferred over +// SetOptions(string(data), path, value) +func SetBytesOptions(json []byte, path string, value interface{}, + opts *Options) ([]byte, error) { + var optimistic, inplace bool + if opts != nil { + optimistic = opts.Optimistic + inplace = opts.ReplaceInPlace + } + jstr := *(*string)(unsafe.Pointer(&json)) + var res []byte + var err error + switch v := value.(type) { + default: + b, merr := jsongo.Marshal(value) + if merr != nil { + return nil, merr + } + raw := *(*string)(unsafe.Pointer(&b)) + res, err = set(jstr, path, raw, false, false, optimistic, inplace) + case dtype: + res, err = set(jstr, path, "", false, true, optimistic, inplace) + case string: + res, err = set(jstr, path, v, true, false, optimistic, inplace) + case []byte: + raw := *(*string)(unsafe.Pointer(&v)) + res, err = set(jstr, path, raw, true, false, optimistic, inplace) + case bool: + if v { + res, err = set(jstr, path, "true", false, false, optimistic, inplace) + } else { + res, err = set(jstr, path, "false", false, false, optimistic, inplace) + } + case int8: + res, err = set(jstr, path, strconv.FormatInt(int64(v), 10), + false, false, optimistic, inplace) + case int16: + res, err = set(jstr, path, strconv.FormatInt(int64(v), 10), + false, false, optimistic, inplace) + case int32: + res, err = set(jstr, path, strconv.FormatInt(int64(v), 10), + false, false, optimistic, inplace) + case int64: + res, err = set(jstr, path, strconv.FormatInt(int64(v), 10), + false, false, optimistic, inplace) + case uint8: + res, err = set(jstr, path, strconv.FormatUint(uint64(v), 10), + false, false, optimistic, inplace) + case uint16: + res, err = set(jstr, path, strconv.FormatUint(uint64(v), 10), + false, false, optimistic, inplace) + case uint32: + res, err = set(jstr, path, strconv.FormatUint(uint64(v), 10), + false, false, optimistic, inplace) + case uint64: + res, err = set(jstr, path, strconv.FormatUint(uint64(v), 10), + false, false, optimistic, inplace) + case float32: + res, err = set(jstr, path, strconv.FormatFloat(float64(v), 'f', -1, 64), + false, false, optimistic, inplace) + case float64: + res, err = set(jstr, path, strconv.FormatFloat(float64(v), 'f', -1, 64), + false, false, optimistic, inplace) + } + if err == errNoChange { + return json, nil + } + return res, err +} + +// SetRawBytesOptions sets a raw json value for the specified path with options. 
+// If working with bytes, this method preferred over +// SetRawOptions(string(data), path, value, opts) +func SetRawBytesOptions(json []byte, path string, value []byte, + opts *Options) ([]byte, error) { + jstr := *(*string)(unsafe.Pointer(&json)) + vstr := *(*string)(unsafe.Pointer(&value)) + var optimistic, inplace bool + if opts != nil { + optimistic = opts.Optimistic + inplace = opts.ReplaceInPlace + } + res, err := set(jstr, path, vstr, false, false, optimistic, inplace) + if err == errNoChange { + return json, nil + } + return res, err +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 9eaa6077f..54cfbd367 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,3 +1,6 @@ +# github.com/Masterminds/semver/v3 v3.2.1 +## explicit; go 1.18 +github.com/Masterminds/semver/v3 # github.com/Microsoft/go-winio v0.6.1 ## explicit; go 1.17 github.com/Microsoft/go-winio @@ -49,10 +52,11 @@ github.com/containerd/cgroups/v3/cgroup1/stats # github.com/containerd/errdefs v0.1.0 ## explicit; go 1.20 github.com/containerd/errdefs -# github.com/containernetworking/cni v1.1.2 -## explicit; go 1.14 +# github.com/containernetworking/cni v1.2.0-rc1 +## explicit; go 1.18 github.com/containernetworking/cni/libcni github.com/containernetworking/cni/pkg/invoke +github.com/containernetworking/cni/pkg/ns github.com/containernetworking/cni/pkg/skel github.com/containernetworking/cni/pkg/types github.com/containernetworking/cni/pkg/types/020 @@ -74,13 +78,6 @@ github.com/d2g/dhcp4 # github.com/d2g/dhcp4client v1.0.0 ## explicit github.com/d2g/dhcp4client -# github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5 -## explicit -github.com/d2g/dhcp4server -github.com/d2g/dhcp4server/leasepool -github.com/d2g/dhcp4server/leasepool/memorypool -# github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4 -## explicit # github.com/go-logr/logr v1.4.1 ## explicit; go 1.18 github.com/go-logr/logr @@ -167,6 +164,18 @@ github.com/safchain/ethtool github.com/sirupsen/logrus # github.com/stretchr/testify v1.8.2 ## explicit; go 1.13 +# github.com/tidwall/gjson v1.17.1 +## explicit; go 1.12 +github.com/tidwall/gjson +# github.com/tidwall/match v1.1.1 +## explicit; go 1.15 +github.com/tidwall/match +# github.com/tidwall/pretty v1.2.0 +## explicit; go 1.16 +github.com/tidwall/pretty +# github.com/tidwall/sjson v1.2.5 +## explicit; go 1.14 +github.com/tidwall/sjson # github.com/vishvananda/netlink v1.2.1-beta.2 ## explicit; go 1.12 github.com/vishvananda/netlink