Skip to content

Commit

Permalink
enable kube-route to be run as daemonset, now just run kubectl create…
Browse files Browse the repository at this point in the history
… -f kube-router-daemonset.yaml
  • Loading branch information
Murali Reddy committed Apr 26, 2017
1 parent 6ecb4bb commit f8fbb42
Show file tree
Hide file tree
Showing 9 changed files with 162 additions and 47 deletions.
5 changes: 5 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# Minimal runtime image for kube-router.
FROM alpine
# iptables and ipset binaries are invoked by kube-router at runtime for the
# firewall and service-proxy features.
RUN apk add --no-cache iptables ipset
# The kube-router binary is built (statically linked) by the Makefile before
# this image is assembled, and copied in here.
COPY kube-router /

ENTRYPOINT ["/kube-router"]
12 changes: 8 additions & 4 deletions Makefile
Original file line number Diff line number Diff line change
@@ -1,9 +1,13 @@
#all: push

# Default target: build the static binary, then package it into a Docker image.
# (Stale pre-commit copies of the `all` and `run` recipes were interleaved by
# the diff rendering; duplicate targets are an error in make, so only the
# post-commit recipes are kept.)
all: dockerimg

dockerimg: build
	sudo docker build -t "cloudnativelabs/kube-router" .

# Statically link so the binary runs inside the musl-based alpine image.
build:
	go build --ldflags '-extldflags "-static"' -o kube-router kube-router.go

clean:
	rm -f kube-router

run:
	./kube-router --kubeconfig=/var/lib/kube-router/kubeconfig
11 changes: 5 additions & 6 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
kube-router
==========
Kube-router is a distributed load balancer, firewall and router for Kubernetes. Kube-router can be configured to provide on each node:
Kube-router is a distributed load balancer, firewall and router for Kubernetes. Kube-router can be configured to provide on each cluster node:

- an ingress firewall for the pods running on the node as per the defined Kubernetes network policies
- a service proxy on each node for *ClusterIP* and *NodePort* service types, providing service discovery and load balancing
Expand All @@ -17,13 +17,12 @@ We have Kube-proxy which provides service proxy and load balancer. We have sever

## Theory of Operation

Kube-router runs as agent on each node and leverages standard Linux technologies **iptables, ipvs/lvs, ipset, iproute2**
Kube-router can be run as an agent or as a pod (through a daemonset) on each node, and leverages standard Linux technologies **iptables, ipvs/lvs, ipset, iproute2**

### service proxy and load balancing

Kube-router uses IPVS/LVS technology built in Linux to provide L4 load balancing. Each of the kubernetes service of **ClusterIP** and **NodePort**
type is configured as IPVS virtual service. Each service endpoint is configured as real server to the virtual service.
Standard **ipvsadm** tool can be used to verify the configuration and monitor the status.
Kube-router uses the IPVS/LVS technology built into Linux to provide L4 load balancing. Each Kubernetes service of type **ClusterIP** or **NodePort** is configured as an IPVS virtual service. Each service endpoint is configured as a real server for the virtual service.
Standard **ipvsadm** tool can be used to verify the configuration and monitor the active connections.

Below is example set of services on kubernetes

Expand Down Expand Up @@ -100,7 +99,7 @@ Alternatively you can download the prebuilt binary from https://github.com/cloud
--run-router If true each node advertises routes to the rest of the nodes and learns the routes for the pods. false by default
--run-service-proxy If false, kube-router won't setup IPVS for services proxy. true by default.
--cleanup-config If true cleanup iptables rules, ipvs, ipset configuration and exit.
--cni-conf-file string Full path to CNI configuration file.
--masquerade-all SNAT all traffic to cluster IP/node port. False by default
--config-sync-period duration How often configuration from the apiserver is refreshed. Must be greater than 0. (default 1m0s)
--iptables-sync-period duration The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m'). Must be greater than 0. (default 1m0s)
--ipvs-sync-period duration The maximum interval of how often ipvs config is refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. (default 1m0s)
Expand Down
43 changes: 26 additions & 17 deletions app/controllers/network_routes_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import (
"net"
"os"
"strconv"
"strings"
"sync"
"time"

Expand All @@ -27,12 +28,30 @@ type NetworkRoutingController struct {
mu sync.Mutex
clientset *kubernetes.Clientset
bgpServer *gobgp.BgpServer
cniConfFile string
syncPeriod time.Duration
}

func (nrc *NetworkRoutingController) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) {

cidr, err := utils.GetPodCidrFromCniSpec("/etc/cni/net.d/10-kuberouter.conf")
if err != nil {
glog.Errorf("Failed to get pod CIDR from CNI conf file: %s", err.Error())
}
cidrlen, _ := cidr.Mask.Size()
oldCidr := cidr.IP.String() + "/" + strconv.Itoa(cidrlen)

currentCidr, err := utils.GetPodCidrFromNodeSpec(nrc.clientset)
if err != nil {
glog.Errorf("Failed to get pod CIDR from node spec: %s", err.Error())
}

if len(cidr.IP) == 0 || strings.Compare(oldCidr, currentCidr) != 0 {
err = utils.InsertPodCidrInCniSpec("/etc/cni/net.d/10-kuberouter.conf", currentCidr)
if err != nil {
glog.Errorf("Failed to insert pod CIDR into CNI conf file: %s", err.Error())
}
}

t := time.NewTicker(nrc.syncPeriod)
defer t.Stop()
defer wg.Done()
Expand Down Expand Up @@ -110,17 +129,20 @@ func (nrc *NetworkRoutingController) watchBgpUpdates() {

func (nrc *NetworkRoutingController) advertiseRoute() error {

subnet, cidrlen, err := utils.GetPodCidrDetails(nrc.cniConfFile)
cidr, err := utils.GetPodCidrFromNodeSpec(nrc.clientset)
if err != nil {
return err
}
cidrStr := strings.Split(cidr, "/")
subnet := cidrStr[0]
cidrLen, err := strconv.Atoi(cidrStr[1])
attrs := []bgp.PathAttributeInterface{
bgp.NewPathAttributeOrigin(0),
bgp.NewPathAttributeNextHop(nrc.nodeIP.String()),
bgp.NewPathAttributeAsPath([]bgp.AsPathParamInterface{bgp.NewAs4PathParam(bgp.BGP_ASPATH_ATTR_TYPE_SEQ, []uint32{4000, 400000, 300000, 40001})}),
}
glog.Infof("Advertising route: '%s/%s via %s' to peers", subnet, strconv.Itoa(cidrlen), nrc.nodeIP.String())
if _, err := nrc.bgpServer.AddPath("", []*table.Path{table.NewPath(nil, bgp.NewIPAddrPrefix(uint8(cidrlen),
glog.Infof("Advertising route: '%s/%s via %s' to peers", subnet, strconv.Itoa(cidrLen), nrc.nodeIP.String())
if _, err := nrc.bgpServer.AddPath("", []*table.Path{table.NewPath(nil, bgp.NewIPAddrPrefix(uint8(cidrLen),
subnet), false, attrs, time.Now(), false)}); err != nil {
return fmt.Errorf(err.Error())
}
Expand Down Expand Up @@ -151,19 +173,6 @@ func NewNetworkRoutingController(clientset *kubernetes.Clientset, kubeRouterConf

nrc.syncPeriod = kubeRouterConfig.RoutesSyncPeriod
nrc.clientset = clientset
nrc.cniConfFile = kubeRouterConfig.CniConfFile

if kubeRouterConfig.CniConfFile == "" {
panic("Please specify a valid CNF conf file path in the command line parameter --cni-conf-file ")
}

if _, err := os.Stat(nrc.cniConfFile); os.IsNotExist(err) {
panic("Specified CNI conf file does not exist. Conf file: " + nrc.cniConfFile)
}
_, _, err := utils.GetPodCidrDetails(nrc.cniConfFile)
if err != nil {
panic("Failed to read IPAM conf from the CNI conf file: " + nrc.cniConfFile + " due to " + err.Error())
}

nodeHostName, err := os.Hostname()
if err != nil {
Expand Down
30 changes: 20 additions & 10 deletions app/controllers/network_services_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ type NetworkServicesController struct {
endpointsMap endpointsInfoMap
podCidr string
masqueradeAll bool
client *kubernetes.Clientset
}

// internal representation of kubernetes service
Expand Down Expand Up @@ -348,13 +349,18 @@ func ensureMasqueradeIptablesRule(masqueradeAll bool, podCidr string) error {
var args []string
if masqueradeAll {
args = []string{"-m", "ipvs", "--ipvs", "--vdir", "ORIGINAL", "--vmethod", "MASQ", "-m", "comment", "--comment", "", "-j", "MASQUERADE"}
} else {
err = iptablesCmdHandler.AppendUnique("nat", "POSTROUTING", args...)
if err != nil {
return errors.New("Failed to run iptables command" + err.Error())
}
}
if len(podCidr) > 0 {
args = []string{"-m", "ipvs", "--ipvs", "--vdir", "ORIGINAL", "--vmethod", "MASQ", "-m", "comment", "--comment", "",
"!", "-s", podCidr, "-j", "MASQUERADE"}
}
err = iptablesCmdHandler.AppendUnique("nat", "POSTROUTING", args...)
if err != nil {
return errors.New("Failed to run iptables command" + err.Error())
err = iptablesCmdHandler.AppendUnique("nat", "POSTROUTING", args...)
if err != nil {
return errors.New("Failed to run iptables command" + err.Error())
}
}
glog.Infof("Successfully added iptables masqurade rule")
return nil
Expand Down Expand Up @@ -512,15 +518,19 @@ func NewNetworkServicesController(clientset *kubernetes.Clientset, config *optio

nsc.serviceMap = make(serviceInfoMap)
nsc.endpointsMap = make(endpointsInfoMap)
nsc.client = clientset

nsc.masqueradeAll = false
if config.MasqueradeAll {
nsc.masqueradeAll = true
}

nsc.masqueradeAll = true
if config.RunRouter {
subnet, cidrLen, err := utils.GetPodCidrDetails(config.CniConfFile)
cidr, err := utils.GetPodCidrFromNodeSpec(nsc.client)
if err != nil {
return nil, fmt.Errorf("Failed to get pod CIDR details from CNI conf file: %s", err.Error())
return nil, fmt.Errorf("Failed to get pod CIDR details from Node.spec: %s", err.Error())
}
nsc.masqueradeAll = false
nsc.podCidr = subnet + "/" + strconv.Itoa(cidrLen)
nsc.podCidr = cidr
}

nodeHostName, err := os.Hostname()
Expand Down
11 changes: 6 additions & 5 deletions app/options/options.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,29 +17,30 @@ type KubeRouterConfig struct {
RunServiceProxy bool
RunFirewall bool
RunRouter bool
CniConfFile string
MasqueradeAll bool
}

// NewKubeRouterConfig returns a KubeRouterConfig populated with defaults:
// one-minute sync periods for config, ipvs, iptables and routes; service
// proxy and firewall enabled; router and masquerade-all disabled.
func NewKubeRouterConfig() *KubeRouterConfig {
	cfg := &KubeRouterConfig{}
	cfg.ConfigSyncPeriod = 1 * time.Minute
	cfg.IpvsSyncPeriod = 1 * time.Minute
	cfg.IPTablesSyncPeriod = 1 * time.Minute
	cfg.RoutesSyncPeriod = 1 * time.Minute
	cfg.MasqueradeAll = false
	cfg.RunServiceProxy = true
	cfg.RunFirewall = true
	cfg.RunRouter = false
	return cfg
}

// AddFlags registers kube-router's command line flags on the given flag set.
// Note: the diff rendering interleaved the pre-commit registrations of
// run-service-proxy / run-firewall / run-router (pflag panics on duplicate
// flag names) and the stale cni-conf-file flag whose backing field this
// commit removes; only the post-commit registrations are kept.
func (s *KubeRouterConfig) AddFlags(fs *pflag.FlagSet) {
	fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
	fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization information (the master location is set by the master flag).")
	fs.BoolVar(&s.CleanupConfig, "cleanup-config", s.CleanupConfig, "If true cleanup iptables rules, ipvs, ipset configuration and exit.")
	fs.BoolVar(&s.MasqueradeAll, "masquerade-all", s.MasqueradeAll, "SNAT all traffic to cluster IP/node port. False by default")
	fs.DurationVar(&s.ConfigSyncPeriod, "config-sync-period", s.ConfigSyncPeriod, "How often configuration from the apiserver is refreshed. Must be greater than 0.")
	fs.DurationVar(&s.IPTablesSyncPeriod, "iptables-sync-period", s.IPTablesSyncPeriod, "The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m'). Must be greater than 0.")
	fs.DurationVar(&s.IpvsSyncPeriod, "ipvs-sync-period", s.IpvsSyncPeriod, "The maximum interval of how often ipvs config is refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.")
	fs.DurationVar(&s.RoutesSyncPeriod, "routes-sync-period", s.RoutesSyncPeriod, "The maximum interval of how often routes are advertised and learned (e.g. '5s', '1m', '2h22m'). Must be greater than 0.")
	fs.BoolVar(&s.RunServiceProxy, "run-service-proxy", s.RunServiceProxy, "If false, kube-router wont setup IPVS for services proxy. True by default.")
	fs.BoolVar(&s.RunFirewall, "run-firewall", s.RunFirewall, "If false, kube-router wont setup iptables to provide ingress firewall for pods. True by default.")
	fs.BoolVar(&s.RunRouter, "run-router", s.RunRouter, "If true each node advertise routes the rest of the nodes and learn the routes for the pods. False by default.")
}
7 changes: 7 additions & 0 deletions app/server.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,13 @@ type KubeRouter struct {

func NewKubeRouterDefault(config *options.KubeRouterConfig) (*KubeRouter, error) {

if len(config.Master) == 0 || len(config.Kubeconfig) == 0 {
if _, err := os.Stat("/var/lib/kube-router/kubeconfig"); os.IsNotExist(err) {
panic("Either one of --master or --kubeconfig must be specified. Or valid kubeconfig file must exist as /var/lib/kube-router/kubeconfig")
}
config.Kubeconfig = "/var/lib/kube-router/kubeconfig"
}

clientconfig, err := clientcmd.BuildConfigFromFlags(config.Master, config.Kubeconfig)
if err != nil {
panic(err.Error())
Expand Down
44 changes: 44 additions & 0 deletions kube-router-daemonset.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
# DaemonSet that runs kube-router on every cluster node in host-network mode.
# Indentation (destroyed by the page extraction) is restored to the standard
# Kubernetes manifest layout; content is unchanged.
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-router
  namespace: kube-system
  labels:
    app: kube-router
spec:
  template:
    metadata:
      labels:
        name: kube-router
    spec:
      # kube-router programs iptables/ipvs on the host, so it needs the
      # host network namespace and privileged mode.
      hostNetwork: true
      containers:
      - name: kube-router
        image: cloudnativelabs/kube-router
        args: ["--run-router=false"]
        securityContext:
          privileged: true
        imagePullPolicy: Always
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        volumeMounts:
        - mountPath: /lib/modules
          name: lib-modules
          readOnly: true
        - mountPath: /etc/cni/net.d/10-kuberouter.conf
          name: cni-conf-dir
        - mountPath: /var/lib/kube-router/kubeconfig
          name: kubeconfig
      volumes:
      - name: lib-modules
        hostPath:
          path: /lib/modules
      - name: cni-conf-dir
        hostPath:
          path: /etc/cni/net.d/10-kuberouter.conf
      - name: kubeconfig
        hostPath:
          path: /var/lib/kube-router/kubeconfig
46 changes: 41 additions & 5 deletions utils/pod_cidr.go
Original file line number Diff line number Diff line change
@@ -1,24 +1,60 @@
package utils

import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"

"github.com/containernetworking/cni/libcni"
"github.com/containernetworking/cni/plugins/ipam/host-local/backend/allocator"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)

func GetPodCidrDetails(cniConfFilePath string) (string, int, error) {
// GetPodCidrFromCniSpec reads the pod CIDR configured in the host-local IPAM
// section of the CNI conf file at cniConfFilePath. On any error a zero-value
// net.IPNet is returned (callers check len(cidr.IP) == 0).
// The diff rendering interleaved the pre-commit `return "", 0, ...` forms of
// the error returns; only the post-commit net.IPNet-returning forms are kept.
func GetPodCidrFromCniSpec(cniConfFilePath string) (net.IPNet, error) {
	netconfig, err := libcni.ConfFromFile(cniConfFilePath)
	if err != nil {
		return net.IPNet{}, fmt.Errorf("Failed to load CNI conf file: %s", err.Error())
	}
	var ipamConfig *allocator.IPAMConfig
	ipamConfig, _, err = allocator.LoadIPAMConfig(netconfig.Bytes, "")
	if err != nil {
		return net.IPNet{}, fmt.Errorf("Failed to get IPAM details from the CNI conf file: %s", err.Error())
	}
	// allocator's Subnet type is convertible to net.IPNet.
	return net.IPNet(ipamConfig.Subnet), nil
}

// InsertPodCidrInCniSpec rewrites the CNI conf file at cniConfFilePath so
// that the "subnet" field of its "ipam" section equals cidr. The file is
// rewritten in place with 0644 permissions.
func InsertPodCidrInCniSpec(cniConfFilePath string, cidr string) error {
	file, err := ioutil.ReadFile(cniConfFilePath)
	if err != nil {
		return fmt.Errorf("Failed to load CNI conf file: %s", err.Error())
	}
	config := make(map[string]interface{})
	if err = json.Unmarshal(file, &config); err != nil {
		return fmt.Errorf("Failed to parse JSON from CNI conf file: %s", err.Error())
	}
	// Guard the type assertion: the original unchecked form
	// config["ipam"].(map[string]interface{}) panics when the conf file has
	// no "ipam" object or it is not a JSON object.
	ipam, ok := config["ipam"].(map[string]interface{})
	if !ok {
		return fmt.Errorf("Failed to find ipam section in CNI conf file: %s", cniConfFilePath)
	}
	ipam["subnet"] = cidr
	// Check the Marshal error instead of discarding it; a marshal failure
	// would otherwise truncate the conf file to garbage.
	configJson, err := json.Marshal(config)
	if err != nil {
		return fmt.Errorf("Failed to serialize updated CNI conf: %s", err.Error())
	}
	if err = ioutil.WriteFile(cniConfFilePath, configJson, 0644); err != nil {
		return fmt.Errorf("Failed to insert subnet cidr into CNI conf file: %s", err.Error())
	}
	return nil
}

cidrlen, _ := ipamConfig.Subnet.Mask.Size()
return ipamConfig.Subnet.IP.String(), cidrlen, nil
func GetPodCidrFromNodeSpec(clientset *kubernetes.Clientset) (string, error) {
nodeHostName, err := os.Hostname()
if err != nil {
panic(err.Error())
}
node, err := clientset.Core().Nodes().Get(nodeHostName, metav1.GetOptions{})
if err != nil {
panic(err.Error())
}
return node.Spec.PodCIDR, nil
}

0 comments on commit f8fbb42

Please sign in to comment.