kubernetes module: support for kubernetes 1.4
parent 8ef3eaeb4e
commit 5bc7ae7adb
@@ -5,28 +5,62 @@ with lib;
|
||||
let
|
||||
cfg = config.services.kubernetes;
|
||||
|
||||
skipAttrs = attrs: map (filterAttrs (k: v: k != "enable"))
|
||||
(filter (v: !(hasAttr "enable" v) || v.enable) attrs);
|
||||
|
||||
infraContainer = pkgs.dockerTools.buildImage {
|
||||
name = "pause";
|
||||
tag = "latest";
|
||||
contents = cfg.package.pause;
|
||||
config.Cmd = "/bin/pause";
|
||||
};
|
||||
|
||||
kubeconfig = pkgs.writeText "kubeconfig" (builtins.toJSON {
|
||||
apiVersion = "v1";
|
||||
kind = "Config";
|
||||
clusters = [{
|
||||
name = "local";
|
||||
cluster.certificate-authority = cfg.kubeconfig.caFile;
|
||||
cluster.server = cfg.kubeconfig.server;
|
||||
}];
|
||||
users = [{
|
||||
name = "kubelet";
|
||||
user = {
|
||||
client-certificate = cfg.kubeconfig.certFile;
|
||||
client-key = cfg.kubeconfig.keyFile;
|
||||
};
|
||||
}];
|
||||
contexts = [{
|
||||
context = {
|
||||
cluster = "local";
|
||||
user = "kubelet";
|
||||
};
|
||||
current-context = "kubelet-context";
|
||||
}];
|
||||
});
|
||||
|
||||
policyFile = pkgs.writeText "kube-policy"
|
||||
concatStringsSep "\n" (map (builtins.toJSON cfg.apiserver.authorizationPolicy));
|
||||
|
||||
cniConfig = pkgs.buildEnv {
|
||||
name = "kubernetes-cni-config";
|
||||
paths = imap (i: entry:
|
||||
pkgs.writeTextDir "${10+i}-${entry.type}.conf" (builtins.toJSON entry)
|
||||
) cfg.kubelet.cni.config;
|
||||
};
|
||||
|
||||
manifests = pkgs.buildEnv {
|
||||
name = "kubernetes-manifests";
|
||||
paths = mapAttrsToList (name: manifest:
|
||||
pkgs.writeTextDir "${name}.json" (builtins.toJSON manifest)
|
||||
) cfg.kubelet.manifests;
|
||||
};
|
||||
|
||||
in {
|
||||
|
||||
###### interface
|
||||
|
||||
options.services.kubernetes = {
|
||||
package = mkOption {
|
||||
description = "Kubernetes package to use.";
|
||||
type = types.package;
|
||||
};
|
||||
|
||||
verbose = mkOption {
|
||||
description = "Kubernetes enable verbose mode for debugging";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
etcdServers = mkOption {
|
||||
description = "Kubernetes list of etcd servers to watch.";
|
||||
default = [ "127.0.0.1:2379" ];
|
||||
type = types.listOf types.str;
|
||||
};
|
||||
|
||||
roles = mkOption {
|
||||
description = ''
|
||||
Kubernetes role that this machine should take.
|
||||
@@ -38,18 +72,76 @@ in {
|
||||
type = types.listOf (types.enum ["master" "node"]);
|
||||
};
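For illustration, a deployment taking both roles on a single machine could be sketched as follows (this mirrors the roles the NixOS test later in this diff uses; everything else is left at its defaults):

    services.kubernetes.roles = [ "master" "node" ];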
|
||||
|
||||
package = mkOption {
|
||||
description = "Kubernetes package to use.";
|
||||
type = types.package;
|
||||
default = pkgs.kubernetes;
|
||||
};
|
||||
|
||||
verbose = mkOption {
|
||||
description = "Kubernetes enable verbose mode for debugging";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
etcd = {
|
||||
servers = mkOption {
|
||||
description = "List of etcd servers. By default etcd is started, except if this option is changed.";
|
||||
default = ["http://127.0.0.1:2379"];
|
||||
type = types.listOf types.str;
|
||||
};
|
||||
|
||||
keyFile = mkOption {
|
||||
description = "Etcd key file";
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
certFile = mkOption {
|
||||
description = "Etcd cert file";
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
caFile = mkOption {
|
||||
description = "Etcd ca file";
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
};
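As a sketch, pointing the module at an external TLS-protected etcd cluster might look like the following; the hostnames and certificate paths are hypothetical, but the option shape matches the cluster test later in this diff:

    services.kubernetes.etcd = {
      servers  = [ "https://etcd1:2379" "https://etcd2:2379" "https://etcd3:2379" ];
      caFile   = "/var/lib/kubernetes/ca.pem";               # hypothetical paths
      certFile = "/var/lib/kubernetes/etcd-client.pem";
      keyFile  = "/var/lib/kubernetes/etcd-client-key.pem";
    };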
|
||||
|
||||
kubeconfig = {
|
||||
server = mkOption {
|
||||
description = "Kubernetes apiserver server address";
|
||||
default = "http://${cfg.apiserver.address}:${toString cfg.apiserver.port}";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
caFile = mkOption {
|
||||
description = "Certificate authrority file to use to connect to kuberentes apiserver";
|
||||
type = types.nullOr types.path;
|
||||
default = null;
|
||||
};
|
||||
|
||||
certFile = mkOption {
|
||||
description = "Client certificate file to use to connect to kubernetes";
|
||||
type = types.nullOr types.path;
|
||||
default = null;
|
||||
};
|
||||
|
||||
keyFile = mkOption {
|
||||
description = "Client key file to use to connect to kubernetes";
|
||||
type = types.nullOr types.path;
|
||||
default = null;
|
||||
};
|
||||
};
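A worker that talks to a remote apiserver over TLS could be sketched as below (the server URL and file paths are hypothetical; compare kubeWorkerConfig in the test at the end of this diff):

    services.kubernetes.kubeconfig = {
      server   = "https://kubernetes:443";                   # hypothetical apiserver endpoint
      caFile   = "/var/lib/kubernetes/ca.pem";
      certFile = "/var/lib/kubernetes/worker.pem";
      keyFile  = "/var/lib/kubernetes/worker-key.pem";
    };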
|
||||
|
||||
dataDir = mkOption {
|
||||
description = "Kubernetes root directory for managing kubelet files.";
|
||||
default = "/var/lib/kubernetes";
|
||||
type = types.path;
|
||||
};
|
||||
|
||||
dockerCfg = mkOption {
|
||||
description = "Kubernetes contents of dockercfg file.";
|
||||
default = "";
|
||||
type = types.lines;
|
||||
};
|
||||
|
||||
apiserver = {
|
||||
enable = mkOption {
|
||||
description = "Whether to enable kubernetes apiserver.";
|
||||
@@ -72,6 +164,16 @@ in {
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
advertiseAddress = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver IP address on which to advertise the apiserver
|
||||
to members of the cluster. This address must be reachable by the rest
|
||||
of the cluster.
|
||||
'';
|
||||
default = null;
|
||||
type = types.nullOr types.str;
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
description = "Kubernetes apiserver listening port.";
|
||||
default = 8080;
|
||||
@@ -80,41 +182,36 @@ in {
|
||||
|
||||
securePort = mkOption {
|
||||
description = "Kubernetes apiserver secure port.";
|
||||
default = 6443;
|
||||
default = 443;
|
||||
type = types.int;
|
||||
};
|
||||
|
||||
tlsCertFile = mkOption {
|
||||
description = "Kubernetes apiserver certificate file.";
|
||||
default = "";
|
||||
type = types.str;
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
tlsPrivateKeyFile = mkOption {
|
||||
tlsKeyFile = mkOption {
|
||||
description = "Kubernetes apiserver private key file.";
|
||||
default = "";
|
||||
type = types.str;
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
clientCaFile = mkOption {
|
||||
description = "Kubernetes apiserver CA file for client auth.";
|
||||
default = "";
|
||||
type = types.str;
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
tokenAuth = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver token authentication file. See
|
||||
<link xlink:href="http://kubernetes.io/v1.0/docs/admin/authentication.html"/>
|
||||
<link xlink:href="http://kubernetes.io/docs/admin/authentication.html"/>
|
||||
'';
|
||||
default = {};
|
||||
example = literalExample ''
|
||||
{
|
||||
alice = "abc123";
|
||||
bob = "xyz987";
|
||||
}
|
||||
'';
|
||||
type = types.attrsOf types.str;
|
||||
default = null;
|
||||
example = ''token,user,uid,"group1,group2,group3"'';
|
||||
type = types.nullOr types.lines;
|
||||
};
|
||||
|
||||
authorizationMode = mkOption {
|
||||
@@ -148,13 +245,13 @@ in {
|
||||
|
||||
allowPrivileged = mkOption {
|
||||
description = "Whether to allow privileged containers on kubernetes.";
|
||||
default = false;
|
||||
default = true;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
portalNet = mkOption {
|
||||
description = "Kubernetes CIDR notation IP range from which to assign portal IPs";
|
||||
default = "10.10.10.10/16";
|
||||
default = "10.10.10.10/24";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
@@ -171,9 +268,9 @@ in {
|
||||
admissionControl = mkOption {
|
||||
description = ''
|
||||
Kubernetes admission control plugins to use. See
|
||||
<link xlink:href="http://kubernetes.io/v1.0/docs/admin/admission-controllers.html"/>
|
||||
<link xlink:href="http://kubernetes.io/docs/admin/admission-controllers/"/>
|
||||
'';
|
||||
default = ["AlwaysAdmit"];
|
||||
default = ["NamespaceLifecycle" "LimitRanger" "ServiceAccount" "ResourceQuota"];
|
||||
example = [
|
||||
"NamespaceLifecycle" "NamespaceExists" "LimitRanger"
|
||||
"SecurityContextDeny" "ServiceAccount" "ResourceQuota"
|
||||
@@ -181,15 +278,40 @@ in {
|
||||
type = types.listOf types.str;
|
||||
};
|
||||
|
||||
serviceAccountKey = mkOption {
|
||||
serviceAccountKeyFile = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver PEM-encoded x509 RSA private or public key file,
|
||||
used to verify ServiceAccount tokens.
|
||||
used to verify ServiceAccount tokens. By default tls private key file
|
||||
is used.
|
||||
'';
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
kubeletClientCaFile = mkOption {
|
||||
description = "Path to a cert file for connecting to kubelet";
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
kubeletClientCertFile = mkOption {
|
||||
description = "Client certificate to use for connections to kubelet";
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
kubeletClientKeyFile = mkOption {
|
||||
description = "Key to use for connections to kubelet";
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
kubeletHttps = mkOption {
|
||||
description = "Whether to use https for connections to kubelet";
|
||||
default = true;
|
||||
type = types.bool;
|
||||
};
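Taken together, a master that serves the apiserver over TLS and authenticates to kubelets with a client certificate might be sketched like this (certificate paths are hypothetical; kubeMasterConfig in the test below sets the same certificate options):

    services.kubernetes.apiserver = {
      tlsCertFile           = "/var/lib/kubernetes/apiserver.pem";      # hypothetical paths
      tlsKeyFile            = "/var/lib/kubernetes/apiserver-key.pem";
      clientCaFile          = "/var/lib/kubernetes/ca.pem";
      kubeletHttps          = true;
      kubeletClientCaFile   = "/var/lib/kubernetes/ca.pem";
      kubeletClientCertFile = "/var/lib/kubernetes/worker.pem";
      kubeletClientKeyFile  = "/var/lib/kubernetes/worker-key.pem";
    };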
|
||||
|
||||
extraOpts = mkOption {
|
||||
description = "Kubernetes apiserver extra command line options.";
|
||||
default = "";
|
||||
@@ -216,10 +338,10 @@ in {
|
||||
type = types.int;
|
||||
};
|
||||
|
||||
master = mkOption {
|
||||
description = "Kubernetes apiserver address";
|
||||
default = "${cfg.apiserver.address}:${toString cfg.apiserver.port}";
|
||||
type = types.str;
|
||||
leaderElect = mkOption {
|
||||
description = "Whether to start leader election before executing main loop";
|
||||
type = types.bool;
|
||||
default = false;
|
||||
};
|
||||
|
||||
extraOpts = mkOption {
|
||||
@@ -248,13 +370,13 @@ in {
|
||||
type = types.int;
|
||||
};
|
||||
|
||||
master = mkOption {
|
||||
description = "Kubernetes apiserver address";
|
||||
default = "${cfg.apiserver.address}:${toString cfg.apiserver.port}";
|
||||
type = types.str;
|
||||
leaderElect = mkOption {
|
||||
description = "Whether to start leader election before executing main loop";
|
||||
type = types.bool;
|
||||
default = false;
|
||||
};
|
||||
|
||||
serviceAccountPrivateKey = mkOption {
|
||||
serviceAccountKeyFile = mkOption {
|
||||
description = ''
|
||||
Kubernetes controller manager PEM-encoded private RSA key file used to
|
||||
sign service account tokens
|
||||
@@ -272,6 +394,12 @@ in {
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
clusterCidr = mkOption {
|
||||
description = "Kubernetes controller manager CIDR Range for Pods in cluster";
|
||||
default = "10.10.0.0/16";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
extraOpts = mkOption {
|
||||
description = "Kubernetes controller manager extra command line options.";
|
||||
default = "";
|
||||
@@ -292,6 +420,12 @@ in {
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
registerSchedulable = mkOption {
|
||||
description = "Register the node as schedulable. No-op if register-node is false.";
|
||||
default = true;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
address = mkOption {
|
||||
description = "Kubernetes kubelet info server listening address.";
|
||||
default = "0.0.0.0";
|
||||
@@ -304,6 +438,18 @@ in {
|
||||
type = types.int;
|
||||
};
|
||||
|
||||
tlsCertFile = mkOption {
|
||||
description = "File containing x509 Certificate for HTTPS.";
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
tlsKeyFile = mkOption {
|
||||
description = "File containing x509 private key matching tlsCertFile.";
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
healthz = {
|
||||
bind = mkOption {
|
||||
description = "Kubernetes kubelet healthz listening address.";
|
||||
@@ -326,19 +472,10 @@ in {
|
||||
|
||||
allowPrivileged = mkOption {
|
||||
description = "Whether to allow kubernetes containers to request privileged mode.";
|
||||
default = false;
|
||||
default = true;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
apiServers = mkOption {
|
||||
description = ''
|
||||
Kubernetes kubelet list of Kubernetes API servers for publishing events,
|
||||
and reading pods and services.
|
||||
'';
|
||||
default = ["${cfg.apiserver.address}:${toString cfg.apiserver.port}"];
|
||||
type = types.listOf types.str;
|
||||
};
|
||||
|
||||
cadvisorPort = mkOption {
|
||||
description = "Kubernetes kubelet local cadvisor port.";
|
||||
default = 4194;
|
||||
@@ -347,16 +484,62 @@ in {
|
||||
|
||||
clusterDns = mkOption {
|
||||
description = "Use alternative dns.";
|
||||
default = "";
|
||||
default = "10.10.1.1";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
clusterDomain = mkOption {
|
||||
description = "Use alternative domain.";
|
||||
default = "kubernetes.io";
|
||||
default = "cluster.local";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
networkPlugin = mkOption {
|
||||
description = "Network plugin to use by kubernetes";
|
||||
type = types.nullOr (types.enum ["cni" "kubenet"]);
|
||||
default = "kubenet";
|
||||
};
|
||||
|
||||
cni = {
|
||||
packages = mkOption {
|
||||
description = "List of network plugin packages to install";
|
||||
type = types.listOf types.package;
|
||||
default = [];
|
||||
};
|
||||
|
||||
config = mkOption {
|
||||
description = "Kubernetes CNI configuration";
|
||||
type = types.listOf types.attrs;
|
||||
default = [];
|
||||
example = literalExample ''
|
||||
[{
|
||||
"cniVersion": "0.2.0",
|
||||
"name": "mynet",
|
||||
"type": "bridge",
|
||||
"bridge": "cni0",
|
||||
"isGateway": true,
|
||||
"ipMasq": true,
|
||||
"ipam": {
|
||||
"type": "host-local",
|
||||
"subnet": "10.22.0.0/16",
|
||||
"routes": [
|
||||
{ "dst": "0.0.0.0/0" }
|
||||
]
|
||||
}
|
||||
} {
|
||||
"cniVersion": "0.2.0",
|
||||
"type": "loopback"
|
||||
}]
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
manifests = mkOption {
|
||||
description = "List of manifests to bootstrap with kubelet";
|
||||
type = types.attrsOf types.attrs;
|
||||
default = {};
|
||||
};
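Each attribute of this set is serialized with builtins.toJSON and written to <name>.json in the directory handed to the kubelet via --pod-manifest-path, so a hypothetical static pod could be sketched as:

    services.kubernetes.kubelet.manifests.nginx = {
      apiVersion = "v1";
      kind = "Pod";
      metadata.name = "nginx";
      spec.containers = [{
        name  = "nginx";
        image = "nginx";    # hypothetical image
      }];
    };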
|
||||
|
||||
extraOpts = mkOption {
|
||||
description = "Kubernetes kubelet extra command line options.";
|
||||
default = "";
|
||||
@@ -377,12 +560,6 @@ in {
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
master = mkOption {
|
||||
description = "Kubernetes apiserver address";
|
||||
default = "${cfg.apiserver.address}:${toString cfg.apiserver.port}";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
extraOpts = mkOption {
|
||||
description = "Kubernetes proxy extra command line options.";
|
||||
default = "";
|
||||
@@ -390,23 +567,23 @@ in {
|
||||
};
|
||||
};
|
||||
|
||||
kube2sky = {
|
||||
enable = mkEnableOption "Whether to enable kube2sky dns service.";
|
||||
dns = {
|
||||
enable = mkEnableOption "kubernetes dns service.";
|
||||
|
||||
port = mkOption {
|
||||
description = "Kubernetes dns listening port";
|
||||
default = 53;
|
||||
type = types.int;
|
||||
};
|
||||
|
||||
domain = mkOption {
|
||||
description = "Kuberntes kube2sky domain under which all DNS names will be hosted.";
|
||||
description = "Kuberntes dns domain under which to create names.";
|
||||
default = cfg.kubelet.clusterDomain;
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
master = mkOption {
|
||||
description = "Kubernetes apiserver address";
|
||||
default = "${cfg.apiserver.address}:${toString cfg.apiserver.port}";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
extraOpts = mkOption {
|
||||
description = "Kubernetes kube2sky extra command line options.";
|
||||
description = "Kubernetes dns extra command line options.";
|
||||
default = "";
|
||||
type = types.str;
|
||||
};
|
||||
@@ -416,50 +593,118 @@ in {
|
||||
###### implementation
|
||||
|
||||
config = mkMerge [
|
||||
(mkIf cfg.kubelet.enable {
|
||||
systemd.services.kubelet = {
|
||||
description = "Kubernetes Kubelet Service";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" "docker.service" "kube-apiserver.service" ];
|
||||
path = with pkgs; [ gitMinimal openssh docker utillinux iproute ethtool thin-provisioning-tools iptables ];
|
||||
preStart = ''
|
||||
docker load < ${infraContainer}
|
||||
rm /opt/cni/bin/* || true
|
||||
${concatMapStringsSep "\n" (p: "ln -fs ${p.plugins}/* /opt/cni/bin") cfg.kubelet.cni.packages}
|
||||
'';
|
||||
serviceConfig = {
|
||||
ExecStart = ''${cfg.package}/bin/kubelet \
|
||||
--pod-manifest-path=${manifests} \
|
||||
--kubeconfig=${kubeconfig} \
|
||||
--require-kubeconfig \
|
||||
--address=${cfg.kubelet.address} \
|
||||
--port=${toString cfg.kubelet.port} \
|
||||
--register-node=${if cfg.kubelet.registerNode then "true" else "false"} \
|
||||
--register-schedulable=${if cfg.kubelet.registerSchedulable then "true" else "false"} \
|
||||
${optionalString (cfg.kubelet.tlsCertFile != null)
|
||||
"--tls-cert-file=${cfg.kubelet.tlsCertFile}"} \
|
||||
${optionalString (cfg.kubelet.tlsKeyFile != null)
|
||||
"--tls-private-key-file=${cfg.kubelet.tlsKeyFile}"} \
|
||||
--healthz-bind-address=${cfg.kubelet.healthz.bind} \
|
||||
--healthz-port=${toString cfg.kubelet.healthz.port} \
|
||||
--hostname-override=${cfg.kubelet.hostname} \
|
||||
--allow-privileged=${if cfg.kubelet.allowPrivileged then "true" else "false"} \
|
||||
--root-dir=${cfg.dataDir} \
|
||||
--cadvisor_port=${toString cfg.kubelet.cadvisorPort} \
|
||||
${optionalString (cfg.kubelet.clusterDns != "")
|
||||
"--cluster-dns=${cfg.kubelet.clusterDns}"} \
|
||||
${optionalString (cfg.kubelet.clusterDomain != "")
|
||||
"--cluster-domain=${cfg.kubelet.clusterDomain}"} \
|
||||
--pod-infra-container-image=pause \
|
||||
${optionalString (cfg.kubelet.networkPlugin != null)
|
||||
"--network-plugin=${cfg.kubelet.networkPlugin}"} \
|
||||
--cni-conf-dir=${cniConfig} \
|
||||
--reconcile-cidr \
|
||||
--hairpin-mode=hairpin-veth \
|
||||
${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
|
||||
${cfg.kubelet.extraOpts}
|
||||
'';
|
||||
WorkingDirectory = cfg.dataDir;
|
||||
};
|
||||
};
|
||||
|
||||
environment.etc = mapAttrs' (name: manifest:
|
||||
nameValuePair "kubernetes/manifests/${name}.json" {
|
||||
text = builtins.toJSON manifest;
|
||||
mode = "0755";
|
||||
}
|
||||
) cfg.kubelet.manifests;
|
||||
|
||||
# Always include cni plugins
|
||||
services.kubernetes.kubelet.cni.packages = [pkgs.cni];
|
||||
})
|
||||
|
||||
(mkIf cfg.apiserver.enable {
|
||||
systemd.services.kube-apiserver = {
|
||||
description = "Kubernetes Api Server";
|
||||
description = "Kubernetes Kubelet Service";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
requires = ["kubernetes-setup.service"];
|
||||
after = [ "network.target" "etcd.service" "kubernetes-setup.service" ];
|
||||
after = [ "network.target" "docker.service" ];
|
||||
serviceConfig = {
|
||||
ExecStart = let
|
||||
authorizationPolicyFile =
|
||||
pkgs.writeText "kubernetes-policy"
|
||||
(builtins.toJSON cfg.apiserver.authorizationPolicy);
|
||||
tokenAuthFile =
|
||||
pkgs.writeText "kubernetes-auth"
|
||||
(concatImapStringsSep "\n" (i: v: v + "," + (toString i))
|
||||
(mapAttrsToList (name: token: token + "," + name) cfg.apiserver.tokenAuth));
|
||||
in ''${cfg.package}/bin/kube-apiserver \
|
||||
--etcd-servers=${concatMapStringsSep "," (f: "http://${f}") cfg.etcdServers} \
|
||||
--insecure-bind-address=${cfg.apiserver.address} \
|
||||
ExecStart = ''${cfg.package}/bin/kube-apiserver \
|
||||
--etcd-servers=${concatStringsSep "," cfg.etcd.servers} \
|
||||
${optionalString (cfg.etcd.caFile != null)
|
||||
"--etcd-cafile=${cfg.etcd.caFile}"} \
|
||||
${optionalString (cfg.etcd.certFile != null)
|
||||
"--etcd-certfile=${cfg.etcd.certFile}"} \
|
||||
${optionalString (cfg.etcd.keyFile != null)
|
||||
"--etcd-keyfile=${cfg.etcd.keyFile}"} \
|
||||
--insecure-port=${toString cfg.apiserver.port} \
|
||||
--bind-address=${cfg.apiserver.publicAddress} \
|
||||
--bind-address=0.0.0.0 \
|
||||
${optionalString (cfg.apiserver.advertiseAddress != null)
|
||||
"--advertise-address=${cfg.apiserver.advertiseAddress}"} \
|
||||
--allow-privileged=${if cfg.apiserver.allowPrivileged then "true" else "false"} \
|
||||
${optionalString (cfg.apiserver.tlsCertFile!="")
|
||||
${optionalString (cfg.apiserver.tlsCertFile != null)
|
||||
"--tls-cert-file=${cfg.apiserver.tlsCertFile}"} \
|
||||
${optionalString (cfg.apiserver.tlsPrivateKeyFile!="")
|
||||
"--tls-private-key-file=${cfg.apiserver.tlsPrivateKeyFile}"} \
|
||||
${optionalString (cfg.apiserver.tokenAuth!=[])
|
||||
"--token-auth-file=${tokenAuthFile}"} \
|
||||
${optionalString (cfg.apiserver.clientCaFile!="")
|
||||
${optionalString (cfg.apiserver.tlsKeyFile != null)
|
||||
"--tls-private-key-file=${cfg.apiserver.tlsKeyFile}"} \
|
||||
${optionalString (cfg.apiserver.tokenAuth != null)
|
||||
"--token-auth-file=${cfg.apiserver.tokenAuth}"} \
|
||||
--kubelet-https=${if cfg.apiserver.kubeletHttps then "true" else "false"} \
|
||||
${optionalString (cfg.apiserver.kubeletClientCaFile != null)
|
||||
"--kubelet-certificate-authority=${cfg.apiserver.kubeletClientCaFile}"} \
|
||||
${optionalString (cfg.apiserver.kubeletClientCertFile != null)
|
||||
"--kubelet-client-certificate=${cfg.apiserver.kubeletClientCertFile}"} \
|
||||
${optionalString (cfg.apiserver.kubeletClientKeyFile != null)
|
||||
"--kubelet-client-key=${cfg.apiserver.kubeletClientKeyFile}"} \
|
||||
${optionalString (cfg.apiserver.clientCaFile != null)
|
||||
"--client-ca-file=${cfg.apiserver.clientCaFile}"} \
|
||||
--authorization-mode=${cfg.apiserver.authorizationMode} \
|
||||
${optionalString (cfg.apiserver.authorizationMode == "ABAC")
|
||||
"--authorization-policy-file=${authorizationPolicyFile}"} \
|
||||
"--authorization-policy-file=${policyFile}"} \
|
||||
--secure-port=${toString cfg.apiserver.securePort} \
|
||||
--service-cluster-ip-range=${cfg.apiserver.portalNet} \
|
||||
${optionalString (cfg.apiserver.runtimeConfig!="")
|
||||
${optionalString (cfg.apiserver.runtimeConfig != "")
|
||||
"--runtime-config=${cfg.apiserver.runtimeConfig}"} \
|
||||
--admission_control=${concatStringsSep "," cfg.apiserver.admissionControl} \
|
||||
${optionalString (cfg.apiserver.serviceAccountKey!=null)
|
||||
"--service-account-key-file=${cfg.apiserver.serviceAccountKey}"} \
|
||||
--logtostderr=true \
|
||||
${optionalString cfg.verbose "--v=6 --log-flush-frequency=1s"} \
|
||||
${optionalString (cfg.apiserver.serviceAccountKeyFile!=null)
|
||||
"--service-account-key-file=${cfg.apiserver.serviceAccountKeyFile}"} \
|
||||
${optionalString cfg.verbose "--v=6"} \
|
||||
${optionalString cfg.verbose "--log-flush-frequency=1s"} \
|
||||
${cfg.apiserver.extraOpts}
|
||||
'';
|
||||
WorkingDirectory = cfg.dataDir;
|
||||
User = "kubernetes";
|
||||
Group = "kubernetes";
|
||||
AmbientCapabilities = "cap_net_bind_service";
|
||||
Restart = "on-failure";
|
||||
RestartSec = 5;
|
||||
};
|
||||
};
|
||||
})
|
||||
@@ -468,17 +713,20 @@ in {
|
||||
systemd.services.kube-scheduler = {
|
||||
description = "Kubernetes Scheduler Service";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" "kubernetes-apiserver.service" ];
|
||||
after = [ "kube-apiserver.service" ];
|
||||
serviceConfig = {
|
||||
ExecStart = ''${cfg.package}/bin/kube-scheduler \
|
||||
--address=${cfg.scheduler.address} \
|
||||
--port=${toString cfg.scheduler.port} \
|
||||
--master=${cfg.scheduler.master} \
|
||||
--logtostderr=true \
|
||||
${optionalString cfg.verbose "--v=6 --log-flush-frequency=1s"} \
|
||||
--leader-elect=${if cfg.scheduler.leaderElect then "true" else "false"} \
|
||||
--kubeconfig=${kubeconfig} \
|
||||
${optionalString cfg.verbose "--v=6"} \
|
||||
${optionalString cfg.verbose "--log-flush-frequency=1s"} \
|
||||
${cfg.scheduler.extraOpts}
|
||||
'';
|
||||
WorkingDirectory = cfg.dataDir;
|
||||
User = "kubernetes";
|
||||
Group = "kubernetes";
|
||||
};
|
||||
};
|
||||
})
|
||||
@@ -487,113 +735,94 @@ in {
|
||||
systemd.services.kube-controller-manager = {
|
||||
description = "Kubernetes Controller Manager Service";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" "kubernetes-apiserver.service" ];
|
||||
after = [ "kube-apiserver.service" ];
|
||||
serviceConfig = {
|
||||
ExecStart = ''${cfg.package}/bin/kube-controller-manager \
|
||||
--address=${cfg.controllerManager.address} \
|
||||
--port=${toString cfg.controllerManager.port} \
|
||||
--master=${cfg.controllerManager.master} \
|
||||
${optionalString (cfg.controllerManager.serviceAccountPrivateKey!=null)
|
||||
"--service-account-private-key-file=${cfg.controllerManager.serviceAccountPrivateKey}"} \
|
||||
--kubeconfig=${kubeconfig} \
|
||||
--leader-elect=${if cfg.controllerManager.leaderElect then "true" else "false"} \
|
||||
${if (cfg.controllerManager.serviceAccountKeyFile!=null)
|
||||
then "--service-account-private-key-file=${cfg.controllerManager.serviceAccountKeyFile}"
|
||||
else "--service-account-private-key-file=/var/run/kubernetes/apiserver.key"} \
|
||||
${optionalString (cfg.controllerManager.rootCaFile!=null)
|
||||
"--root-ca-file=${cfg.controllerManager.rootCaFile}"} \
|
||||
--logtostderr=true \
|
||||
${optionalString cfg.verbose "--v=6 --log-flush-frequency=1s"} \
|
||||
${optionalString (cfg.controllerManager.clusterCidr!=null)
|
||||
"--cluster-cidr=${cfg.controllerManager.clusterCidr}"} \
|
||||
--allocate-node-cidrs=true \
|
||||
${optionalString cfg.verbose "--v=6"} \
|
||||
${optionalString cfg.verbose "--log-flush-frequency=1s"} \
|
||||
${cfg.controllerManager.extraOpts}
|
||||
'';
|
||||
WorkingDirectory = cfg.dataDir;
|
||||
User = "kubernetes";
|
||||
Group = "kubernetes";
|
||||
};
|
||||
};
|
||||
})
|
||||
|
||||
(mkIf cfg.kubelet.enable {
|
||||
systemd.services.kubelet = {
|
||||
description = "Kubernetes Kubelet Service";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
requires = ["kubernetes-setup.service"];
|
||||
after = [ "network.target" "etcd.service" "docker.service" "kubernetes-setup.service" ];
|
||||
path = [ pkgs.gitMinimal pkgs.openssh ];
|
||||
script = ''
|
||||
export PATH="/bin:/sbin:/usr/bin:/usr/sbin:$PATH"
|
||||
exec ${cfg.package}/bin/kubelet \
|
||||
--api-servers=${concatMapStringsSep "," (f: "http://${f}") cfg.kubelet.apiServers} \
|
||||
--register-node=${if cfg.kubelet.registerNode then "true" else "false"} \
|
||||
--address=${cfg.kubelet.address} \
|
||||
--port=${toString cfg.kubelet.port} \
|
||||
--healthz-bind-address=${cfg.kubelet.healthz.bind} \
|
||||
--healthz-port=${toString cfg.kubelet.healthz.port} \
|
||||
--hostname-override=${cfg.kubelet.hostname} \
|
||||
--allow-privileged=${if cfg.kubelet.allowPrivileged then "true" else "false"} \
|
||||
--root-dir=${cfg.dataDir} \
|
||||
--cadvisor_port=${toString cfg.kubelet.cadvisorPort} \
|
||||
${optionalString (cfg.kubelet.clusterDns != "")
|
||||
''--cluster-dns=${cfg.kubelet.clusterDns}''} \
|
||||
${optionalString (cfg.kubelet.clusterDomain != "")
|
||||
''--cluster-domain=${cfg.kubelet.clusterDomain}''} \
|
||||
--logtostderr=true \
|
||||
${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
|
||||
${cfg.kubelet.extraOpts}
|
||||
'';
|
||||
serviceConfig.WorkingDirectory = cfg.dataDir;
|
||||
};
|
||||
})
|
||||
|
||||
(mkIf cfg.proxy.enable {
|
||||
systemd.services.kube-proxy = {
|
||||
description = "Kubernetes Proxy Service";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" "etcd.service" ];
|
||||
after = [ "kube-apiserver.service" ];
|
||||
path = [pkgs.iptables];
|
||||
serviceConfig = {
|
||||
ExecStart = ''${cfg.package}/bin/kube-proxy \
|
||||
--master=${cfg.proxy.master} \
|
||||
--kubeconfig=${kubeconfig} \
|
||||
--bind-address=${cfg.proxy.address} \
|
||||
--logtostderr=true \
|
||||
${optionalString cfg.verbose "--v=6 --log-flush-frequency=1s"} \
|
||||
${cfg.proxy.extraOpts}
|
||||
${optionalString cfg.verbose "--v=6"} \
|
||||
${optionalString cfg.verbose "--log-flush-frequency=1s"} \
|
||||
${cfg.controllerManager.extraOpts}
|
||||
'';
|
||||
Restart = "always"; # Retry connection
|
||||
RestartSec = "5s";
|
||||
WorkingDirectory = cfg.dataDir;
|
||||
};
|
||||
};
|
||||
})
|
||||
|
||||
(mkIf cfg.kube2sky.enable {
|
||||
systemd.services.kube2sky = {
|
||||
description = "Kubernetes Dns Bridge Service";
|
||||
(mkIf cfg.dns.enable {
|
||||
systemd.services.kube-dns = {
|
||||
description = "Kubernetes Dns Service";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" "skydns.service" "etcd.service" "kubernetes-apiserver.service" ];
|
||||
after = [ "kube-apiserver.service" ];
|
||||
serviceConfig = {
|
||||
ExecStart = ''${cfg.package}/bin/kube2sky \
|
||||
-etcd-server=http://${head cfg.etcdServers} \
|
||||
-domain=${cfg.kube2sky.domain} \
|
||||
-kube_master_url=http://${cfg.kube2sky.master} \
|
||||
-logtostderr=true \
|
||||
${optionalString cfg.verbose "--v=6 --log-flush-frequency=1s"} \
|
||||
${cfg.kube2sky.extraOpts}
|
||||
ExecStart = ''${cfg.package}/bin/kube-dns \
|
||||
--kubecfg-file=${kubeconfig} \
|
||||
--dns-port=${toString cfg.dns.port} \
|
||||
--domain=${cfg.dns.domain} \
|
||||
${optionalString cfg.verbose "--v=6"} \
|
||||
${optionalString cfg.verbose "--log-flush-frequency=1s"} \
|
||||
${cfg.dns.extraOpts}
|
||||
'';
|
||||
WorkingDirectory = cfg.dataDir;
|
||||
User = "kubernetes";
|
||||
Group = "kubernetes";
|
||||
AmbientCapabilities = "cap_net_bind_service";
|
||||
SendSIGHUP = true;
|
||||
};
|
||||
};
|
||||
})
|
||||
|
||||
(mkIf cfg.kubelet.enable {
|
||||
boot.kernelModules = ["br_netfilter"];
|
||||
})
|
||||
|
||||
(mkIf (any (el: el == "master") cfg.roles) {
|
||||
virtualisation.docker.enable = mkDefault true;
|
||||
services.kubernetes.kubelet.enable = mkDefault true;
|
||||
services.kubernetes.kubelet.allowPrivileged = mkDefault true;
|
||||
services.kubernetes.apiserver.enable = mkDefault true;
|
||||
services.kubernetes.scheduler.enable = mkDefault true;
|
||||
services.kubernetes.controllerManager.enable = mkDefault true;
|
||||
services.kubernetes.kube2sky.enable = mkDefault true;
|
||||
services.etcd.enable = mkDefault (cfg.etcd.servers == ["http://127.0.0.1:2379"]);
|
||||
})
|
||||
|
||||
(mkIf (any (el: el == "node") cfg.roles) {
|
||||
virtualisation.docker.enable = mkDefault true;
|
||||
virtualisation.docker.logDriver = mkDefault "json-file";
|
||||
services.kubernetes.kubelet.enable = mkDefault true;
|
||||
services.kubernetes.proxy.enable = mkDefault true;
|
||||
})
|
||||
|
||||
(mkIf (any (el: el == "node" || el == "master") cfg.roles) {
|
||||
services.etcd.enable = mkDefault true;
|
||||
|
||||
services.skydns.enable = mkDefault true;
|
||||
services.skydns.domain = mkDefault cfg.kubelet.clusterDomain;
|
||||
services.kubernetes.dns.enable = mkDefault true;
|
||||
})
|
||||
|
||||
(mkIf (
|
||||
@@ -601,24 +830,16 @@ in {
|
||||
cfg.scheduler.enable ||
|
||||
cfg.controllerManager.enable ||
|
||||
cfg.kubelet.enable ||
|
||||
cfg.proxy.enable
|
||||
cfg.proxy.enable ||
|
||||
cfg.dns.enable
|
||||
) {
|
||||
systemd.services.kubernetes-setup = {
|
||||
description = "Kubernetes setup.";
|
||||
serviceConfig.Type = "oneshot";
|
||||
script = ''
|
||||
mkdir -p /var/run/kubernetes
|
||||
chown kubernetes /var/lib/kubernetes
|
||||
|
||||
rm ${cfg.dataDir}/.dockercfg || true
|
||||
ln -fs ${pkgs.writeText "kubernetes-dockercfg" cfg.dockerCfg} ${cfg.dataDir}/.dockercfg
|
||||
'';
|
||||
};
|
||||
|
||||
services.kubernetes.package = mkDefault pkgs.kubernetes;
|
||||
systemd.tmpfiles.rules = [
|
||||
"d /opt/cni/bin 0755 root root -"
|
||||
"d /var/run/kubernetes 0755 kubernetes kubernetes -"
|
||||
"d /var/lib/kubernetes 0755 kubernetes kubernetes -"
|
||||
];
|
||||
|
||||
environment.systemPackages = [ cfg.package ];
|
||||
|
||||
users.extraUsers = singleton {
|
||||
name = "kubernetes";
|
||||
uid = config.ids.uids.kubernetes;
|
||||
@@ -630,6 +851,5 @@ in {
|
||||
};
|
||||
users.extraGroups.kubernetes.gid = config.ids.gids.kubernetes;
|
||||
})
|
||||
|
||||
];
|
||||
}
|
||||
|
@@ -1,182 +1,408 @@
|
||||
# This test runs two node kubernetes cluster and checks if simple redis pod works
|
||||
{ system ? builtins.currentSystem }:
|
||||
|
||||
import ./make-test.nix ({ pkgs, ...} : rec {
|
||||
name = "kubernetes";
|
||||
meta = with pkgs.stdenv.lib.maintainers; {
|
||||
maintainers = [ offline ];
|
||||
with import ../lib/testing.nix { inherit system; };
|
||||
with import ../lib/qemu-flags.nix;
|
||||
with pkgs.lib;
|
||||
|
||||
let
|
||||
redisPod = pkgs.writeText "redis-master-pod.json" (builtins.toJSON {
|
||||
kind = "Pod";
|
||||
apiVersion = "v1";
|
||||
metadata.name = "redis";
|
||||
metadata.labels.name = "redis";
|
||||
spec.containers = [{
|
||||
name = "redis";
|
||||
image = "redis";
|
||||
args = ["--bind" "0.0.0.0"];
|
||||
imagePullPolicy = "Never";
|
||||
ports = [{
|
||||
name = "redis-server";
|
||||
containerPort = 6379;
|
||||
}];
|
||||
}];
|
||||
});
|
||||
|
||||
redisService = pkgs.writeText "redis-service.json" (builtins.toJSON {
|
||||
kind = "Service";
|
||||
apiVersion = "v1";
|
||||
metadata.name = "redis";
|
||||
spec = {
|
||||
ports = [{port = 6379; targetPort = 6379;}];
|
||||
selector = {name = "redis";};
|
||||
};
|
||||
});
|
||||
|
||||
redisImage = pkgs.dockerTools.buildImage {
|
||||
name = "redis";
|
||||
tag = "latest";
|
||||
contents = pkgs.redis;
|
||||
config.Entrypoint = "/bin/redis-server";
|
||||
};
|
||||
|
||||
redisMaster = builtins.toFile "redis-master-pod.yaml" ''
|
||||
id: redis-master-pod
|
||||
kind: Pod
|
||||
apiVersion: v1beta1
|
||||
desiredState:
|
||||
manifest:
|
||||
version: v1beta1
|
||||
id: redis-master-pod
|
||||
containers:
|
||||
- name: master
|
||||
image: master:5000/nix
|
||||
cpu: 100
|
||||
ports:
|
||||
- name: redis-server
|
||||
containerPort: 6379
|
||||
hostPort: 6379
|
||||
volumeMounts:
|
||||
- name: nix-store
|
||||
mountPath: /nix/store
|
||||
readOnly: true
|
||||
volumeMounts:
|
||||
- name: system-profile
|
||||
mountPath: /bin
|
||||
readOnly: true
|
||||
command:
|
||||
- /bin/redis-server
|
||||
volumes:
|
||||
- name: nix-store
|
||||
source:
|
||||
hostDir:
|
||||
path: /nix/store
|
||||
- name: system-profile
|
||||
source:
|
||||
hostDir:
|
||||
path: /run/current-system/sw/bin
|
||||
labels:
|
||||
name: redis
|
||||
role: master
|
||||
testSimplePod = ''
|
||||
$kubernetes->execute("docker load < ${redisImage}");
|
||||
$kubernetes->waitUntilSucceeds("kubectl create -f ${redisPod}");
|
||||
$kubernetes->succeed("kubectl create -f ${redisService}");
|
||||
$kubernetes->waitUntilSucceeds("kubectl get pod redis | grep Running");
|
||||
$kubernetes->succeed("nc -z \$\(dig \@10.10.0.1 redis.default.svc.cluster.local +short\) 6379");
|
||||
'';
|
||||
in {
|
||||
# This test runs kubernetes on a single node
|
||||
trivial = makeTest {
|
||||
name = "kubernetes-trivial";
|
||||
|
||||
nodes = {
|
||||
master =
|
||||
{ config, pkgs, lib, nodes, ... }:
|
||||
{
|
||||
virtualisation.memorySize = 768;
|
||||
services.kubernetes = {
|
||||
roles = ["master" "node"];
|
||||
dockerCfg = ''{"master:5000":{}}'';
|
||||
controllerManager.machines = ["master" "node"];
|
||||
apiserver.address = "0.0.0.0";
|
||||
verbose = true;
|
||||
nodes = {
|
||||
kubernetes =
|
||||
{ config, pkgs, lib, nodes, ... }:
|
||||
{
|
||||
virtualisation.memorySize = 768;
|
||||
virtualisation.diskSize = 2048;
|
||||
|
||||
programs.bash.enableCompletion = true;
|
||||
|
||||
services.kubernetes.roles = ["master" "node"];
|
||||
virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false -b cbr0";
|
||||
|
||||
networking.bridges.cbr0.interfaces = [];
|
||||
networking.interfaces.cbr0 = {};
|
||||
};
|
||||
virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false -b cbr0 --insecure-registry master:5000";
|
||||
};
|
||||
|
||||
services.etcd = {
|
||||
listenPeerUrls = ["http://0.0.0.0:7001"];
|
||||
initialAdvertisePeerUrls = ["http://master:7001"];
|
||||
initialCluster = ["master=http://master:7001" "node=http://node:7001"];
|
||||
};
|
||||
services.dockerRegistry.enable = true;
|
||||
services.dockerRegistry.host = "0.0.0.0";
|
||||
services.dockerRegistry.port = 5000;
|
||||
testScript = ''
|
||||
startAll;
|
||||
|
||||
virtualisation.vlans = [ 1 2 ];
|
||||
networking.bridges = {
|
||||
cbr0.interfaces = [ "eth2" ];
|
||||
};
|
||||
networking.interfaces = {
|
||||
cbr0 = {
|
||||
ipAddress = "10.10.0.1";
|
||||
prefixLength = 24;
|
||||
};
|
||||
eth2.ip4 = lib.mkOverride 0 [ ];
|
||||
};
|
||||
networking.localCommands = ''
|
||||
ip route add 10.10.0.0/16 dev cbr0
|
||||
ip route flush cache
|
||||
'';
|
||||
networking.extraHosts = "127.0.0.1 master";
|
||||
$kubernetes->waitUntilSucceeds("kubectl get nodes | grep kubernetes | grep Ready");
|
||||
|
||||
networking.firewall.enable = false;
|
||||
#networking.firewall.allowedTCPPorts = [ 4001 7001 ];
|
||||
|
||||
environment.systemPackages = [ pkgs.redis ];
|
||||
};
|
||||
|
||||
node =
|
||||
{ config, pkgs, lib, nodes, ... }:
|
||||
{
|
||||
services.kubernetes = {
|
||||
roles = ["node"];
|
||||
dockerCfg = ''{"master:5000":{}}'';
|
||||
kubelet.apiServers = ["master:8080"];
|
||||
verbose = true;
|
||||
};
|
||||
virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false -b cbr0 --insecure-registry master:5000";
|
||||
services.etcd = {
|
||||
listenPeerUrls = ["http://0.0.0.0:7001"];
|
||||
initialAdvertisePeerUrls = ["http://node:7001"];
|
||||
initialCluster = ["master=http://master:7001" "node=http://node:7001"];
|
||||
};
|
||||
|
||||
virtualisation.vlans = [ 1 2 ];
|
||||
networking.bridges = {
|
||||
cbr0.interfaces = [ "eth2" ];
|
||||
};
|
||||
networking.interfaces = {
|
||||
cbr0 = {
|
||||
ipAddress = "10.10.1.1";
|
||||
prefixLength = 24;
|
||||
};
|
||||
eth2.ip4 = lib.mkOverride 0 [ ];
|
||||
};
|
||||
networking.localCommands = ''
|
||||
ip route add 10.10.0.0/16 dev cbr0
|
||||
ip route flush cache
|
||||
'';
|
||||
networking.extraHosts = "127.0.0.1 node";
|
||||
|
||||
networking.firewall.enable = false;
|
||||
#networking.firewall.allowedTCPPorts = [ 4001 7001 ];
|
||||
|
||||
environment.systemPackages = [ pkgs.redis ];
|
||||
};
|
||||
|
||||
client =
|
||||
{ config, pkgs, nodes, ... }:
|
||||
{
|
||||
virtualisation.docker.enable = true;
|
||||
virtualisation.docker.extraOptions = "--insecure-registry master:5000";
|
||||
environment.systemPackages = [ pkgs.kubernetes ];
|
||||
environment.etc."test/redis-master-pod.yaml".source = redisMaster;
|
||||
environment.etc."test/pause".source = "${pkgs.kubernetes}/bin/kube-pause";
|
||||
environment.etc."test/Dockerfile".source = pkgs.writeText "Dockerfile" ''
|
||||
FROM scratch
|
||||
ADD pause /
|
||||
ENTRYPOINT ["/pause"]
|
||||
'';
|
||||
};
|
||||
${testSimplePod}
|
||||
'';
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
startAll;
|
||||
cluster = let
|
||||
runWithOpenSSL = file: cmd: pkgs.runCommand file {
|
||||
buildInputs = [ pkgs.openssl ];
|
||||
} cmd;
|
||||
|
||||
$master->waitForUnit("kubernetes-apiserver.service");
|
||||
$master->waitForUnit("kubernetes-scheduler.service");
|
||||
$master->waitForUnit("kubernetes-controller-manager.service");
|
||||
$master->waitForUnit("kubernetes-kubelet.service");
|
||||
$master->waitForUnit("kubernetes-proxy.service");
|
||||
ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048";
|
||||
ca_pem = runWithOpenSSL "ca.pem" ''
|
||||
openssl req \
|
||||
-x509 -new -nodes -key ${ca_key} \
|
||||
-days 10000 -out $out -subj "/CN=etcd-ca"
|
||||
'';
|
||||
etcd_key = runWithOpenSSL "etcd-key.pem" "openssl genrsa -out $out 2048";
|
||||
etcd_csr = runWithOpenSSL "etcd.csr" ''
|
||||
openssl req \
|
||||
-new -key ${etcd_key} \
|
||||
-out $out -subj "/CN=etcd" \
|
||||
-config ${openssl_cnf}
|
||||
'';
|
||||
etcd_cert = runWithOpenSSL "etcd.pem" ''
|
||||
openssl x509 \
|
||||
-req -in ${etcd_csr} \
|
||||
-CA ${ca_pem} -CAkey ${ca_key} \
|
||||
-CAcreateserial -out $out \
|
||||
-days 365 -extensions v3_req \
|
||||
-extfile ${openssl_cnf}
|
||||
'';
|
||||
|
||||
$node->waitForUnit("kubernetes-kubelet.service");
|
||||
$node->waitForUnit("kubernetes-proxy.service");
|
||||
etcd_client_key = runWithOpenSSL "etcd-client-key.pem"
|
||||
"openssl genrsa -out $out 2048";
|
||||
|
||||
$master->waitUntilSucceeds("kubectl get minions | grep master");
|
||||
$master->waitUntilSucceeds("kubectl get minions | grep node");
|
||||
etcd_client_csr = runWithOpenSSL "etcd-client-key.pem" ''
|
||||
openssl req \
|
||||
-new -key ${etcd_client_key} \
|
||||
-out $out -subj "/CN=etcd-client" \
|
||||
-config ${client_openssl_cnf}
|
||||
'';
|
||||
|
||||
$client->waitForUnit("docker.service");
|
||||
$client->succeed("tar cv --files-from /dev/null | docker import - nix");
|
||||
$client->succeed("docker tag nix master:5000/nix");
|
||||
$master->waitForUnit("docker-registry.service");
|
||||
$client->succeed("docker push master:5000/nix");
|
||||
$client->succeed("mkdir -p /root/pause");
|
||||
$client->succeed("cp /etc/test/pause /root/pause/");
|
||||
$client->succeed("cp /etc/test/Dockerfile /root/pause/");
|
||||
$client->succeed("cd /root/pause && docker build -t master:5000/pause .");
|
||||
$client->succeed("docker push master:5000/pause");
|
||||
etcd_client_cert = runWithOpenSSL "etcd-client.crt" ''
|
||||
openssl x509 \
|
||||
-req -in ${etcd_client_csr} \
|
||||
-CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
|
||||
-out $out -days 365 -extensions v3_req \
|
||||
-extfile ${client_openssl_cnf}
|
||||
'';
|
||||
|
||||
subtest "simple pod", sub {
|
||||
$client->succeed("kubectl create -f ${redisMaster} -s http://master:8080");
|
||||
$client->waitUntilSucceeds("kubectl get pods -s http://master:8080 | grep redis-master | grep -i running");
|
||||
}
|
||||
apiserver_key = runWithOpenSSL "apiserver-key.pem" "openssl genrsa -out $out 2048";
|
||||
|
||||
'';
|
||||
})
|
||||
apiserver_csr = runWithOpenSSL "apiserver.csr" ''
|
||||
openssl req \
|
||||
-new -key ${apiserver_key} \
|
||||
-out $out -subj "/CN=kube-apiserver" \
|
||||
-config ${apiserver_cnf}
|
||||
'';
|
||||
|
||||
apiserver_cert = runWithOpenSSL "apiserver.pem" ''
|
||||
openssl x509 \
|
||||
-req -in ${apiserver_csr} \
|
||||
-CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
|
||||
-out $out -days 365 -extensions v3_req \
|
||||
-extfile ${apiserver_cnf}
|
||||
'';
|
||||
|
||||
worker_key = runWithOpenSSL "worker-key.pem" "openssl genrsa -out $out 2048";
|
||||
|
||||
worker_csr = runWithOpenSSL "worker.csr" ''
|
||||
openssl req \
|
||||
-new -key ${worker_key} \
|
||||
-out $out -subj "/CN=kube-worker" \
|
||||
-config ${worker_cnf}
|
||||
'';
|
||||
|
||||
worker_cert = runWithOpenSSL "worker.pem" ''
|
||||
openssl x509 \
|
||||
-req -in ${worker_csr} \
|
||||
-CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
|
||||
-out $out -days 365 -extensions v3_req \
|
||||
-extfile ${worker_cnf}
|
||||
'';
|
||||
|
||||
openssl_cnf = pkgs.writeText "openssl.cnf" ''
|
||||
[req]
|
||||
req_extensions = v3_req
|
||||
distinguished_name = req_distinguished_name
|
||||
[req_distinguished_name]
|
||||
[ v3_req ]
|
||||
basicConstraints = CA:FALSE
|
||||
keyUsage = digitalSignature, keyEncipherment
|
||||
extendedKeyUsage = serverAuth
|
||||
subjectAltName = @alt_names
|
||||
[alt_names]
|
||||
DNS.1 = etcd1
|
||||
DNS.2 = etcd2
|
||||
DNS.3 = etcd3
|
||||
IP.1 = 127.0.0.1
|
||||
'';
|
||||
|
||||
client_openssl_cnf = pkgs.writeText "client-openssl.cnf" ''
|
||||
[req]
|
||||
req_extensions = v3_req
|
||||
distinguished_name = req_distinguished_name
|
||||
[req_distinguished_name]
|
||||
[ v3_req ]
|
||||
basicConstraints = CA:FALSE
|
||||
keyUsage = digitalSignature, keyEncipherment
|
||||
extendedKeyUsage = clientAuth
|
||||
'';
|
||||
|
||||
apiserver_cnf = pkgs.writeText "apiserver-openssl.cnf" ''
|
||||
[req]
|
||||
req_extensions = v3_req
|
||||
distinguished_name = req_distinguished_name
|
||||
[req_distinguished_name]
|
||||
[ v3_req ]
|
||||
basicConstraints = CA:FALSE
|
||||
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
|
||||
subjectAltName = @alt_names
|
||||
[alt_names]
|
||||
DNS.1 = kubernetes
|
||||
DNS.2 = kubernetes.default
|
||||
DNS.3 = kubernetes.default.svc
|
||||
DNS.4 = kubernetes.default.svc.cluster.local
|
||||
IP.1 = 10.10.10.1
|
||||
'';
|
||||
|
||||
worker_cnf = pkgs.writeText "worker-openssl.cnf" ''
|
||||
[req]
|
||||
req_extensions = v3_req
|
||||
distinguished_name = req_distinguished_name
|
||||
[req_distinguished_name]
|
||||
[ v3_req ]
|
||||
basicConstraints = CA:FALSE
|
||||
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
|
||||
subjectAltName = @alt_names
|
||||
[alt_names]
|
||||
DNS.1 = kubeWorker1
|
||||
DNS.2 = kubeWorker2
|
||||
'';
|
||||
|
||||
etcdNodeConfig = {
|
||||
virtualisation.memorySize = 128;
|
||||
|
||||
services = {
|
||||
etcd = {
|
||||
enable = true;
|
||||
keyFile = etcd_key;
|
||||
certFile = etcd_cert;
|
||||
trustedCaFile = ca_pem;
|
||||
peerClientCertAuth = true;
|
||||
listenClientUrls = ["https://0.0.0.0:2379"];
|
||||
listenPeerUrls = ["https://0.0.0.0:2380"];
|
||||
};
|
||||
};
|
||||
|
||||
environment.variables = {
|
||||
ETCDCTL_CERT_FILE = "${etcd_client_cert}";
|
||||
ETCDCTL_KEY_FILE = "${etcd_client_key}";
|
||||
ETCDCTL_CA_FILE = "${ca_pem}";
|
||||
ETCDCTL_PEERS = "https://127.0.0.1:2379";
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPorts = [ 2379 2380 ];
|
||||
};
|
||||
|
||||
kubeConfig = {
|
||||
virtualisation.diskSize = 2048;
|
||||
programs.bash.enableCompletion = true;
|
||||
|
||||
services.flannel = {
|
||||
enable = true;
|
||||
network = "10.10.0.0/16";
|
||||
iface = "eth1";
|
||||
etcd = {
|
||||
endpoints = ["https://etcd1:2379" "https://etcd2:2379" "https://etcd3:2379"];
|
||||
keyFile = etcd_client_key;
|
||||
certFile = etcd_client_cert;
|
||||
caFile = ca_pem;
|
||||
};
|
||||
};
|
||||
|
||||
# vxlan
|
||||
networking.firewall.allowedUDPPorts = [ 8472 ];
|
||||
|
||||
systemd.services.docker.after = ["flannel.service"];
|
||||
systemd.services.docker.serviceConfig.EnvironmentFile = "/run/flannel/subnet.env";
|
||||
virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false --bip $FLANNEL_SUBNET";
|
||||
|
||||
services.kubernetes.verbose = true;
|
||||
services.kubernetes.etcd = {
|
||||
servers = ["https://etcd1:2379" "https://etcd2:2379" "https://etcd3:2379"];
|
||||
keyFile = etcd_client_key;
|
||||
certFile = etcd_client_cert;
|
||||
caFile = ca_pem;
|
||||
};
|
||||
|
||||
environment.systemPackages = [ pkgs.bind pkgs.tcpdump pkgs.utillinux ];
|
||||
};
|
||||
|
||||
kubeMasterConfig = {pkgs, ...}: {
|
||||
require = [kubeConfig];
|
||||
|
||||
# kube apiserver
|
||||
networking.firewall.allowedTCPPorts = [ 443 ];
|
||||
|
||||
virtualisation.memorySize = 512;
|
||||
|
||||
services.kubernetes = {
|
||||
roles = ["master"];
|
||||
scheduler.leaderElect = true;
|
||||
controllerManager.leaderElect = true;
|
||||
|
||||
apiserver = {
|
||||
publicAddress = "0.0.0.0";
|
||||
advertiseAddress = "192.168.1.8";
|
||||
tlsKeyFile = apiserver_key;
|
||||
tlsCertFile = apiserver_cert;
|
||||
clientCaFile = ca_pem;
|
||||
kubeletClientCaFile = ca_pem;
|
||||
kubeletClientKeyFile = worker_key;
|
||||
kubeletClientCertFile = worker_cert;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
kubeWorkerConfig = { pkgs, ... }: {
|
||||
require = [kubeConfig];
|
||||
|
||||
virtualisation.memorySize = 512;
|
||||
|
||||
# kubelet
|
||||
networking.firewall.allowedTCPPorts = [ 10250 ];
|
||||
|
||||
services.kubernetes = {
|
||||
roles = ["node"];
|
||||
kubeconfig = {
|
||||
server = "https://kubernetes:443";
|
||||
caFile = ca_pem;
|
||||
certFile = worker_cert;
|
||||
keyFile = worker_key;
|
||||
};
|
||||
kubelet = {
|
||||
tlsKeyFile = worker_key;
|
||||
tlsCertFile = worker_cert;
|
||||
};
|
||||
};
|
||||
};
|
||||
in makeTest {
|
||||
name = "kubernetes-cluster";
|
||||
|
||||
nodes = {
|
||||
etcd1 = { config, pkgs, nodes, ... }: {
|
||||
require = [etcdNodeConfig];
|
||||
services.etcd = {
|
||||
advertiseClientUrls = ["https://etcd1:2379"];
|
||||
initialCluster = ["etcd1=https://etcd1:2380" "etcd2=https://etcd2:2380" "etcd3=https://etcd3:2380"];
|
||||
initialAdvertisePeerUrls = ["https://etcd1:2380"];
|
||||
};
|
||||
};
|
||||
|
||||
etcd2 = { config, pkgs, ... }: {
|
||||
require = [etcdNodeConfig];
|
||||
services.etcd = {
|
||||
advertiseClientUrls = ["https://etcd2:2379"];
|
||||
initialCluster = ["etcd1=https://etcd1:2380" "etcd2=https://etcd2:2380" "etcd3=https://etcd3:2380"];
|
||||
initialAdvertisePeerUrls = ["https://etcd2:2380"];
|
||||
};
|
||||
};
|
||||
|
||||
etcd3 = { config, pkgs, ... }: {
|
||||
require = [etcdNodeConfig];
|
||||
services.etcd = {
|
||||
advertiseClientUrls = ["https://etcd3:2379"];
|
||||
initialCluster = ["etcd1=https://etcd1:2380" "etcd2=https://etcd2:2380" "etcd3=https://etcd3:2380"];
|
||||
initialAdvertisePeerUrls = ["https://etcd3:2380"];
|
||||
};
|
||||
};
|
||||
|
||||
kubeMaster1 = { config, pkgs, lib, nodes, ... }: {
|
||||
require = [kubeMasterConfig];
|
||||
};
|
||||
|
||||
kubeMaster2 = { config, pkgs, lib, nodes, ... }: {
|
||||
require = [kubeMasterConfig];
|
||||
};
|
||||
|
||||
# Kubernetes TCP load balancer
|
||||
kubernetes = { config, pkgs, ... }: {
|
||||
# kubernetes
|
||||
networking.firewall.allowedTCPPorts = [ 443 ];
|
||||
|
||||
services.haproxy.enable = true;
|
||||
services.haproxy.config = ''
|
||||
global
|
||||
log 127.0.0.1 local0 notice
|
||||
user haproxy
|
||||
group haproxy
|
||||
|
||||
defaults
|
||||
log global
|
||||
retries 2
|
||||
timeout connect 3000
|
||||
timeout server 5000
|
||||
timeout client 5000
|
||||
|
||||
listen kubernetes
|
||||
bind 0.0.0.0:443
|
||||
mode tcp
|
||||
option ssl-hello-chk
|
||||
balance roundrobin
|
||||
server kube-master-1 kubeMaster1:443 check
|
||||
server kube-master-2 kubeMaster2:443 check
|
||||
'';
|
||||
};
|
||||
|
||||
kubeWorker1 = { config, pkgs, lib, nodes, ... }: {
|
||||
require = [kubeWorkerConfig];
|
||||
};
|
||||
|
||||
kubeWorker2 = { config, pkgs, lib, nodes, ... }: {
|
||||
require = [kubeWorkerConfig];
|
||||
};
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
startAll;
|
||||
|
||||
${testSimplePod}
|
||||
'';
|
||||
};
|
||||
}
|
||||
|
pkgs/applications/networking/cluster/cni/default.nix (new file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
{ stdenv, fetchFromGitHub, go }:
|
||||
|
||||
stdenv.mkDerivation rec {
|
||||
name = "cni-${version}";
|
||||
version = "0.3.0";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "containernetworking";
|
||||
repo = "cni";
|
||||
rev = "v${version}";
|
||||
sha256 = "1nvixvf5slnsdrfpfs2km64x680wf83jbyp7il12bcim37q2az7m";
|
||||
};
|
||||
|
||||
buildInputs = [ go ];
|
||||
|
||||
outputs = ["out" "plugins"];
|
||||
|
||||
buildPhase = ''
|
||||
patchShebangs build
|
||||
./build
|
||||
'';
|
||||
|
||||
installPhase = ''
|
||||
mkdir -p $out/bin $plugins
|
||||
mv bin/cnitool $out/bin
|
||||
mv bin/* $plugins/
|
||||
'';
|
||||
|
||||
meta = with stdenv.lib; {
|
||||
description = "Container Network Interface - networking for Linux containers";
|
||||
license = licenses.asl20;
|
||||
homepage = https://github.com/containernetworking/cni;
|
||||
maintainers = with maintainers; [offline];
|
||||
platforms = [ "x86_64-linux" ];
|
||||
};
|
||||
}
|
@@ -48,9 +48,6 @@ stdenv.mkDerivation rec {
|
||||
'';
|
||||
|
||||
preFixup = ''
|
||||
wrapProgram "$out/bin/kube-proxy" --prefix PATH : "${iptables}/bin"
|
||||
wrapProgram "$out/bin/kubelet" --prefix PATH : "${coreutils}/bin"
|
||||
|
||||
# Remove references to go compiler
|
||||
while read file; do
|
||||
cat $file | sed "s,${go},$(echo "${go}" | sed "s,$NIX_STORE/[^-]*,$NIX_STORE/eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee,"),g" > $file.tmp
|
||||
|
@@ -12325,6 +12325,8 @@ in
|
||||
pulseaudioSupport = config.pulseaudio or false;
|
||||
};
|
||||
|
||||
cni = callPackage ../applications/networking/cluster/cni {};
|
||||
|
||||
communi = qt5.callPackage ../applications/networking/irc/communi { };
|
||||
|
||||
compiz = callPackage ../applications/window-managers/compiz {
|
||||
|