Merge pull request #41073 from johanot/kube-1.10

kubernetes: 1.9.7 -> 1.10.3
This commit is contained in:
Sarah Brofeldt 2018-05-26 11:35:37 +02:00 committed by GitHub
commit 2052c1687e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 208 additions and 88 deletions

View File

@ -1864,6 +1864,11 @@
email = "me@joelt.io";
name = "Joel Taylor";
};
johanot = {
email = "write@ownrisk.dk";
github = "johanot";
name = "Johan Thomsen";
};
johbo = {
email = "johannes@bornhold.name";
github = "johbo";

View File

@ -241,6 +241,18 @@ $ nix-instantiate -E '(import <nixpkgsunstable> {}).gitFull'
<literal>networking.networkmanager.dns</literal> instead.
</para>
</listitem>
<listitem>
<para>
The option <varname>services.kubernetes.apiserver.admissionControl</varname>
was renamed to <varname>services.kubernetes.apiserver.enableAdmissionPlugins</varname>.
</para>
</listitem>
<listitem>
<para>
The recommended way to access the Kubernetes Dashboard is via HTTPS (TLS).
Therefore, the public service port for the dashboard has changed to 443 (container port 8443) and the scheme to https.
</para>
</listitem>
</itemizedlist>
</section>
</section>

View File

@ -31,6 +31,7 @@ with lib;
(mkRenamedOptionModule [ "services" "graphite" "web" "host" ] [ "services" "graphite" "web" "listenAddress" ])
(mkRenamedOptionModule [ "services" "i2pd" "extIp" ] [ "services" "i2pd" "address" ])
(mkRenamedOptionModule [ "services" "kibana" "host" ] [ "services" "kibana" "listenAddress" ])
(mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "admissionControl" ] [ "services" "kubernetes" "apiserver" "enableAdmissionPlugins" ])
(mkRenamedOptionModule [ "services" "logstash" "address" ] [ "services" "logstash" "listenAddress" ])
(mkRenamedOptionModule [ "services" "mpd" "network" "host" ] [ "services" "mpd" "network" "listenAddress" ])
(mkRenamedOptionModule [ "services" "neo4j" "host" ] [ "services" "neo4j" "listenAddress" ])

View File

@ -5,14 +5,14 @@ with lib;
let
cfg = config.services.kubernetes.addons.dashboard;
name = "gcr.io/google_containers/kubernetes-dashboard-amd64";
version = "v1.8.2";
name = "k8s.gcr.io/kubernetes-dashboard-amd64";
version = "v1.8.3";
image = pkgs.dockerTools.pullImage {
imageName = name;
imageDigest = "sha256:dc4026c1b595435ef5527ca598e1e9c4343076926d7d62b365c44831395adbd0";
finalImageTag = version;
sha256 = "11h0fz3wxp0f10fsyqaxjm7l2qg7xws50dv5iwlck5gb1fjmajad";
imageDigest = "sha256:e7984d10351601080bbc146635d51f0cfbea31ca6f0df323cf7a58cf2f6a68df";
sha256 = "18ajcg0q1vignfjk2sm4xj4wzphfz8wah69ps8dklqfvv0164mc8";
};
in {
options.services.kubernetes.addons.dashboard = {
@ -31,7 +31,7 @@ in {
services.kubernetes.addonManager.addons = {
kubernetes-dashboard-deployment = {
kind = "Deployment";
apiVersion = "apps/v1beta1";
apiVersion = "apps/v1";
metadata = {
labels = {
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
@ -57,40 +57,61 @@ in {
};
annotations = {
"scheduler.alpha.kubernetes.io/critical-pod" = "";
#"scheduler.alpha.kubernetes.io/tolerations" = ''[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'';
};
};
spec = {
priorityClassName = "system-cluster-critical";
containers = [{
name = "kubernetes-dashboard";
image = "${name}:${version}";
ports = [{
containerPort = 9090;
containerPort = 8443;
protocol = "TCP";
}];
resources = {
limits = {
cpu = "100m";
memory = "250Mi";
memory = "300Mi";
};
requests = {
cpu = "100m";
memory = "50Mi";
memory = "100Mi";
};
};
args = ["--auto-generate-certificates"];
volumeMounts = [{
name = "tmp-volume";
mountPath = "/tmp";
} {
name = "kubernetes-dashboard-certs";
mountPath = "/certs";
}];
livenessProbe = {
httpGet = {
scheme = "HTTPS";
path = "/";
port = 9090;
port = 8443;
};
initialDelaySeconds = 30;
timeoutSeconds = 30;
};
}];
volumes = [{
name = "kubernetes-dashboard-certs";
secret = {
secretName = "kubernetes-dashboard-certs";
};
} {
name = "tmp-volume";
emptyDir = {};
}];
serviceAccountName = "kubernetes-dashboard";
tolerations = [{
key = "node-role.kubernetes.io/master";
effect = "NoSchedule";
} {
key = "CriticalAddonsOnly";
operator = "Exists";
}];
};
};
@ -113,8 +134,8 @@ in {
};
spec = {
ports = [{
port = 80;
targetPort = 9090;
port = 443;
targetPort = 8443;
}];
selector.k8s-app = "kubernetes-dashboard";
};
@ -127,15 +148,56 @@ in {
labels = {
k8s-app = "kubernetes-dashboard";
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
"addonmanager.kubernetes.io/mode" = "Reconcile";
"addonmanager.kubernetes.io/mode" = "Reconcile";
};
name = "kubernetes-dashboard";
namespace = "kube-system";
};
};
kubernetes-dashboard-sec-certs = {
apiVersion = "v1";
kind = "Secret";
metadata = {
labels = {
k8s-app = "kubernetes-dashboard";
# Allows editing resource and makes sure it is created first.
"addonmanager.kubernetes.io/mode" = "EnsureExists";
};
name = "kubernetes-dashboard-certs";
namespace = "kube-system";
};
type = "Opaque";
};
kubernetes-dashboard-sec-kholder = {
apiVersion = "v1";
kind = "Secret";
metadata = {
labels = {
k8s-app = "kubernetes-dashboard";
# Allows editing resource and makes sure it is created first.
"addonmanager.kubernetes.io/mode" = "EnsureExists";
};
name = "kubernetes-dashboard-key-holder";
namespace = "kube-system";
};
type = "Opaque";
};
kubernetes-dashboard-cm = {
apiVersion = "v1";
kind = "ConfigMap";
metadata = {
labels = {
k8s-app = "kubernetes-dashboard";
# Allows editing resource and makes sure it is created first.
"addonmanager.kubernetes.io/mode" = "EnsureExists";
};
name = "kubernetes-dashboard-settings";
namespace = "kube-system";
};
};
} // (optionalAttrs cfg.enableRBAC {
kubernetes-dashboard-crb = {
apiVersion = "rbac.authorization.k8s.io/v1beta1";
apiVersion = "rbac.authorization.k8s.io/v1";
kind = "ClusterRoleBinding";
metadata = {
name = "kubernetes-dashboard";

View File

@ -5,6 +5,37 @@ with lib;
let
cfg = config.services.kubernetes;
# YAML config; see:
# https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/
# https://github.com/kubernetes/kubernetes/blob/release-1.10/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go
#
# TODO: migrate the following flags to this config file
#
# --pod-manifest-path
# --address
# --port
# --tls-cert-file
# --tls-private-key-file
# --client-ca-file
# --authentication-token-webhook
# --authentication-token-webhook-cache-ttl
# --authorization-mode
# --healthz-bind-address
# --healthz-port
# --allow-privileged
# --cluster-dns
# --cluster-domain
# --hairpin-mode
# --feature-gates
kubeletConfig = pkgs.runCommand "kubelet-config.yaml" { } ''
echo > $out ${pkgs.lib.escapeShellArg (builtins.toJSON {
kind = "KubeletConfiguration";
apiVersion = "kubelet.config.k8s.io/v1beta1";
${if cfg.kubelet.applyManifests then "staticPodPath" else null} =
manifests;
})}
'';
skipAttrs = attrs: map (filterAttrs (k: v: k != "enable"))
(filter (v: !(hasAttr "enable" v) || v.enable) attrs);
@ -339,9 +370,9 @@ in {
type = types.str;
};
admissionControl = mkOption {
enableAdmissionPlugins = mkOption {
description = ''
Kubernetes admission control plugins to use. See
Kubernetes admission control plugins to enable. See
<link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
'';
default = ["NamespaceLifecycle" "LimitRanger" "ServiceAccount" "ResourceQuota" "DefaultStorageClass" "DefaultTolerationSeconds" "NodeRestriction"];
@ -353,6 +384,15 @@ in {
type = types.listOf types.str;
};
disableAdmissionPlugins = mkOption {
description = ''
Kubernetes admission control plugins to disable. See
<link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
'';
default = [];
type = types.listOf types.str;
};
serviceAccountKeyFile = mkOption {
description = ''
Kubernetes apiserver PEM-encoded x509 RSA private or public key file,
@ -573,6 +613,7 @@ in {
type = types.bool;
};
# TODO: remove this deprecated flag
cadvisorPort = mkOption {
description = "Kubernetes kubelet local cadvisor port.";
default = 4194;
@ -783,12 +824,10 @@ in {
serviceConfig = {
Slice = "kubernetes.slice";
ExecStart = ''${cfg.package}/bin/kubelet \
${optionalString cfg.kubelet.applyManifests
"--pod-manifest-path=${manifests}"} \
${optionalString (taints != "")
"--register-with-taints=${taints}"} \
--kubeconfig=${mkKubeConfig "kubelet" cfg.kubelet.kubeconfig} \
--require-kubeconfig \
--config=${kubeletConfig} \
--address=${cfg.kubelet.address} \
--port=${toString cfg.kubelet.port} \
--register-node=${boolToString cfg.kubelet.registerNode} \
@ -899,7 +938,8 @@ in {
--service-cluster-ip-range=${cfg.apiserver.serviceClusterIpRange} \
${optionalString (cfg.apiserver.runtimeConfig != "")
"--runtime-config=${cfg.apiserver.runtimeConfig}"} \
--admission_control=${concatStringsSep "," cfg.apiserver.admissionControl} \
--enable-admission-plugins=${concatStringsSep "," cfg.apiserver.enableAdmissionPlugins} \
--disable-admission-plugins=${concatStringsSep "," cfg.apiserver.disableAdmissionPlugins} \
${optionalString (cfg.apiserver.serviceAccountKeyFile!=null)
"--service-account-key-file=${cfg.apiserver.serviceAccountKeyFile}"} \
${optionalString cfg.verbose "--v=6"} \

View File

@ -3,27 +3,27 @@
with lib;
let
version = "1.14.4";
version = "1.14.10";
k8s-dns-kube-dns = pkgs.dockerTools.pullImage {
imageName = "gcr.io/google_containers/k8s-dns-kube-dns-amd64";
imageName = "k8s.gcr.io/k8s-dns-kube-dns-amd64";
imageDigest = "sha256:b99fc3eee2a9f052f7eb4cc00f15eb12fc405fa41019baa2d6b79847ae7284a8";
finalImageTag = version;
sha256 = "0q97xfqrigrfjl2a9cxl5in619py0zv44gch09jm8gqjkxl80imp";
imageDigest = "sha256:40790881bbe9ef4ae4ff7fe8b892498eecb7fe6dcc22661402f271e03f7de344";
sha256 = "0x583znk9smqn0fix7ld8sm5jgaxhqhx3fq97b1wkqm7iwhvl3pj";
};
k8s-dns-dnsmasq-nanny = pkgs.dockerTools.pullImage {
imageName = "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64";
imageName = "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64";
imageDigest = "sha256:bbb2a290a568125b3b996028958eb773f33b5b87a6b37bf38a28f8b62dddb3c8";
finalImageTag = version;
sha256 = "051w5ca4qb88mwva4hbnh9xzlsvv7k1mbk3wz50lmig2mqrqqx6c";
imageDigest = "sha256:aeeb994acbc505eabc7415187cd9edb38cbb5364dc1c2fc748154576464b3dc2";
sha256 = "1fihml7s2mfwgac51cbqpylkwbivc8nyhgi4vb820s83zvl8a6y1";
};
k8s-dns-sidecar = pkgs.dockerTools.pullImage {
imageName = "gcr.io/google_containers/k8s-dns-sidecar-amd64";
imageName = "k8s.gcr.io/k8s-dns-sidecar-amd64";
imageDigest = "sha256:4f1ab957f87b94a5ec1edc26fae50da2175461f00afecf68940c4aa079bd08a4";
finalImageTag = version;
sha256 = "1z0d129bcm8i2cqq36x5jhnrv9hirj8c6kjrmdav8vgf7py78vsm";
imageDigest = "sha256:97074c951046e37d3cbb98b82ae85ed15704a290cce66a8314e7f846404edde9";
sha256 = "08l1bv5jgrhvjzpqpbinrkgvv52snc4fzyd8ya9v18ns2klyz7m0";
};
cfg = config.services.kubernetes.addons.dns;
@ -59,7 +59,7 @@ in {
services.kubernetes.addonManager.addons = {
kubedns-deployment = {
apiVersion = "apps/v1beta1";
apiVersion = "extensions/v1beta1";
kind = "Deployment";
metadata = {
labels = {
@ -84,9 +84,38 @@ in {
labels.k8s-app = "kube-dns";
};
spec = {
priorityClassName = "system-cluster-critical";
containers = [
{
name = "kubedns";
image = "k8s.gcr.io/k8s-dns-kube-dns-amd64:${version}";
resources = {
limits.memory = "170Mi";
requests = {
cpu = "100m";
memory = "70Mi";
};
};
livenessProbe = {
failureThreshold = 5;
httpGet = {
path = "/healthcheck/kubedns";
port = 10054;
scheme = "HTTP";
};
initialDelaySeconds = 60;
successThreshold = 1;
timeoutSeconds = 5;
};
readinessProbe = {
httpGet = {
path = "/readiness";
port = 8081;
scheme = "HTTP";
};
initialDelaySeconds = 3;
timeoutSeconds = 5;
};
args = [
"--domain=${cfg.clusterDomain}"
"--dns-port=10053"
@ -99,18 +128,6 @@ in {
value = "10055";
}
];
image = "gcr.io/google_containers/k8s-dns-kube-dns-amd64:${version}";
livenessProbe = {
failureThreshold = 5;
httpGet = {
path = "/healthcheck/kubedns";
port = 10054;
scheme = "HTTP";
};
initialDelaySeconds = 60;
successThreshold = 1;
timeoutSeconds = 5;
};
ports = [
{
containerPort = 10053;
@ -128,22 +145,6 @@ in {
protocol = "TCP";
}
];
readinessProbe = {
httpGet = {
path = "/readiness";
port = 8081;
scheme = "HTTP";
};
initialDelaySeconds = 3;
timeoutSeconds = 5;
};
resources = {
limits.memory = "170Mi";
requests = {
cpu = "100m";
memory = "70Mi";
};
};
volumeMounts = [
{
mountPath = "/kube-dns-config";
@ -152,6 +153,19 @@ in {
];
}
{
name = "dnsmasq";
image = "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:${version}";
livenessProbe = {
httpGet = {
path = "/healthcheck/dnsmasq";
port = 10054;
scheme = "HTTP";
};
initialDelaySeconds = 60;
timeoutSeconds = 5;
successThreshold = 1;
failureThreshold = 5;
};
args = [
"-v=2"
"-logtostderr"
@ -165,19 +179,6 @@ in {
"--server=/in-addr.arpa/127.0.0.1#10053"
"--server=/ip6.arpa/127.0.0.1#10053"
];
image = "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:${version}";
livenessProbe = {
failureThreshold = 5;
httpGet = {
path = "/healthcheck/dnsmasq";
port = 10054;
scheme = "HTTP";
};
initialDelaySeconds = 60;
successThreshold = 1;
timeoutSeconds = 5;
};
name = "dnsmasq";
ports = [
{
containerPort = 53;
@ -205,24 +206,24 @@ in {
}
{
name = "sidecar";
image = "gcr.io/google_containers/k8s-dns-sidecar-amd64:${version}";
args = [
"--v=2"
"--logtostderr"
"--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.${cfg.clusterDomain},5,A"
"--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.${cfg.clusterDomain},5,A"
];
image = "k8s.gcr.io/k8s-dns-sidecar-amd64:${version}";
livenessProbe = {
failureThreshold = 5;
httpGet = {
path = "/metrics";
port = 10054;
scheme = "HTTP";
};
initialDelaySeconds = 60;
successThreshold = 1;
timeoutSeconds = 5;
successThreshold = 1;
failureThreshold = 5;
};
args = [
"--v=2"
"--logtostderr"
"--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.${cfg.clusterDomain},5,A"
"--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.${cfg.clusterDomain},5,A"
];
ports = [
{
containerPort = 10054;

View File

@ -24,7 +24,7 @@ let
{ config, pkgs, lib, nodes, ... }:
mkMerge [
{
virtualisation.memorySize = mkDefault 768;
virtualisation.memorySize = mkDefault 1536;
virtualisation.diskSize = mkDefault 4096;
networking = {
inherit domain extraHosts;

View File

@ -6,7 +6,6 @@ let
featureGates = ["AllAlpha"];
flannel.enable = true;
addons.dashboard.enable = true;
verbose = true;
caFile = "${certs.master}/ca.pem";
apiserver = {

View File

@ -7,7 +7,7 @@
"cmd/kube-apiserver"
"cmd/kube-controller-manager"
"cmd/kube-proxy"
"plugin/cmd/kube-scheduler"
"cmd/kube-scheduler"
"test/e2e/e2e.test"
]
}:
@ -16,16 +16,16 @@ with lib;
stdenv.mkDerivation rec {
name = "kubernetes-${version}";
version = "1.9.7";
version = "1.10.3";
src = fetchFromGitHub {
owner = "kubernetes";
repo = "kubernetes";
rev = "v${version}";
sha256 = "1dykh48c6bvypg51mlxjdyrggpjq597mjj83xgj1pfadsy6pp9bh";
sha256 = "1la9cdf5a67kg72xn4bn5mib1caiv5vxsjmnxqsmx0m7vhbv5i4n";
};
# go > 1.10 should be fixed by https://github.com/kubernetes/kubernetes/pull/60373
# Build using golang v1.9 in accordance with https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.10.md#external-dependencies
buildInputs = [ removeReferencesTo makeWrapper which go_1_9 rsync go-bindata ];
outputs = ["out" "man" "pause"];
@ -70,7 +70,7 @@ stdenv.mkDerivation rec {
description = "Production-Grade Container Scheduling and Management";
license = licenses.asl20;
homepage = https://kubernetes.io;
maintainers = with maintainers; [offline];
maintainers = with maintainers; [johanot offline];
platforms = platforms.unix;
};
}