New deployment documentation (#67)

* add docs draft

* add note about local install+fix security model

* remove old helm charts

* remove helm2 examples

* helm based deployment guide + screenshots

* grammar + more spaces

* small fixes

* fix linter errors

* fix headings references

* fix headings references

* russian docs + fixes

* also check docs in charts

* remove charts dir and add it to gitignore

* Wording refinements

* small fixes

* compressed images

* Removed unneeded images

* wording

* Refined

Co-authored-by: iko <ilyakooo0@gmail.com>
Alex-Sizov 2021-08-26 15:23:41 +03:00 committed by GitHub
parent 07c4978265
commit 90fa7b8108
124 changed files with 470 additions and 8466 deletions

@@ -21,4 +21,4 @@ jobs:
- name: Run linter
run: |
yarn run remark -f -u validate-links -u remark-lint-mdash-style -u remark-lint-final-newline -u remark-lint-list-item-bullet-indent -u remark-lint-no-auto-link-without-protocol -u remark-lint-no-blockquote-without-marker -u remark-lint-ordered-list-marker-style -u remark-lint-no-literal-urls -u remark-lint-hard-break-spaces -u remark-lint-no-duplicate-definitions -u remark-lint-no-heading-content-indent -u remark-lint-no-inline-padding -u remark-lint-no-shortcut-reference-image -u remark-lint-no-shortcut-reference-link -u remark-lint-no-undefined-references -u remark-lint-no-unused-definitions -u remark-lint-no-dead-urls docs README.md
yarn run remark -f -u validate-links -u remark-lint-mdash-style -u remark-lint-final-newline -u remark-lint-list-item-bullet-indent -u remark-lint-no-auto-link-without-protocol -u remark-lint-no-blockquote-without-marker -u remark-lint-ordered-list-marker-style -u remark-lint-no-literal-urls -u remark-lint-hard-break-spaces -u remark-lint-no-duplicate-definitions -u remark-lint-no-heading-content-indent -u remark-lint-no-inline-padding -u remark-lint-no-shortcut-reference-image -u remark-lint-no-shortcut-reference-link -u remark-lint-no-undefined-references -u remark-lint-no-unused-definitions -u remark-lint-no-dead-urls docs README.md charts
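As an aside, the flag list has grown long enough that it could arguably live in a config file instead; a sketch, assuming a `.remarkrc` at the repository root (not part of this PR, and trimmed to a few of the plugins):

```json
{
  "plugins": [
    "validate-links",
    "remark-lint-mdash-style",
    "remark-lint-final-newline",
    "remark-lint-no-dead-urls"
  ]
}
```

With such a file the run step would shrink to roughly `yarn run remark -f docs README.md charts`.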

@@ -1,22 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -1,5 +0,0 @@
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Kubernetes
name: cert-control
version: 0.1.0

@@ -1,8 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cert-control-clusterrole
rules:
- apiGroups: ["cert-manager.io"]
resources: ["certificates"]
verbs: ["list", "delete", "deletecollection"]

@@ -1,13 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ .Values.serviceaccount }}-cert-control-rolebinding
namespace: {{ .Values.namespace }}
roleRef:
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
name: cert-control-clusterrole
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceaccount }}
namespace: {{ .Values.octopod_namespace | default .Values.namespace }}

@@ -1,3 +0,0 @@
namespace: deployment
octopod_namespace: octopod
serviceaccount: octopod

@@ -1,22 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -1,5 +0,0 @@
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Kubernetes
name: helm-access
version: 0.1.0

@@ -1,11 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: helm-clusterrole
rules:
- apiGroups: [""]
resources: ["pods/portforward"]
verbs: ["create"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["list", "get"]

@@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ .Values.serviceaccount }}-helm-clusterrolebinding
roleRef:
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
name: helm-clusterrole
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceaccount }}
namespace: {{ .Values.namespace }}

@@ -1,2 +0,0 @@
namespace: octopod
serviceaccount: octopod

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -1,20 +0,0 @@
apiVersion: v2
name: octopod
description: An opensource self-hosted solution for managing multiple deployments in a Kubernetes cluster.
type: application
version: 0.5.1
appVersion: 1.3.1
keywords:
- kubernetes
- octopod
home: https://octopod.site
sources:
- https://github.com/typeable/octopod
maintainers:
- name: Alex Sizov
email: a.sizov@typeable.io
dependencies:
- name: postgresql
version: 10.5.3
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled

@@ -1,8 +0,0 @@
Thank you for installing Octopod!
You can access UI here: {{ include "httpScheme" . }}://{{ include "uiIngressHost" . }}
To access octopod via octo CLI you can use this url {{ include "httpScheme" . }}://{{ include "powerAppIngressHost" . }}
To get you CLI secret you need to execute this command:
kubectl -n {{ .Release.Namespace }} get secret {{ include "octopodCliAuthSecretName" . }} -o jsonpath='{.data.cli-secret}' | base64 -d
{{ if and (not .Values.octopod.cliAuthSecret) (not .Values.vault.enabled) .Release.IsUpgrade }}
{{ fail "To upgrade you need to set octopod.cliAuthSecret variable with your current secret" }}
{{- end }}

@@ -1,146 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "octopod.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "octopod.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "octopod.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "octopod.labels" -}}
helm.sh/chart: {{ include "octopod.chart" . }}
{{ include "octopod.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "octopod.selectorLabels" -}}
app.kubernetes.io/name: {{ include "octopod.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "octopod.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "octopod.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{- define "controlScriptsPath" -}}
/utils
{{- end }}
{{- define "octopodAppAuthSecretName" -}}
{{- printf "%s-app-auth-secret" (include "octopod.fullname" .) }}
{{- end }}
{{- define "httpScheme" -}}
{{- if .Values.ingress.tls.enabled -}}
https
{{- else -}}
http
{{- end }}
{{- end }}
{{- define "wsScheme" -}}
{{- if .Values.ingress.tls.enabled -}}
wss
{{- else -}}
ws
{{- end }}
{{- end }}
{{- define "postgresqlHost" -}}
{{ .Release.Name }}-postgresql
{{- end }}
{{- define "postgresqlSecretName" -}}
{{ .Release.Name }}-postgresql
{{- end }}
{{- define "wsIngressHost" -}}
{{- if .Values.ingress.ws.host -}}
{{ .Values.ingress.ws.host }}
{{- else -}}
octopod-ws.{{ .Values.octopod.baseDomain }}
{{- end }}
{{- end }}
{{- define "uiIngressHost" -}}
{{- if .Values.ingress.ui.host -}}
{{ .Values.ingress.ui.host }}
{{- else -}}
octopod.{{ .Values.octopod.baseDomain }}
{{- end }}
{{- end }}
{{- define "powerAppIngressHost" -}}
{{- if .Values.ingress.powerApp.host -}}
{{ .Values.ingress.powerApp.host }}
{{- else -}}
octopod-powerapp.{{ .Values.octopod.baseDomain }}
{{- end }}
{{- end }}
{{- define "appIngressHost" -}}
{{- if .Values.ingress.app.host -}}
{{ .Values.ingress.app.host }}
{{- else -}}
octopod-app.{{ .Values.octopod.baseDomain }}
{{- end }}
{{- end }}
{{- define "octopodCliAuthSecretName" -}}
{{- printf "%s-cli-auth-secret" (include "octopod.fullname" .) }}
{{- end }}
{{- define "octopodCliAuthSecret" -}}
{{- if .Values.octopod.cliAuthSecret -}}
{{ .Values.octopod.cliAuthSecret }}
{{- else }}
{{- randAlphaNum 32 }}
{{- end }}
{{- end }}
{{- define "octopodUiAuthSecret" -}}
{{- if .Values.octopod.uiAuthSecret -}}
{{ .Values.octopod.uiAuthSecret }}
{{- else }}
{{- randAlphaNum 32 }}
{{- end }}
{{- end }}

@@ -1,23 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "octopod.fullname" . }}
labels:
{{- include "octopod.labels" . | nindent 4 }}
data:
PROJECT_NAME: {{ .Values.octopod.projectName | quote }}
BASE_DOMAIN: {{ .Values.octopod.baseDomain | quote }}
NAMESPACE: {{ .Values.octopod.deploymentNamespace | quote }}
STATUS_UPDATE_TIMEOUT: {{ .Values.octopod.statusUpdateTimeout | quote }}
ARCHIVE_RETENTION: {{ .Values.octopod.archiveRetention | quote }}
CREATION_COMMAND: {{ printf "%s/create" (include "controlScriptsPath" .) | quote }}
UPDATE_COMMAND: {{ printf "%s/update" (include "controlScriptsPath" .) | quote }}
ARCHIVE_COMMAND: {{ printf "%s/archive" (include "controlScriptsPath" .) | quote }}
CHECKING_COMMAND: {{ printf "%s/check" (include "controlScriptsPath" .) | quote }}
CLEANUP_COMMAND: {{ printf "%s/cleanup" (include "controlScriptsPath" .) | quote }}
ARCHIVE_CHECKING_COMMAND: {{ printf "%s/archive_check" (include "controlScriptsPath" .) | quote }}
TAG_CHECKING_COMMAND: {{ printf "%s/tag_check" (include "controlScriptsPath" .) | quote }}
INFO_COMMAND: {{ printf "%s/info" (include "controlScriptsPath" .) | quote }}
{{- range $name, $value := .Values.octopod.env }}
{{ $name }}: {{ $value | quote }}
{{- end }}

@@ -1,119 +0,0 @@
# Default values for octopod.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: typeable/octopod
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
rbac:
create: true
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext:
runAsUser: 1000
runAsGroup: 1000
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
service:
type: ClusterIP
ports:
powerApp: 4443
ui: 80
app: 4000
ws: 4020
ingress:
enabled: true
ingressClass: nginx
tls:
enabled: true
clusterIssuer: letsencrypt
powerApp:
annotations: {}
ui:
annotations: {}
app:
annotations: {}
ws:
annotations: {}
resources:
limits:
cpu: 200m
memory: 512Mi
requests:
cpu: 200m
memory: 256Mi
nodeSelector: {}
tolerations: []
affinity: {}
octopod:
projectName: Octopod
deploymentNamespace: octopod-deployment
baseDomain: ""
statusUpdateTimeout: 600
archiveRetention: 1209600
migrations:
enabled: true
env:
HELM_BIN: "/utils/helm"
KUBECTL_BIN: "/utils/kubectl"
DEFAULTS: |
{
"chart_name": "wordpress",
"chart_repo_name": "bitnami",
"chart_repo_url": "https://charts.bitnami.com/bitnami",
"chart_version": "12.0.0",
"default_overrides": []
}
vaultEnv: {}
controlScripts:
image:
repository: typeable/octopod-helm-control-scripts
pullPolicy: IfNotPresent
tag: 0.1.0
sqitch:
image:
repository: typeable/sqitch
pullPolicy: IfNotPresent
tag: v2.0.0
postgresql:
enabled: true
postgresqlUsername: octopod
postgresqlDatabase: octopod
image:
tag: 12.7.0-debian-10-r51
vault:
enabled: false
clusterName: ""
kubernetesDashboard:
enabled: false
url: ""

@@ -1,5 +0,0 @@
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Kubernetes
name: job-control
version: 0.1.0

@@ -1,8 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: job-control-clusterrole
rules:
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["list", "delete", "deletecollection"]

@@ -1,13 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ .Values.serviceaccount }}-job-control-rolebinding
namespace: {{ .Values.namespace }}
roleRef:
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
name: job-control-clusterrole
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceaccount }}
namespace: {{ .Values.octopod_namespace | default .Values.namespace }}

@@ -1,3 +0,0 @@
namespace: deployment
octopod_namespace: octopod
serviceaccount: octopod

@@ -1,5 +0,0 @@
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Kubernetes
name: helm-access
version: 0.1.0

@@ -1,17 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kubedog-clusterrole
rules:
- apiGroups: ["apps"]
resources: ["deployments"]
verbs: ["list", "watch"]
- apiGroups: ["apps"]
resources: ["statefulsets"]
verbs: ["list", "watch"]
- apiGroups: ["apps"]
resources: ["replicasets"]
verbs: ["list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list"]

@@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ .Values.serviceaccount }}-kubedog-clusterrolebinding
roleRef:
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
name: kubedog-clusterrole
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceaccount }}
namespace: {{ .Values.namespace }}

@@ -1,2 +0,0 @@
namespace: octopod
serviceaccount: octopod

@@ -1,22 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -1,5 +0,0 @@
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Kubernetes
name: octopod-infra
version: 0.1.0

@@ -1,11 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-postgres-config
namespace: {{ .Release.Namespace }}
labels:
app: {{ .Release.Name }}-postgres
data:
POSTGRES_DB: {{ .Values.postgres_db | default .Release.Name }}
POSTGRES_USER: {{ .Values.postgres_user | default "postgres" }}
POSTGRES_PASSWORD: {{ .Values.postgres_password | default "password" }}

@@ -1,14 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-postgres
namespace: {{ .Release.Namespace }}
labels:
name: {{ .Release.Name }}-postgres
spec:
selector:
app: {{ .Release.Name }}-postgres
clusterIP: None
ports:
- port: 5432
name: postgres

@@ -1,51 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ .Release.Name }}-postgres
namespace: {{ .Release.Namespace }}
spec:
serviceName: {{ .Release.Name }}-postgres
replicas: 1
selector:
matchLabels:
app: {{ .Release.Name }}-postgres
template:
metadata:
labels:
app: {{ .Release.Name }}-postgres
spec:
nodeSelector:
role: {{ .Values.nodeselector }}
terminationGracePeriodSeconds: 120
containers:
- name: postgres
image: postgres:12
envFrom:
- configMapRef:
name: {{ .Release.Name }}-postgres-config
resources:
requests:
cpu: {{ .Values.requests.cpu }}
memory: {{ .Values.requests.memory }}
limits:
cpu: {{ .Values.limits.cpu }}
memory: {{ .Values.limits.memory }}
ports:
- containerPort: 5432
name: postgredb
volumeMounts:
- name: postgredb
mountPath: /var/lib/postgresql/data
subPath: postgres
volumeClaimTemplates:
- metadata:
name: postgredb
labels:
app: {{ .Release.Name }}-postgres
spec:
accessModes:
- "ReadWriteOnce"
storageClassName: {{ .Values.storage_class | default "default" }}
resources:
requests:
storage: {{ .Values.storage_size }}

@@ -1,17 +0,0 @@
global:
image_prefix:
image: default
image_tag:
namespace: octopod
nodeselector: stand
postgres_db: octopod
postgres_user: octopod
postgres_password: octopod
storage_size: 1Gi
requests:
cpu: 0.2
memory: 256Mi
limits:
cpu: 0.2
memory: 512Mi

charts/octopod/.gitignore (vendored, new file)

@@ -0,0 +1 @@
charts

@@ -14,6 +14,7 @@
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project

@@ -1,5 +1,20 @@
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Kubernetes
apiVersion: v2
name: octopod
version: 0.1.0
description: An opensource self-hosted solution for managing multiple deployments in a Kubernetes cluster.
type: application
version: 0.5.1
appVersion: 1.3.1
keywords:
- kubernetes
- octopod
home: https://octopod.site
sources:
- https://github.com/typeable/octopod
maintainers:
- name: Alex Sizov
email: a.sizov@typeable.io
dependencies:
- name: postgresql
version: 10.5.3
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled

@@ -11,7 +11,7 @@ $ helm install octopod typeable/octopod --set octopod.baseDomain="your-domain.co
## Introduction
This chart bootstraps an Octopod deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
This chart bootstraps an Octopod deployment in a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
@@ -22,17 +22,19 @@ This chart bootstraps an Octopod deployment on a [Kubernetes](http://kubernetes.
## Installing the Chart
This chart will not create or delete any namespaces for you.
You'll need to create 2 namespaces before installing:
First in which octopod will be installed
```console
$ kubectl create namespace octopod
```
Second in which Octopod will deploy all it's deployments (configured in octopod.deploymentNamespace)
```console
$ kubectl create namespace octopod-deployments
```
1. A namespace in which Octopod itself will be installed:
```console
$ kubectl create namespace octopod
```
2. A namespace in which Octopod will deploy all your deployments (configured in `octopod.deploymentNamespace`):
```console
$ kubectl create namespace octopod-deployments
```
To install the chart with the release name `my-release` from current directory execute:
To install the chart with the release name `my-release` execute:
```console
$ helm repo add typeable https://typeable.github.io/octopod/
@@ -55,10 +57,10 @@ The command removes all the Kubernetes components but PVC's associated with the
Some values (such as passwords) in this chart (and its dependencies) are generated automatically, but due to [a limitation in helm](https://github.com/helm/charts/issues/5167) the values change on every upgrade. To prevent this, you must fix these values by providing them via `--set` flags or in the [values file](https://helm.sh/docs/chart_template_guide/values_files/).
These values are:
- `postgresql.postgresqlPassword` - main db password
- `postgresql.postgresqlPostgresPassword` - password for "postgres" user
- `octopod.cliAuthSecret` - auth header for octo cli tool
- `octopod.uiAuthSecret` - basic auth secret for ui->octopod communication
- `postgresql.postgresqlPassword` main db password
- `postgresql.postgresqlPostgresPassword` password for "postgres" user
- `octopod.cliAuthSecret` auth header for octo cli tool
- `octopod.uiAuthSecret` basic auth secret for ui->octopod communication
Note: if these values are not provided, the `helm upgrade` command can fail or Octopod will not work after the upgrade.
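For illustration, pinning all four values during an upgrade could look roughly like this (a sketch; assumes the release is named `octopod` and that you substitute your current secrets for the placeholders):

```console
$ helm upgrade octopod typeable/octopod \
    --set postgresql.postgresqlPassword=<current-db-password> \
    --set postgresql.postgresqlPostgresPassword=<current-postgres-password> \
    --set octopod.cliAuthSecret=<current-cli-secret> \
    --set octopod.uiAuthSecret=<current-ui-secret>
```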

@@ -0,0 +1,8 @@
Thank you for installing Octopod!
You can access the Web UI here: {{ include "httpScheme" . }}://{{ include "uiIngressHost" . }}
To access Octopod via octo CLI you can use this URL: {{ include "httpScheme" . }}://{{ include "powerAppIngressHost" . }}
To get your CLI secret you need to execute this command:
kubectl -n {{ .Release.Namespace }} get secret {{ include "octopodCliAuthSecretName" . }} -o jsonpath='{.data.cli-secret}' | base64 -d
{{ if and (not .Values.octopod.cliAuthSecret) (not .Values.vault.enabled) .Release.IsUpgrade }}
{{ fail "To upgrade you need to set the octopod.cliAuthSecret variable with your current secret" }}
{{- end }}
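For reference, the rendered command can be captured straight into a shell variable; a sketch, assuming a release named `octopod` installed into the `octopod` namespace:

```bash
# Hypothetical release and namespace names; substitute your own
CLI_SECRET=$(kubectl -n octopod get secret octopod-cli-auth-secret \
  -o jsonpath='{.data.cli-secret}' | base64 -d)
```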

@@ -1,7 +1,146 @@
{{/*
Set dbname
Expand the name of the chart.
*/}}
{{- define "dbname" -}}
{{- $dbname_release := .Release.Name | replace "." "_" | replace "-" "_" -}}
{{- .Values.dbname | default $dbname_release }}
{{- end -}}
{{- define "octopod.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "octopod.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "octopod.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "octopod.labels" -}}
helm.sh/chart: {{ include "octopod.chart" . }}
{{ include "octopod.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "octopod.selectorLabels" -}}
app.kubernetes.io/name: {{ include "octopod.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "octopod.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "octopod.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{- define "controlScriptsPath" -}}
/utils
{{- end }}
{{- define "octopodAppAuthSecretName" -}}
{{- printf "%s-app-auth-secret" (include "octopod.fullname" .) }}
{{- end }}
{{- define "httpScheme" -}}
{{- if .Values.ingress.tls.enabled -}}
https
{{- else -}}
http
{{- end }}
{{- end }}
{{- define "wsScheme" -}}
{{- if .Values.ingress.tls.enabled -}}
wss
{{- else -}}
ws
{{- end }}
{{- end }}
{{- define "postgresqlHost" -}}
{{ .Release.Name }}-postgresql
{{- end }}
{{- define "postgresqlSecretName" -}}
{{ .Release.Name }}-postgresql
{{- end }}
{{- define "wsIngressHost" -}}
{{- if .Values.ingress.ws.host -}}
{{ .Values.ingress.ws.host }}
{{- else -}}
octopod-ws.{{ .Values.octopod.baseDomain }}
{{- end }}
{{- end }}
{{- define "uiIngressHost" -}}
{{- if .Values.ingress.ui.host -}}
{{ .Values.ingress.ui.host }}
{{- else -}}
octopod.{{ .Values.octopod.baseDomain }}
{{- end }}
{{- end }}
{{- define "powerAppIngressHost" -}}
{{- if .Values.ingress.powerApp.host -}}
{{ .Values.ingress.powerApp.host }}
{{- else -}}
octopod-powerapp.{{ .Values.octopod.baseDomain }}
{{- end }}
{{- end }}
{{- define "appIngressHost" -}}
{{- if .Values.ingress.app.host -}}
{{ .Values.ingress.app.host }}
{{- else -}}
octopod-app.{{ .Values.octopod.baseDomain }}
{{- end }}
{{- end }}
{{- define "octopodCliAuthSecretName" -}}
{{- printf "%s-cli-auth-secret" (include "octopod.fullname" .) }}
{{- end }}
{{- define "octopodCliAuthSecret" -}}
{{- if .Values.octopod.cliAuthSecret -}}
{{ .Values.octopod.cliAuthSecret }}
{{- else }}
{{- randAlphaNum 32 }}
{{- end }}
{{- end }}
{{- define "octopodUiAuthSecret" -}}
{{- if .Values.octopod.uiAuthSecret -}}
{{ .Values.octopod.uiAuthSecret }}
{{- else }}
{{- randAlphaNum 32 }}
{{- end }}
{{- end }}
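As a sketch of how these helpers are consumed elsewhere in the chart, a hypothetical template fragment (not a file from this PR) might read:

```yaml
# Hypothetical ingress fragment using the naming, label and host helpers
metadata:
  name: {{ include "octopod.fullname" . }}-ui
  labels:
    {{- include "octopod.labels" . | nindent 4 }}
spec:
  rules:
    - host: {{ include "uiIngressHost" . }}
```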

@@ -1,47 +0,0 @@
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: {{ .Release.Name }}-clean-archive-cronjob
namespace: {{ .Values.namespace }}
spec:
schedule: "0 */1 * * *"
jobTemplate:
spec:
template:
metadata:
labels:
app: {{ .Release.Name }}
annotations:
checksum/config: "{{ .Values.global.deploy_checksum }}"
spec:
nodeSelector:
role: {{ .Values.nodeselector }}
containers:
- name: octo
image: {{ .Values.global.image_prefix }}/{{ .Values.global.octo_image }}:{{ .Values.global.image_tag }}
command:
- /app/octo
args:
- clean-archive
env:
- name: OCTOPOD_URL
value: https://{{ .Values.power_app_domain }}:443
volumeMounts:
- name: certs
mountPath: /cert.pem
subPath: client_cert.pem
- name: certs
mountPath: /key.pem
subPath: client_key.pem
resources:
requests:
cpu: 0.1
memory: 256Mi
limits:
cpu: 0.1
memory: 512Mi
restartPolicy: Never
volumes:
- name: certs
configMap:
name: octopod-certs

@@ -1,32 +0,0 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ .Release.Name }}-app-nginx-ingress
namespace: {{ .Values.namespace }}
annotations:
kubernetes.io/ingress.class: "nginx"
kubernetes.io/tls-acme: "true"
cert-manager.io/issuer: "{{ .Release.Name }}-certs"
nginx.ingress.kubernetes.io/proxy-connect-timeout: "600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/proxy-next-upstream: "http_502 error timeout"
nginx.ingress.kubernetes.io/auth-secret: octopod-basic-auth
nginx.ingress.kubernetes.io/auth-secret-type: auth-file
nginx.ingress.kubernetes.io/auth-type: basic
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/cors-allow-origin: "https://{{ .Values.domain }}"
nginx.ingress.kubernetes.io/cors-allow-methods: "GET, POST, PUT, DELETE, PATCH, OPTIONS"
spec:
tls:
- hosts:
- {{ .Values.app_domain }}
secretName: {{ .Release.Name }}-app-tls
rules:
- host: {{ .Values.app_domain }}
http:
paths:
- path: /
backend:
serviceName: {{ .Release.Name }}
servicePort: 81

@@ -1,19 +1,23 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-config
namespace: {{ .Values.namespace }}
name: {{ include "octopod.fullname" . }}
labels:
{{- include "octopod.labels" . | nindent 4 }}
data:
PROJECT_NAME: {{ .Values.project_name }}
BASE_DOMAIN: {{ .Values.base_domain }}
NAMESPACE: {{ .Values.target_namespace }}
STATUS_UPDATE_TIMEOUT: "{{ .Values.status_update_timeout }}"
ARCHIVE_RETENTION: "1209600"
CREATION_COMMAND: /utils/create
UPDATE_COMMAND: /utils/update
ARCHIVE_COMMAND: /utils/archive
CHECKING_COMMAND: /utils/check
CLEANUP_COMMAND: /utils/cleanup
ARCHIVE_CHECKING_COMMAND: /utils/archive_check
TAG_CHECKING_COMMAND: /utils/tag_check
INFO_COMMAND: /utils/info
PROJECT_NAME: {{ .Values.octopod.projectName | quote }}
BASE_DOMAIN: {{ .Values.octopod.baseDomain | quote }}
NAMESPACE: {{ .Values.octopod.deploymentNamespace | quote }}
STATUS_UPDATE_TIMEOUT: {{ .Values.octopod.statusUpdateTimeout | quote }}
ARCHIVE_RETENTION: {{ .Values.octopod.archiveRetention | quote }}
CREATION_COMMAND: {{ printf "%s/create" (include "controlScriptsPath" .) | quote }}
UPDATE_COMMAND: {{ printf "%s/update" (include "controlScriptsPath" .) | quote }}
ARCHIVE_COMMAND: {{ printf "%s/archive" (include "controlScriptsPath" .) | quote }}
CHECKING_COMMAND: {{ printf "%s/check" (include "controlScriptsPath" .) | quote }}
CLEANUP_COMMAND: {{ printf "%s/cleanup" (include "controlScriptsPath" .) | quote }}
ARCHIVE_CHECKING_COMMAND: {{ printf "%s/archive_check" (include "controlScriptsPath" .) | quote }}
TAG_CHECKING_COMMAND: {{ printf "%s/tag_check" (include "controlScriptsPath" .) | quote }}
INFO_COMMAND: {{ printf "%s/info" (include "controlScriptsPath" .) | quote }}
{{- range $name, $value := .Values.octopod.env }}
{{ $name }}: {{ $value | quote }}
{{- end }}
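The trailing `range` means that any extra keys under `octopod.env` in the values file land in this ConfigMap verbatim. For example (a sketch reusing a value that appears in `values.yaml` later in this diff):

```yaml
# values.yaml fragment
octopod:
  env:
    HELM_BIN: "/utils/helm"

# renders into the ConfigMap data as:
#   HELM_BIN: "/utils/helm"
```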

@@ -1,153 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}
namespace: {{ .Values.namespace }}
spec:
replicas: {{ .Values.replicas }}
selector:
matchLabels:
app: {{ .Release.Name }}
template:
metadata:
name: {{ .Release.Name }}
labels:
app: {{ .Release.Name }}
annotations:
checksum/config: "{{ .Values.global.deploy_checksum }}"
spec:
serviceAccountName: {{ .Values.service_account }}
nodeSelector:
role: {{ .Values.nodeselector }}
terminationGracePeriodSeconds: 600
initContainers:
- name: copy-utils
image: {{ .Values.global.utils_image_prefix }}/{{ .Values.global.utils_image }}:{{ .Values.global.utils_image_tag }}
command:
- sh
- -c
- 'cp /utils/* /copy/'
volumeMounts:
- name: utils
mountPath: /copy
- name: init
image: {{ .Values.global.image_prefix }}/{{ .Values.global.image }}:{{ .Values.global.image_tag }}
command:
- sh
- -c
- '/utils/init'
securityContext:
runAsGroup: 1000
runAsUser: 1000
volumeMounts:
- name: home
mountPath: /home/octopod
- name: utils
mountPath: /utils
- name: copy-www
image: {{ .Values.global.image_prefix }}/{{ .Values.global.image }}:{{ .Values.global.image_tag }}
command:
- sh
- -c
- |
set -ex
cp -a /www/* /copy/
find /www -type f -exec touch {} +
volumeMounts:
- name: www
mountPath: /copy
containers:
- name: main
image: {{ .Values.global.image_prefix }}/{{ .Values.global.image }}:{{ .Values.global.image_tag }}
ports:
- containerPort: {{ .Values.port }}
protocol: TCP
- containerPort: {{ .Values.ui_port }}
protocol: TCP
args:
- "--port"
- "{{ .Values.port }}"
- "--ui-port"
- "{{ .Values.ui_port }}"
- "--ws-port"
- "{{ .Values.ws_port }}"
- "--db"
- "host='{{ .Values.pg_host }}' port=5432 user='octopod' password='octopod'"
- "--db-pool-size"
- "10"
- "--tls-cert-path"
- "/tls/server_cert.pem"
- "--tls-key-path"
- "/tls/server_key.pem"
- "--tls-store-path"
- "/tls_store"
envFrom:
- configMapRef:
name: {{ .Release.Name }}-config
securityContext:
runAsGroup: 1000
runAsUser: 1000
volumeMounts:
- name: home
mountPath: /home/octopod
- name: utils
mountPath: /utils
- name: certs
mountPath: /tls/server_cert.pem
subPath: server_cert.pem
- name: certs
mountPath: /tls/server_key.pem
subPath: server_key.pem
- name: certs
mountPath: /tls_store/server_cert.pem
subPath: server_cert.pem
resources:
requests:
cpu: 0.2
memory: 256Mi
limits:
cpu: 0.2
memory: 512Mi
readinessProbe:
httpGet:
port: {{ .Values.ui_port }}
path: /api/v1/ping
periodSeconds: 20
livenessProbe:
httpGet:
port: {{ .Values.ui_port }}
path: /api/v1/ping
initialDelaySeconds: 15
periodSeconds: 5
- name: nginx
image: nginx:1.17.5
volumeMounts:
- name: nginx-config
mountPath: /etc/nginx/conf.d/app.conf
subPath: app.conf
- name: nginx-config
mountPath: /www/config.json
subPath: config.json
- name: www
mountPath: /www
ports:
- containerPort: 80
protocol: TCP
volumes:
- name: home
emptyDir: {}
- name: utils
emptyDir: {}
- name: www
emptyDir: {}
- name: nginx-config
configMap:
name: {{ .Release.Name }}-nginx-config
- name: certs
configMap:
name: octopod-certs

@@ -1,34 +0,0 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ .Release.Name }}-nginx-ingress
namespace: {{ .Values.namespace }}
annotations:
kubernetes.io/ingress.class: "nginx"
kubernetes.io/tls-acme: "true"
cert-manager.io/issuer: "{{ .Release.Name }}-certs"
nginx.ingress.kubernetes.io/proxy-connect-timeout: "600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/proxy-next-upstream: "http_502 error timeout"
{{- if .Values.global.auth_url }}
nginx.ingress.kubernetes.io/auth-url: "{{ .Values.global.auth_url }}"
{{- end }}
{{- if .Values.global.auth_signin }}
nginx.ingress.kubernetes.io/auth-signin: "{{ .Values.global.auth_signin }}"
{{- end }}
spec:
tls:
- hosts:
- {{ .Values.domain }}
secretName: {{ .Release.Name }}-tls
rules:
- host: {{ .Values.domain }}
http:
paths:
- path: /
backend:
serviceName: {{ .Release.Name }}
servicePort: 80

@@ -1,18 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: {{ .Release.Name }}-certs
namespace: {{ .Release.Namespace }}
spec:
acme:
email: {{ .Values.acme_registration_email }}
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: {{ .Release.Name }}-letsencrypt
# ACME HTTP-01 provider configurations
solvers:
# An empty 'selector' means that this solver matches all domains
- selector: {}
http01:
ingress:
class: nginx

@@ -1,75 +0,0 @@
{{- if .Values.migrations }}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Release.Name }}-migration-job
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-delete-policy": hook-succeeded
spec:
activeDeadlineSeconds: 600
template:
spec:
containers:
- name: copy
image: {{ .Values.global.image_prefix }}/{{ .Values.global.image }}:{{ .Values.global.image_tag }}
command:
- "bash"
- "-ec"
- |
set -ex
# copy migrations
cp -av /migrations/* /mymigrations
# create sqitch.conf
echo '[core]' > /mymigrations/sqitch.conf
echo 'engine = pg' >> /mymigrations/sqitch.conf
echo 'plan_file = sqitch.plan' >> /mymigrations/sqitch.conf
echo 'top_dir = .' >> /mymigrations/sqitch.conf
echo '[engine "pg"]' >> /mymigrations/sqitch.conf
echo ' registry = sqitch' >> /mymigrations/sqitch.conf
echo '[deploy]' >> /mymigrations/sqitch.conf
echo ' verify = true' >> /mymigrations/sqitch.conf
echo '[rebase]' >> /mymigrations/sqitch.conf
echo ' verify = true' >> /mymigrations/sqitch.conf
echo '[target "octopod"]' >> /mymigrations/sqitch.conf
echo 'uri = db:pg://{{ .Values.connections.pg_instance }}/{{ template "dbname" . }}' >> /mymigrations/sqitch.conf
volumeMounts:
- name: migrations
mountPath: /mymigrations
- name: migrations
image: {{ .Values.global.image_prefix }}/{{ .Values.global.image }}:sqitch-v2.0.0
command:
- "bash"
- "-ec"
- |
set -ex
{{- if .Values.seed }}
echo 'check db'
POSTGRESQL_CONN="psql postgresql://{{ .Values.connections.pg_instance }}/postgres"
DBNAME={{ template "dbname" . }}
($POSTGRESQL_CONN -Atc "SELECT count(*) FROM pg_database WHERE lower(datname) = lower('$DBNAME');" | grep 1) || $POSTGRESQL_CONN -Atc "create database $DBNAME;"
{{- end }}
echo 'run migrations...'
cd /migrations && /usr/local/bin/sqitch deploy octopod
{{- if .Values.seed }}
echo 'seed'
DB_CONN="psql postgresql://{{ .Values.connections.pg_instance }}/{{ template "dbname" . }}"
cd /migrations && $DB_CONN -1 -f seeds.sql || echo 'ok'
{{- end }}
volumeMounts:
- name: migrations
mountPath: /migrations
volumes:
- name: migrations
emptyDir: {}
restartPolicy: Never
backoffLimit: 2
{{- end }}

@@ -1,25 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-nginx-config
namespace: {{ .Values.namespace }}
data:
app.conf: |
server {
listen 80 default_server;
server_name _;
root /www;
index index.html;
error_page 404 =200 /index.html;
}
config.json: |
{
"app_url": "https://{{ .Values.app_domain }}",
"ws_url": "wss://{{ .Values.ws_domain }}",
"app_auth": "Basic {{ .Values.basic_auth_token }}",
{{ if .Values.kubernetes_dashboard_url }}
"kubernetes_dashboard_url_template": "{{ .Values.kubernetes_dashboard_url }}/#/search?namespace={{ .Values.target_namespace }}&q="
{{ else }}
"kubernetes_dashboard_url_template": null
{{ end }}
}

@@ -1,25 +0,0 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ .Release.Name }}-power-app-nginx-ingress
namespace: {{ .Values.namespace }}
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/proxy-connect-timeout: "600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/proxy-next-upstream: "http_502 error timeout"
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
spec:
rules:
- host: {{ .Values.power_app_domain }}
http:
paths:
- path: /
backend:
serviceName: {{ .Release.Name }}
servicePort: 443
tls:
- hosts:
- {{ .Values.power_app_domain }}

@@ -1,23 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}
namespace: {{ .Values.namespace }}
labels:
app: {{ .Release.Name }}
spec:
selector:
app: {{ .Release.Name }}
ports:
- name: octopod-power-app
port: 443
targetPort: {{ .Values.port }}
- name: octopod-ui
port: 80
targetPort: 80
- name: octopod-app
port: 81
targetPort: {{ .Values.ui_port }}
- name: octopod-ws
port: 82
targetPort: {{ .Values.ws_port }}

@@ -1,26 +0,0 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ .Release.Name }}-ws-nginx-ingress
namespace: {{ .Values.namespace }}
annotations:
kubernetes.io/ingress.class: "nginx"
kubernetes.io/tls-acme: "true"
cert-manager.io/issuer: "{{ .Release.Name }}-certs"
nginx.ingress.kubernetes.io/proxy-connect-timeout: "600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/proxy-next-upstream: "http_502 error timeout"
spec:
tls:
- hosts:
- {{ .Values.ws_domain }}
secretName: {{ .Release.Name }}-ws-tls
rules:
- host: {{ .Values.ws_domain }}
http:
paths:
- path: /
backend:
serviceName: {{ .Release.Name }}
servicePort: 82

@@ -1,38 +1,119 @@
global:
image_prefix:
image: octopod
octo_image: octo
image_tag:
utils_image_prefix:
utils_image:
utils_image_tag:
# Default values for octopod.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
namespace: octopod
target_namespace: deployment
nodeselector: stand
service_account: octopod
port: 4443
ui_port: 4000
ws_port: 4020
dbname: octopod
seed: false
migrations: true
replicas: 1
domain: octopod.stage.example.com
app_domain: octopod-app.stage.example.com
ws_domain: octopod-ws.stage.example.com
power_app_domain: octopod-power-app.stage.example.com
base_domain: stage.example.com
project_name: Octopod
status_update_timeout: 600
acme_registration_email:
basic_auth_token:
connections:
pg_instance: octopod:octopod@octopod-infra-postgres-0.octopod-infra-postgres.octopod:5432
pg_host: octopod-infra-postgres-0.octopod-infra-postgres.octopod
requests:
cpu: 200m
memory: 256Mi
limits:
cpu: 200m
memory: 512Mi
replicaCount: 1
image:
repository: typeable/octopod
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
rbac:
create: true
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext:
runAsUser: 1000
runAsGroup: 1000
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
service:
type: ClusterIP
ports:
powerApp: 4443
ui: 80
app: 4000
ws: 4020
ingress:
enabled: true
ingressClass: nginx
tls:
enabled: true
clusterIssuer: letsencrypt
powerApp:
annotations: {}
ui:
annotations: {}
app:
annotations: {}
ws:
annotations: {}
resources:
limits:
cpu: 200m
memory: 512Mi
requests:
cpu: 200m
memory: 256Mi
nodeSelector: {}
tolerations: []
affinity: {}
octopod:
projectName: Octopod
deploymentNamespace: octopod-deployment
baseDomain: ""
statusUpdateTimeout: 600
archiveRetention: 1209600
migrations:
enabled: true
env:
HELM_BIN: "/utils/helm"
KUBECTL_BIN: "/utils/kubectl"
DEFAULTS: |
{
"chart_name": "wordpress",
"chart_repo_name": "bitnami",
"chart_repo_url": "https://charts.bitnami.com/bitnami",
"chart_version": "12.0.0",
"default_overrides": []
}
vaultEnv: {}
controlScripts:
image:
repository: typeable/octopod-helm-control-scripts
pullPolicy: IfNotPresent
tag: 0.1.0
sqitch:
image:
repository: typeable/sqitch
pullPolicy: IfNotPresent
tag: v2.0.0
postgresql:
enabled: true
postgresqlUsername: octopod
postgresqlDatabase: octopod
image:
tag: 12.7.0-debian-10-r51
vault:
enabled: false
clusterName: ""
kubernetesDashboard:
enabled: false
url: ""
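Most of these defaults are meant to be overridden at install time. For example (a sketch based on the install command from the chart README; `MyProject` is a made-up value):

```console
$ helm install octopod typeable/octopod \
    --set octopod.baseDomain="your-domain.com" \
    --set octopod.projectName="MyProject"
```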

@@ -1,22 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -1,5 +0,0 @@
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Kubernetes
name: pvc-control
version: 0.1.0

@@ -1,8 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: pvc-control-clusterrole
rules:
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["list", "delete", "deletecollection"]

@@ -1,13 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ .Values.serviceaccount }}-pvc-control-rolebinding
namespace: {{ .Values.namespace }}
roleRef:
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
name: pvc-control-clusterrole
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceaccount }}
namespace: {{ .Values.octopod_namespace | default .Values.namespace }}

@@ -1,3 +0,0 @@
namespace: deployment
octopod_namespace: octopod
serviceaccount: octopod

@@ -1,177 +1,93 @@
# Helm-based deployment guide
<details>
<summary>Table of contents</summary>
In this guide we'll show you how to deploy Bitnami's [wordpress chart](https://github.com/bitnami/charts/tree/master/bitnami/wordpress) with Octopod.
- [The web application](#the-web-application)
- [Setting up Octopod](#setting-up-octopod)
- [Control scripts](#control-scripts)
- [A word about TLS](#a-word-about-tls)
- [Deploying Octopod](#deploying-octopod)
- [Testing out the deployment](#testing-out-the-deployment)
- [Setting up _octo CLI_](#setting-up-octo-cli)
- [Setting up certificates](#setting-up-certificates)
- [Setting up the API URL](#setting-up-the-api-url)
- [Creating a deployment](#creating-a-deployment)
- [Adding an override](#adding-an-override)
- [Updating the deployment version](#updating-the-deployment-version)
- [Changing the number of replicas](#changing-the-number-of-replicas)
## Your first deployment
</details>
### Install Octopod
In this guide, we will examine a very simple web application and explore setting up _Octopod_ to deploy it.
Make sure that you have Octopod installed before going further. If you haven't, go check our [Octopod deployment guide](Octopod_deployment_guide.md).
## The web application
Note: in this guide we assume that you have Octopod installed on your local machine.
Open up Octopod in your browser and you'll see something like this:
The web application we will be using is a very simple application that serves a single endpoint `/`. The returned HTML markup contains the environment variables the executable has read from the environment. The only variables returned are the ones whose name starts with `APP_ENV`.
![](../images/octopod_blank.png)
![](../images/first.png)
### Create the deployment
The source code can be found in the [examples/web-app](../../examples/web-app) folder of this repository.
Click on the _New Deployment_ button:
You can also find a second version of the server in the [examples/web-app-v2](../../examples/web-app-v2) folder of this repository. The second version is identical to the first version with the only difference being that it returns the variables as an unordered list.
![](../images/octopod_deployment_blank.png)
![](../images/second.png)
### The config
We have already built and pushed the two versions of the application into the [typeable/octopod-web-app-example](https://hub.docker.com/repository/docker/typeable/octopod-web-app-example) DockerHub registry under the `v1` and `v2` tags.
Here you can fill in your deployment parameters. Let's fill them in!
## Setting up Octopod
![](../images/octopod_deployment_filled.png)
### Control scripts
**Name** ― we've chosen `wordpress`, but you can choose whatever name you like.
The only thing you need to do to configure _Octopod_ to work with your application is to write appropriate [_control scripts_](Control_scripts.md) to manipulate your deployments. We have already written the appropriate _control scripts_ for this application. You can find them in the [examples/helm-based-control-scripts](../../examples/helm-based-control-scripts) folder of this repository. The scripts are written in the _Rust_ programming language.
**Tag** ― `5.8.0`. We took the tag name from the [chart parameters](https://github.com/bitnami/charts/blob/master/bitnami/wordpress/Chart.yaml#L4).
The most interesting of them all is the [create.rs](../../examples/helm-based-control-scripts/src/bin/create.rs) script. The basic order of operations is:
**App Overrides:**
1. Read the passed command-line arguments
2. Clone the repo to get the _charts_ used to deploy the application with _helm_
3. Generate the arguments that should be passed to _helm_
4. Call _helm_ with the downloaded _charts_ and the generated arguments
> 💡 **NOTE:** You might have noticed that there is no `update.rs`. That is because our application is stateless and packaged up into a single _chart_. This allows us to simply reuse the same script for both creating and updating a deployment. If you have a more complicated setup with a database, for example, you will most likely need a distinct implementation for `update`.
| Key | Value |
| -----------------: | ------------------ |
| `ingress.enabled` | `true` |
| `ingress.hostname` | `wordpress.lvh.me` |
### A word about TLS
If you are deploying Web applications, as we are here, you probably want to use TLS to encrypt your connections to your deployment. The most straightforward way of doing this is generating a separate TLS certificate for every deployment (for every subdomain). [_Cert Manager_][cert-manager] creates TLS certificates through [_Lets Encrypt_][lets-encrypt] and [_Lets Encrypt_][lets-encrypt] has [a limit on the amount of certificates][lets-encrypt-rate-limits] you can issue within a given time interval. If you exceed this limit you will start getting a _too many registrations for this IP_ error. If that is the case moving the [_Cert Manager_][cert-manager] _Pod_ might help.
We took these overrides from the [chart documentation](https://github.com/bitnami/charts/tree/master/bitnami/wordpress#traffic-exposure-parameters). You can tweak any of the parameters listed there.
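Under the hood this amounts to something like the following `helm` invocation (a rough sketch; the exact flags are generated by the control scripts, and mapping the tag to the chart version is an assumption):

```console
$ helm install wordpress bitnami/wordpress --version 5.8.0 \
    --set ingress.enabled=true \
    --set ingress.hostname=wordpress.lvh.me
```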
### Deploying Octopod
### Deploy
To deploy _Octopod_ you will need to follow the [_Octopod_ deployment guide](Octopod_deployment_guide.md). The only modification will be that you will replace the "Control Scripts Setup" section in the last step with the appropriate values.
When you have it all filled out, click the _Save_ button and wait until the deployment transitions to the _running_ state:
![](../images/octopod_deployment_filled.png)
These values point to a docker registry where we have already packaged up these _control scripts_ into a _Docker Image_.
Now you can click the _wordpress_ link in the _links_ section and you'll be redirected to your WordPress instance:
![](../images/wordpress_blank.png)
```bash
#################################################
# Control Scripts Setup
#
# if you are just testing things out you can paste the values
# from the Helm Deployment Guide example
#################################################
So there you have it: your first Octopod deployment!
# The name of the registry with control scripts
utils_registry="typeable"
## Going further
# The name of the image with control scripts
utils_image="octopod-helm-example"
Right now you may be wondering: how did Octopod fetch wordpress from the Bitnami repository when we didn't fill in any repository information whatsoever? This is because we've set this up at the chart level [here](../../charts/octopod/values.yaml#L90).
# The tag of the image to use
utils_image_tag="1.1"
```
You can override all of this using deployment overrides. Let's dive right in!
## Testing out the deployment
First, let's archive our wordpress deployment:
### Setting up _octo CLI_
![](../images/octopod_archive.png)
Using the Web UI is fairly straightforward, so we will examine creating deployments with the [_octo CLI_](Octo_user_guide.md).
### The config
#### Setting up certificates
And create one more deployment, this time using a different set of overrides:
You will need to get the paths to `client_cert.pem` and `client_key.pem` generated in the [Creating SSL certificates](Octopod_deployment_guide.md#creating-ssl-certificates) step and place them into `TLS_CERT_PATH` and `TLS_KEY_PATH` environment variables:
![](../images/octopod_in_octopod_deployment.png)
```bash
export TLS_CERT_PATH=/tmp/octopod/certs/client_cert.pem
export TLS_KEY_PATH=/tmp/octopod/certs/client_key.pem
```
**Name:** octopod-internal
#### Setting up the API URL
**Tag:** 1.3.1
You will also need to set the power API URL (the `power_app_domain` value from the [Installing _Octopod Server_](Octopod_deployment_guide.md#installing-octopod-server) section) as the `OCTOPOD_URL` environment variable:
**App Overrides:**
```bash
export OCTOPOD_URL=<power_app_domain>
```
| Key | Value |
| --------------------: | ------------------------- |
| `octopod.baseDomain` | `octopod-internal.lvh.me` |
| `ingress.tls.enabled` | `false` |
### Creating a deployment
To create a deployment you can now run:
**Deployment Overrides:**
```bash
$ octo create -n hello-octopod -t v1 -e APP_ENV_KEY1=VALUE1
```
| Key | Value |
| ----------------: | ------------------------------------- |
| `chart_name` | `octopod` |
| `chart_repo_name` | `typeable` |
| `chart_repo_url` | `https://typeable.github.io/octopod/` |
| `chart_version` | `0.5.1` |
The options are:
- `-n hello-octopod` specifies that the name (subdomain) of the deployment should be `hello-octopod`
- `-t v1` specifies the version (Docker Image Tag) of the application to deploy to be `v1`
- `-e APP_ENV_KEY1=VALUE1` adds an application-level key-value pair `APP_ENV_KEY1=VALUE1`
> 💡 **NOTE:** For more detail on _octo CLI_ options please see the [octo CLI user guide](Octo_user_guide.md).
As in the previous example, we took the _App Overrides_ from the [chart documentation](../../charts/octopod/README.md#Parameters), but the _Deployment Overrides_ are passed as configuration to the control scripts. You can read more about these parameters in the [control script docs](../../helm-control-scripts/README.md).
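For comparison, the same repository settings expressed through the chart-level `DEFAULTS` value (the format shown in `values.yaml` earlier in this diff) would look roughly like this:

```json
{
  "chart_name": "octopod",
  "chart_repo_name": "typeable",
  "chart_repo_url": "https://typeable.github.io/octopod/",
  "chart_version": "0.5.1",
  "default_overrides": []
}
```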
This will run the `create` _control script_, which in turn will call `helm`. After waiting a couple of seconds you can visit `http://hello-octopod.<base_domain>` to see the running application:
![](../images/hello-octopod-1.png)
You can also see the deployed pod in the cluster using `kubectl`:
```bash
$ kubectl get pods -n deployment
NAME READY STATUS RESTARTS AGE
app-hello-octopod-8965856-qbwvq 1/1 Running 0 15s
```
### Adding an override
You can modify deployments by adding or removing overrides. To add a new application-level override run:
```bash
$ octo update -n hello-octopod -t v1 -e APP_ENV_KEY2=VALUE2
```
This will run the `update` _control script_ (which is identical to the `create` script in our case), which in turn will call `helm`. After waiting a few seconds you visit the deployment URL again and see the redeployed version:
![](../images/hello-octopod-2.png)
### Updating the deployment version
You can change the version (_Docker Image Tag_) of your deployment like so:
```bash
$ octo update -n hello-octopod -t v2
```
After waiting a few seconds you visit the deployment URL again and see the redeployed version:
![](../images/hello-octopod-3.png)
### Changing the number of replicas
You can change the number of replicas of your deployment (this is [essentially implemented in the _charts_ that we use](../../examples/web-app/charts/web-app/templates/deployment.yaml#L7)) like so:
```bash
$ octo update -n hello-octopod -t v2 -o replicas=3
```
`-o replicas=3` adds a deployment-level key-value pair (override) `replicas=3`.
You can verify that the new replicas have been deployed using `kubectl`:
```bash
$ kubectl get pods -n deployment
NAME READY STATUS RESTARTS AGE
app-hello-octopod-8965856-qbwvq 1/1 Running 0 97m
app-hello-octopod-8965856-v585c 1/1 Running 0 15s
app-hello-octopod-8965856-v88md 1/1 Running 0 15s
```
[cert-manager]: https://cert-manager.io/docs
[lets-encrypt]: https://letsencrypt.org
[lets-encrypt-rate-limits]: https://letsencrypt.org/docs/rate-limits
Now you have Octopod running inside Octopod! From here you can install your own Helm chart.

@@ -1,427 +1,39 @@
# Octopod Server deployment guide
<details>
<summary>Table of contents</summary>
## Installation options
There are several options for installing Octopod, depending on your needs.
- [Installing required utilities](#installing-required-utilities)
- [Setting up your cluster](#setting-up-your-cluster)
- [General utilities](#general-utilities)
- [Tiller (Helm)](#tiller-helm)
- [Cluster access privileges](#cluster-access-privileges)
- [A word about TLS](#a-word-about-tls)
- [Downloading project sources code](#downloading-project-sources-code)
- [Creating required namespaces](#creating-required-namespaces)
- [Creating required _Service Accounts_](#creating-required-service-accounts)
- [Creating the actual service account](#creating-the-actual-service-account)
- [Giving the appropriate _Service Account_ roles](#giving-the-appropriate-service-account-roles)
- [Web UI authentication secrets](#web-ui-authentication-secrets)
- [_octo CLI_ authentication certificates](#octo-cli-authentication-certificates)
- [Creating SSL certificates](#creating-ssl-certificates)
- [Enabling SSL passthrough](#enabling-ssl-passthrough)
- [Setting up DNS](#setting-up-dns)
- [Deploying _Octopod_ on localhost](#deploying-octopod-on-localhost)
- [Installing _Octopod_ infrastructure](#installing-octopod-infrastructure)
- [Installing the appropriate _Storage Class_](#installing-the-appropriate-storage-class)
- [Installing the actual infrastructure](#installing-the-actual-infrastructure)
- [Installing _Octopod Server_](#installing-octopod-server)
### If you have a Kubernetes cluster
You can install Octopod in any Kubernetes cluster using our [Helm chart](../../charts/octopod).
</details>
## Installing required utilities
Your cluster must satisfy the following requirements:
- PVC support
- An ingress controller ([ingress-nginx](https://kubernetes.github.io/ingress-nginx/)) installed
- Kubernetes version >= 1.19.0
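A quick way to check the last two requirements is sketched below (it assumes ingress-nginx lives in its default `ingress-nginx` namespace):

```console
$ kubectl version --short
$ kubectl get pods --namespace ingress-nginx
```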
Installing _Octopod Server_ in your cluster will require that you have the following tools installed on your system:
1. [_kubectl_][kubectl]
2. [_helm 2_][helm]
After ensuring that your cluster satisfies the requirements, you can follow [the Helm installation instructions](../../charts/octopod/README.md) provided with our Helm chart.
## Setting up your cluster
### If you want to try it locally
### General utilities
You can use the [octopod_local_install.sh](../../octopod_local_install.sh) script to bootstrap a kind cluster with Octopod installed in it.
_Octopod Server_ requires the following utilities to be installed in your cluster:
Before running the script, make sure that you have the following tools installed:
- [docker](https://docs.docker.com/engine/install/)
- [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
- [helm 3](https://helm.sh/docs/intro/quickstart/#install-helm)
1. [_Ingress Nginx_][ingress-nginx]
2. [_Cert Manager_][cert-manager]
_Octopod Server_ requires the following minimum resources to function properly: 2 CPUs and 2 GB of RAM. Make sure you have sufficient resources in your cluster.
By default _Octopod Server_ will be deployed on nodes with the `role=stand` label. Please make sure you have the appropriate label set in your cluster:
After you have all of the necessary tools installed, you can run the script:
```bash
kubectl label node <your_node> role=stand
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/typeable/octopod/master/octopod_local_install.sh)"
```
### Tiller (Helm)
The script will expose the Octopod UI at `octopod.lvh.me`.
[_Tiller_][tiller] is a cluster-side service used by [_helm 2_][helm] to manage deployments. The easiest way to install it is using the following command:
`lvh.me` is a special domain name that always resolves to `127.0.0.1`.
```bash
helm init
```
## What next?
#### Cluster access privileges
When installing _Octopod Server_ you might encounter [a problem with cluster access privileges](https://github.com/helm/helm/issues/5100) related to [_Tiller_][tiller].
To give sufficient privileges to [_Tiller_][tiller] you can use the following commands:
```bash
kubectl create -n kube-system serviceaccount tiller
kubectl --namespace kube-system create clusterrolebinding tiller-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
kubectl --namespace kube-system patch deploy tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
```
### A word about TLS
To function properly _Octopod_ needs to generate three TLS certificates for the three subdomains it will be using. [_Cert Manager_][cert-manager] creates TLS certificates through [_Let's Encrypt_][lets-encrypt], and [_Let's Encrypt_][lets-encrypt] has [a limit on the number of certificates][lets-encrypt-rate-limits] you can issue within a given time interval. If you exceed this limit you will start getting a _too many registrations for this IP_ error. If that is the case, moving the [_Cert Manager_][cert-manager] _Pod_ to another node might help.
## Downloading project sources code
To download the source code required to install _Octopod Server_ you will need to clone the git repository:
```bash
git clone --branch master https://github.com/typeable/octopod.git /tmp/octopod
```
## Creating required namespaces
_Octopod_ uses the following namespaces in your cluster:
1. `deployment`: as the name suggests, your deployments will be installed in this namespace
2. `octopod`: this namespace will be used to install the _Octopod_ infrastructure
To create the two namespaces you can use these commands:
```bash
kubectl create namespace deployment
kubectl create namespace octopod
```
## Creating required [_Service Accounts_][kubernetes-service-account]
### Creating the actual service account
_Octopod Server_ requires an `octopod` [_Service Account_][kubernetes-service-account] to function. You can create it using the following command:
```bash
kubectl create -n octopod serviceaccount octopod
```
### Giving the appropriate _Service Account_ roles
1. If you are planning to use [_helm 2_][helm] in your [_Control scripts_](Control_scripts.md) to deploy your deployments, you will need to give appropriate permissions to the `octopod` _Service Account_:
```bash
cd /tmp/octopod/charts
helm install --name octopod-helm-access ./helm-access
```
2. If you are planning to delete [_Persistent Volumes Claims_][kubernetes-pvc] in your [_Control scripts_](Control_scripts.md) (might be useful for the `cleanup` script), you will need to give appropriate permissions to the `octopod` _Service Account_:
```bash
cd /tmp/octopod/charts
helm install --name octopod-pvc-control ./pvc-control
```
3. If you are planning to use _Octopod_ to delete unused certificates in your [_Control scripts_](Control_scripts.md) (might be useful for the `cleanup` script), you will need to give appropriate permissions to the `octopod` _Service Account_:
```bash
cd /tmp/octopod/charts
helm install --name octopod-cert-control ./cert-control
```
4. If you are planning to use [_kubedog_][kubedog] to check the state of your deployments in your [_Control scripts_](Control_scripts.md) (might be useful for the `check` script), you will need to give appropriate permissions to the `octopod` _Service Account_:
```bash
cd /tmp/octopod/charts
helm install --name octopod-kubedog-access ./kubedog-access
```
5. If you are planning to delete [_Jobs_][kubernetes-job] in your [_Control scripts_](Control_scripts.md), you will need to give appropriate permissions to the `octopod` _Service Account_:
```bash
cd /tmp/octopod/charts
helm install --name octopod-job-control ./job-control
```
## Web UI authentication secrets
[Authentication](Security_model.md#web-ui-authentication) between _Octopod Server_ and the _Web UI_ is done through _Basic Auth_. This implies that there needs to be a username and password associated with it.
You can generate the username and password and push them into your cluster using the following command (of course you will want to generate a secure pair):
```bash
username="octopod"
password="password" # Please change it to a more secure password
kubectl create secret generic octopod-basic-auth -n octopod --from-literal=auth=$(htpasswd -bn $username $password)
```
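You can read the pair back to double-check what was stored (a quick sanity check; on some systems `base64 -d` is spelled `base64 -D`):

```bash
kubectl get secret octopod-basic-auth -n octopod -o jsonpath='{.data.auth}' | base64 -d
```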
## _octo CLI_ authentication certificates
### Creating SSL certificates
[Authentication](Security_model.md#octo-cli-authentication) between _octo CLI_ and _Octopod Server_ is performed through self-signed SSL certificates.
You can generate the certificates and push them into your cluster using the following commands:
```bash
mkdir certs
(cd certs && \
openssl req -x509 -newkey rsa:4096 -keyout server_key.pem -out server_cert.pem -nodes -subj "/CN=localhost/O=Server" && \
openssl req -newkey rsa:4096 -keyout client_key.pem -out client_csr.pem -nodes -subj "/CN=Client" && \
openssl x509 -req -in client_csr.pem -CA server_cert.pem -CAkey server_key.pem -out client_cert.pem -set_serial 01 -days 3650)
kubectl create configmap octopod-certs -n octopod --from-file=./certs
```
After executing these commands you will find a new `certs` directory containing the certificates used for authentication between _octo CLI_ and _Octopod Server_. `client_key.pem` and `client_cert.pem` should then be [passed to _octo CLI_ through environment variables](Octo_user_guide.md#tls_cert_path-and-tls_key_path).
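For example, assuming the `TLS_CERT_PATH` and `TLS_KEY_PATH` variable names from the _octo CLI_ user guide, wiring up the client pair might look like this (a sketch; adjust the paths to wherever you keep the files):

```bash
export TLS_CERT_PATH="$PWD/certs/client_cert.pem"
export TLS_KEY_PATH="$PWD/certs/client_key.pem"
```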
### Enabling SSL passthrough
Since we use custom self-signed SSL certificates for authentication, we need the certificates used with requests to be passed to the server as-is, without any modification. This is not supported in the default [_ingress-nginx_][ingress-nginx] configuration, so you will most likely need to enable it manually.
Enabling SSL passthrough in [_ingress-nginx_][ingress-nginx] can be done by adding the `--enable-ssl-passthrough` command-line argument to the [_ingress-nginx_][ingress-nginx] config in your cluster.
To do this you can execute a command similar to this (you will need to look up the names of the namespace and the deployment in your particular cluster):
```bash
kubectl edit deploy -n ingress-nginx ingress-nginx-controller
```
An editor with a YAML config should open up. You will need to modify it to have, among other things, this argument:
```yaml
spec:
...
template:
...
spec:
...
containers:
...
- args:
...
- --enable-ssl-passthrough
```
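If you prefer a non-interactive change, the same argument can be appended with a JSON patch. This is a sketch that assumes the controller container is the first one in the pod spec:

```bash
kubectl patch deploy -n ingress-nginx ingress-nginx-controller --type=json \
  -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--enable-ssl-passthrough"}]'
```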
## Setting up DNS
You will need to set up DNS records to point subdomains of your domain to the IP address of your cluster. The DNS records should look something like this:
```
*.octo.example.com A 1.2.3.4
octo.example.com A 1.2.3.4
```
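Once the records have propagated, you can verify them with `dig` (both queries should print the IP address of your cluster):

```bash
dig +short octo.example.com
dig +short some-deployment.octo.example.com
```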
### Deploying _Octopod_ on localhost
If you are deploying locally and don't have a separate domain to set up, the `lvh.me` domain can be useful: it is set up to point to `localhost` and you can use it to work with subdomains. Even so, deploying a fully-functional version of _Octopod_ on `localhost` is non-trivial and will require modifying the deployment _Charts_ to disable HTTPS redirects. (This guide does not cover that.)
## Installing _Octopod_ infrastructure
### Installing the appropriate _Storage Class_
Before installing the infrastructure you will first need to make sure you have a [_Storage Class_][kubernetes-storage-classes] named `default` installed in your cluster. You can check installed [_Storage Classes_][kubernetes-storage-classes] with the following command:
```bash
kubectl get storageclass
```
If you do not have it, you will need to install it. Installing the [_Storage Class_][kubernetes-storage-classes] in [_minikube_][minikube] can be done with the following command (you will need to modify it to suit your cluster hosting provider):
```bash
cat <<EOF | kubectl apply -f-
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: default
provisioner: k8s.io/minikube-hostpath
reclaimPolicy: Delete
volumeBindingMode: Immediate
EOF
```
### Installing the actual infrastructure
The only infrastructure _Octopod_ currently requires is _PostgreSQL_. You can install it in your cluster using the following command:
```bash
cd /tmp/octopod/charts
helm upgrade --install octopod-infra ./octopod-infra --namespace octopod\
--wait --timeout 600 --debug
```
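After the release completes, you can sanity-check that the infrastructure (the _PostgreSQL_ pod) is up:

```bash
kubectl get pods -n octopod
```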
## Installing _Octopod Server_
To install _Octopod Server_ in your cluster you will need to customize the variables in the following script and run it:
```bash
cd /tmp/octopod/charts
#################################################
# Octopod Images Setup
#
# you probably don't need to change it
#################################################
registry="typeable"
tag="1.3"
image="octopod"
octo_image="octo"
#################################################
# General Octopod Setup
#################################################
# The name of your project, only used for display in the Web UI
project_name="MyProject"
# The email used to register Let's Encrypt SSL certificates
acme_registration_email="certbot@example.com"
#################################################
# Control Scripts Setup
#
# if you are just testing things out you can paste the values
# from the Helm Deployment Guide example
#################################################
# The name of the registry with control scripts
utils_registry="registry_name"
# The name of the image with control scripts
utils_image="utils"
# The tag of the image to use
utils_image_tag="1.0"
#################################################
# Web UI OAuth Authentication
#
# These parameters are passed to ingress-nginx to
# enable authentication for user accessing the
# Web UI.
#
# You can use OAuth2 Proxy to provide OAuth2 authentication.
#
# For more information see the Security Model doc.
#
# You can leave both these variables blank to disable
# authentication in your Web UI altogether.
#################################################
# URL for the OAuth authentication service
auth_url="https://oauth.example.com/oauth2/auth"
# URL for the login page on the OAuth authentication service
auth_signin="https://oauth.example.com/oauth2/start?rd=/redirect/$http_host$request_uri"
#################################################
# Domain Setup
#################################################
# The domain from which the Web UI should be served
domain="octo.example.com"
# The domain from which the user API should be served
# (used by the Web UI)
app_domain="api.octo.example.com"
# The domain from which the WebSocket notification service should be served
# (used by the Web UI)
ws_domain="ws.octo.example.com"
# The domain from which the power user API should be served
# (used by octo CLI)
power_app_domain="power.octo.example.com"
# The domain under which deployment subdomains should be created
base_domain="octo.example.com"
#################################################
# Basic Auth Setup
#
# These parameters should match the ones used in the
# "Web UI authentication secrets" step
#################################################
username="octopod"
password="password"
#################################################
# Kubernetes Dashboard Setup
#################################################
# You can supply the base URL of a k8s dashboard you have set up
# to view the cluster in which Octopod will create deployments.
#
# Example URL to supply:
# https://dashboard.example.com
#
# If you supply a url, then there will be a "Details" button for
# every deployment which opens a filtered page of your k8s dashboard.
#
# If you leave this blank, the "Details" button will not be displayed in the
# Web UI.
kubernetes_dashboard_url=""
#################################################
# Other Setup
#################################################
# NOTE: on macOS you will need to replace `sha256sum` with `shasum -a 256`
sha256_sum=$(sha256sum octopod/values.yaml octopod/templates/* | awk '{print $1}' | sha256sum | awk '{print $1}')
base64_of_username_and_password=$(echo -n "$username:$password" | base64)
status_update_timeout=600
#################################################
# Actual installation in the cluster
#################################################
helm upgrade --install octopod ./octopod \
--namespace octopod \
--set "global.deploy_checksum=$sha256_sum" \
--set "global.image_prefix=$registry" \
--set "global.image_tag=$tag" \
--set "global.image=$image" \
--set "global.octo_image=$octo_image" \
--set "global.utils_image_prefix=$utils_registry" \
--set "global.utils_image=$utils_image" \
--set "global.utils_image_tag=$utils_image_tag" \
--set "global.acme_registration_email=$acme_registration_email" \
--set "global.auth_url=$auth_url" \
--set "global.auth_signin=$auth_signin" \
--set "basic_auth_token=$base64_of_username_and_password" \
--set "project_name=$project_name" \
--set "domain=$domain" \
--set "app_domain=$app_domain" \
--set "ws_domain=$ws_domain" \
--set "power_app_domain=$power_app_domain" \
--set "base_domain=$base_domain" \
--set "status_update_timeout=$status_update_timeout" \
--set "kubernetes_dashboard_url=$kubernetes_dashboard_url" \
--wait --timeout 600 --debug
```
[kubectl]: https://kubernetes.io/docs/tasks/tools/install-kubectl/
[helm]: https://v2.helm.sh/docs/using_helm/#installing-helm
[ingress-nginx]: https://kubernetes.github.io/ingress-nginx
[cert-manager]: https://cert-manager.io/docs/
[kubernetes-service-account]: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account
[kubernetes-pvc]: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims
[kubernetes-storage-classes]: https://kubernetes.io/docs/concepts/storage/storage-classes
[kubernetes-job]: https://kubernetes.io/docs/concepts/workloads/controllers/job
[minikube]: https://kubernetes.io/ru/docs/tasks/tools/install-minikube/
[tiller]: https://v2.helm.sh/docs/install/
[kubedog]: https://github.com/werf/kubedog
[lets-encrypt]: https://letsencrypt.org
[lets-encrypt-rate-limits]: https://letsencrypt.org/docs/rate-limits
## Helm 3 chart
You can use a [Helm 3](../../charts/helm3/octopod) chart (beta!) to install Octopod.
Now you may want to check how to [install helm charts](Helm-based_deployment_guide.md) with Octopod.


@ -5,8 +5,6 @@
- [Octopod roles](#octopod-roles)
- [Kubernetes role-based access control](#kubernetes-role-based-access-control)
- [Privileges to delete certificates](#privileges-to-delete-certificates)
- [Privileges to delete _Persistent Volumes Claims_](#privileges-to-delete-persistent-volumes-claims)
- [Web UI authentication](#web-ui-authentication)
- [Web UI OAuth](#web-ui-oauth)
- [octo CLI authentication](#octo-cli-authentication)
@ -28,87 +26,30 @@ _Web UI_ users have the _user_ role.
_octo CLI_ users have the _admin_ role.
There is currently no way to give someone access to _octo CLI_ without giving them the _admin_ role since authentication is done through SSL certificates instead of through OAuth.
There is currently no way to give someone access to _octo CLI_ without giving them the _admin_ role since authentication is not done through OAuth.
## Kubernetes role-based access control
_Octopod Server_ is deployed in the `octopod` _Kubernetes_ namespace. Deployments are deployed in the `deployments` namespace.
_Octopod Server_ uses the `octopod` [_Service Account_][kubernetes-service-account].
Freeing resources might require _Octopod Server_ / _control scripts_ to have privileges to delete certificates and [_Persistent Volumes Claims_][kubernetes-pvc]. (It depends on the specifics of the _Kubernetes_ setup and _control scripts_)
Octopod needs sufficient permissions to run helm inside Kubernetes and create all the resources described in the helm chart it is installing, so the permissions are quite extensive.
Access can be configured through [_RBAC_][kubernetes-rbac]:
### Privileges to delete certificates
```yaml
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cert-control-clusterrole
rules:
- apiGroups: ["cert-manager.io"]
resources: ["certificates"]
verbs: ["list", "delete", "deletecollection"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: octopod-cert-control-rolebinding
namespace: deployments
roleRef:
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
name: cert-control-clusterrole
subjects:
- kind: ServiceAccount
name: octopod
namespace: octopod
```
### Privileges to delete _Persistent Volumes Claims_
```yaml
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: pvc-control-clusterrole
rules:
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["list", "delete", "deletecollection"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: octopod-pvc-control-rolebinding
namespace: deployments
roleRef:
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
name: pvc-control-clusterrole
subjects:
- kind: ServiceAccount
name: octopod
namespace: octopod
```
[RBAC][kubernetes-rbac] rules to describe permissions needed are added automatically by [the Octopod Helm Chart](../../charts/octopod/templates/rbac.yaml).
## Web UI authentication
Authentication between the _Web UI_ and _Octopod Server_ is done through _Basic Auth_. The _Bearer token_ is read by the _Web UI_ after the page is loaded as part of [the config](../../charts/octopod/templates/octopod-nginx-configmap.yaml#L15-L20). By default, everything, including the config, can be accessed without any authentication. For ways of mitigating this please see the next section.
Authentication between the _Web UI_ and _Octopod Server_ is done through _Basic Auth_. The _Bearer token_ is read by the _Web UI_ after the page is loaded as part of [the config](../../charts/octopod/templates/nginx-configmap.yaml#L23-L33). By default, everything, including the config, can be accessed without any authentication. For ways of mitigating this please see the next section.
## Web UI OAuth
The [_Web UI_](Technical_architecture.md#-web-ui) on its own does not have any authentication whatsoever, meaning that anyone can open it and manage your deployments. Luckily, _Kubernetes_ [can be configured](../../charts/octopod/templates/octopod-ingress.yaml#L15-L21) to authenticate users before they get access to the _Web UI_. It can be set up to authenticate users through [_Ingress_](https://kubernetes.io/docs/concepts/services-networking/ingress/) which [supports external authentication services][kubernetes-ingress-nginx-external-auth]. You can set up [_OAuth2 Proxy_][oauth2-proxy] in your cluster to support numerous OAuth services. For example, if you use GitHub, you can set up [_OAuth2 Proxy_][oauth2-proxy] to use GitHub to automatically grant users access to Octopod when you add them to your organization in GitHub.
The [_Web UI_](Technical_architecture.md#-web-ui) on its own does not have any authentication whatsoever, meaning that anyone can open it and manage your deployments. Luckily, _Kubernetes_ [can be configured](../../charts/octopod/README.md#configuration-and-installation-details) to authenticate users before they get access to the _Web UI_. It can be set up to authenticate users through [_Ingress_](https://kubernetes.io/docs/concepts/services-networking/ingress/) which [supports external authentication services][kubernetes-ingress-nginx-external-auth]. You can set up [_OAuth2 Proxy_][oauth2-proxy] in your cluster to support numerous OAuth services. For example, if you use GitHub, you can set up [_OAuth2 Proxy_][oauth2-proxy] to use GitHub to automatically grant users access to Octopod when you add them to your organization in GitHub.
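Concretely, with [_ingress-nginx_](https://kubernetes.github.io/ingress-nginx) this boils down to two external-auth annotations on the _Ingress_ that fronts the _Web UI_; a sketch with placeholder URLs:

```yaml
metadata:
  annotations:
    nginx.ingress.kubernetes.io/auth-url: "https://oauth.example.com/oauth2/auth"
    nginx.ingress.kubernetes.io/auth-signin: "https://oauth.example.com/oauth2/start?rd=/redirect/$http_host$request_uri"
```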
## octo CLI authentication
Authentication between _octo CLI_ and _Octopod Server_ is done through an SSL certificate that is generated [when deploying _Octopod_](../en/Octopod_deployment_guide.md#creating-ssl-certificates).
Authentication between _octo CLI_ and _Octopod Server_ is done through a special token, which is generated automatically or specified by the user in the `octopod.cliAuthSecret` parameter, as described [here](../../charts/octopod/README.md#parameters).
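For example, pinning the token explicitly at install time might look like this (a minimal sketch; all other chart values are omitted):

```bash
helm upgrade --install octopod ./octopod \
  --namespace octopod \
  --set octopod.cliAuthSecret="$(openssl rand -hex 32)"
```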
[kubernetes-service-account]: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account
[kubernetes-rbac]: https://kubernetes.io/docs/reference/access-authn-authz/rbac
[kubernetes-pvc]: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims
[kubernetes-ingress-nginx-external-auth]: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/#external-authentication
[oauth2-proxy]: https://oauth2-proxy.github.io/oauth2-proxy



@ -1,165 +1,73 @@
# Helm-based deployment guide
# Installing Helm charts with Octopod
## Preparation
In this guide you will learn how to install the [wordpress chart](https://github.com/bitnami/charts/tree/master/bitnami/wordpress) from bitnami using Octopod.
It is assumed that you already have Docker installed on your system,
and that you have installed _Octopod_. If you have not installed _Octopod_, please follow the [_Octopod_ installation guide](Octopod_deployment_with_K8S.md).
When installing _Octopod_ you will need to specify the name of the image with the [control scripts](Control_scripts.md):
```bash
--set "global.utils_image_prefix=typeable" \
--set "global.utils_image=octopod-helm-example" \
--set "global.utils_image_tag=1.0" \
```
## Your first Deployment
The end result should look something like this:
Before continuing, make sure you have already installed Octopod by following our [guide](Octopod_deployment_with_K8S.md).
Note that in this guide we assume you have installed Octopod locally.
```bash
helm upgrade octopod ./octopod \
--install \
--namespace octopod \
--set "global.deploy_checksum=$sha256_sum" \
--set "global.image_prefix=typeable" \
--set "global.image_tag=1.0" \
--set "global.utils_image_prefix=typeable" \
--set "global.utils_image=octopod-helm-example" \
--set "global.utils_image_tag=1.1" \
--set "global.acme_registration_email=certbot@example.com" \
--set "global.auth_url=https://oauth.exmaple.com/oauth2/auth" \
--set "global.auth_signin=https://oauth.exmaple.com/oauth2/start?rd=/redirect/$http_host$request_uri" \
--set "project_name=MyProject" \
--set "domain=octopod.example.com" \
--set "app_domain=octopod-app.example.com" \
--set "power_app_domain=octopod-power-app.example.com" \
--set "ws_domain=octopod-ws.example.com" \
--set "base_domain=example.com" \
--set "status_update_timeout=600" \
--wait \
--timeout 600 \
--debug
```
Opening Octopod in your browser, you will see the following:
To deploy an application with _Octopod_ you will need the application itself and a set of [control scripts](Control_scripts.md).
You can deploy through the _Web UI_ or _octo CLI_. In this example we will only use _octo CLI_.
You will also need Docker to run the container with _octo CLI_.
Before using _octo CLI_ you need to configure access to _Octopod_; please refer to the [_octo CLI_ user guide](Octo_user_guide.md).
![](../images/octopod_blank.png)
As an example we will use a simple web application that serves a single endpoint, `/`.
A request to `/` returns a list of the environment variables whose names start with `APP_ENV`, rendered as HTML markup.
The source code of the application is available at [web-app](../../examples/web-app).
The application was packaged into a docker image and pushed to the registry:
```bash
docker build -f docker/Dockerfile -t typeable/octopod-web-app-example:v1 .
docker push typeable/octopod-web-app-example:v1
```
Click the New Deployment button.
We will also update the application to a newer version.
The new version changes the HTML markup: each environment variable is rendered as an `li` list item instead of a separate `div`.
The source code of the updated application is available at [web-app-v2](../../examples/web-app-v2).
The updated application was packaged into a docker image and pushed to the registry:
```bash
docker build -f docker/Dockerfile -t typeable/octopod-web-app-example:v2 .
docker push typeable/octopod-web-app-example:v2
```
![](../images/octopod_deployment_blank.png)
The web application needs one TLS certificate per deployment. [_Cert Manager_][cert-manager] creates the certificates through [_Let's Encrypt_][lets-encrypt].
[_Let's Encrypt_][lets-encrypt] has [rate limits on certificate creation][lets-encrypt-rate-limits].
If you hit the `too many registrations for this IP` limit, moving the [_Cert Manager_][cert-manager] Pod to another node might help.
Here you can fill in all the parameters of your Deployment. Let's fill them in!
The set of [control scripts](Control_scripts.md) uses helm v2.x, kubectl, and kubedog to manage deployments and check their status.
The source code of the [control scripts](Control_scripts.md) is available at [helm-based-control-scripts](../../examples/helm-based-control-scripts).
The set of [control scripts](Control_scripts.md) was packaged into a docker image and pushed to the registry:
![](../images/octopod_deployment_filled.png)
```bash
docker build -f docker/Dockerfile -t typeable/octopod-helm-example:1.0 .
docker push typeable/octopod-helm-example:1.0
```
**Note:**
Our example uses a stateless application packaged into a single chart,
so creating and updating it require the same set of commands (`helm upgrade --install ...`).
That is why the [control scripts](Control_scripts.md) only implement `create`, and it is also used in place of `update`.
A more complex application packaged into several charts might require different sets of commands,
which would mean implementing separate `create` and `update` scripts.
Name ― we chose `wordpress`, but you can pick any other name.
## Creating a new deployment
Tag ― `5.8.0`. We took it from the [chart parameters](https://github.com/bitnami/charts/blob/master/bitnami/wordpress/Chart.yaml#L4).
To create it, it is enough to run
```bash
octo create -n hello-octopod -t v1 -e APP_ENV_KEY1=VALUE1
```
App Overrides:
`ingress.enabled: true`
`ingress.hostname: wordpress.lvh.me`
- `-n hello-octopod` ― the name of the deployment.
- `-t v1` ― the tag of the deployment.
- `-e APP_ENV_KEY1=VALUE1` ― an application-level environment variable.
We took these parameters from the [chart documentation](https://github.com/bitnami/charts/tree/master/bitnami/wordpress#traffic-exposure-parameters). You can set any of the parameters listed there.
This command calls `create` from the [control scripts](Control_scripts.md), and `create` calls `helm`.
After a while you will see the _Pod_ created for the new deployment
```
$ kubectl get pods -n deployment
NAME READY STATUS RESTARTS AGE
app-hello-octopod-8965856-qbwvq 1/1 Running 0 15s
```
Once you have filled everything in, click the Save button and wait until the Deployment reaches the Running state.
If you open `http://hello-octopod.<the domain specified when deploying Octopod>` in your browser, you will see
![](../images/octopod_deployment_filled.png)
![After creation](../images/hello-octopod-1.png)
Now you can click the `wordpress` link in the Links column and you will be redirected to your wordpress instance.
## Adding a new application-level environment variable
![](../images/wordpress_blank.png)
To add a new application-level variable, it is enough to run
```bash
octo update -n hello-octopod -t v1 -e APP_ENV_KEY2=VALUE2
```
Congratulations, you have created your first Deployment in Octopod!
- `-n hello-octopod` ― the name of the deployment.
- `-t v1` ― the tag of the deployment.
- `-e APP_ENV_KEY2=VALUE2` ― an application-level environment variable.
## Going further
This command calls `create` from the [control scripts](Control_scripts.md), and `create` calls `helm` (see the note in the _Preparation_ section).
A few seconds later, if you open `http://hello-octopod.<the domain specified when deploying Octopod>` in your browser, you will see the new application-level environment variable:
By now you are probably wondering how Octopod installed a chart from the bitnami repository even though we did not specify any parameters for their repository at all. That is because we set these defaults at the chart level [here](../../charts/octopod/values.yaml#L90).
But you can override all of these parameters. Let's get to it!
![After setting the new application-level environment variable](../images/hello-octopod-2.png)
First, let's archive the wordpress Deployment we no longer need.
## Updating the application version
![](../images/octopod_archive.png)
To update the application to a new version, it is enough to run
```bash
octo update -n hello-octopod -t v2
```
And let's create one more new Deployment, this time with a different list of Overrides.
- `-n hello-octopod` ― the name of the deployment.
- `-t v2` ― the tag of the deployment.
![](../images/octopod_in_octopod_deployment.png)
This command calls `create` from the [control scripts](Control_scripts.md), and `create` calls `helm` (see the note in the _Preparation_ section).
A few seconds later, if you open `http://hello-octopod.<the domain specified when deploying Octopod>` in your browser, you will see the updated HTML markup:
Name: octopod-internal
![After creation](../images/hello-octopod-3.png)
Tag: 1.3.1
## Increasing the number of replicas by setting a deployment-level environment variable
App Overrides:
To add the new variable, it is enough to run
```bash
octo update -n hello-octopod -t v2 -o replicas=3
```
`octopod.baseDomain: octopod-internal.lvh.me`
`ingress.tls.enabled: false`
- `-n hello-octopod` ― the name of the deployment.
- `-t v2` ― the tag of the deployment.
- `-o replicas=3` ― a deployment-level environment variable.
Deployment Overrides:
This command calls `create` from the [control scripts](Control_scripts.md), and `create` calls `helm` (see the note in the _Preparation_ section).
`chart_name: octopod`
`chart_repo_name: typeable`
`chart_repo_url: https://typeable.github.io/octopod/`
`chart_version: 0.5.1`
After a while you will see that the number of _Pods_ has become 3
```bash
$ kubectl get pods -n deployment
NAME READY STATUS RESTARTS AGE
app-hello-octopod-8965856-qbwvq 1/1 Running 0 97m
app-hello-octopod-8965856-v585c 1/1 Running 0 15s
app-hello-octopod-8965856-v88md 1/1 Running 0 15s
```
Just as in the previous example, we took all the parameters from the [chart documentation](../../charts/octopod/README.md#Parameters), but the Deployment Overrides are parameters for the control scripts. To learn more about the scripts, see [this documentation](../../helm-control-scripts/README.md).
If you open `http://hello-octopod.<the domain specified when deploying Octopod>` in your browser, you will see that the application keeps serving requests:
![After updating the deployment-level environment variable](../images/hello-octopod-3.png)
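If you prefer _octo CLI_ over the _Web UI_, the same `octopod-internal` deployment can be sketched as a single command, assuming App Overrides map to `-e` and Deployment Overrides map to `-o` as in the earlier examples:

```bash
octo create -n octopod-internal -t 1.3.1 \
  -e octopod.baseDomain=octopod-internal.lvh.me \
  -e ingress.tls.enabled=false \
  -o chart_name=octopod \
  -o chart_repo_name=typeable \
  -o chart_repo_url=https://typeable.github.io/octopod/ \
  -o chart_version=0.5.1
```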
[cert-manager]: https://cert-manager.io/docs
[lets-encrypt]: https://letsencrypt.org
[lets-encrypt-rate-limits]: https://letsencrypt.org/docs/rate-limits
Now you have Octopod inside Octopod! All that is left is to install your own helm chart.


@ -1,285 +1,30 @@
# Octopod deployment with K8S
# Octopod installation guide
1. Installing the required utilities
## Installation options
There are several ways to install Octopod, depending on your preferences.
The installation requires [_kubectl_][kubectl] and [_helm_][helm] version 2.
### If you have a spare kubernetes cluster
To install Octopod into an already existing kubernetes cluster you can use our [Helm chart](../../charts/octopod).
To install Octopod successfully, your cluster must satisfy the following requirements:
- PVC support
- An ingress controller ([ingress-nginx](https://kubernetes.github.io/ingress-nginx/)) installed
- Kubernetes version >= 1.19.0
2. Setting up the cluster
If the cluster satisfies the requirements, you can follow the [instructions](../../charts/octopod/README.md) to install Octopod using the helm chart.
_Octopod_ uses [_Ingress Nginx_][ingress-nginx] and [_Cert Manager_][cert-manager]. Make sure they are installed in your cluster.
### If you want to install it locally
You can use the [octopod_local_install.sh](../../octopod_local_install.sh) script to deploy Octopod into a kind cluster on your local machine.
_Octopod_ needs 3 TLS certificates to function. [_Cert Manager_][cert-manager] creates the certificates through [_Let's Encrypt_][lets-encrypt].
[_Let's Encrypt_][lets-encrypt] has [rate limits on certificate creation][lets-encrypt-rate-limits].
If you hit the `too many registrations for this IP` limit, moving the [_Cert Manager_][cert-manager] Pod to another node might help.
Before running the script, make sure you have the following utilities installed:
- [docker](https://docs.docker.com/engine/install/)
- [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
- [helm 3](https://helm.sh/docs/intro/quickstart/#install-helm)
_Octopod_ needs no more than 2 CPU and 2 GB of RAM; please make sure you have the necessary resources.
After installing the dependencies, just run the script `./octopod_local_install.sh`
Once the script finishes, you will be able to access Octopod at the `octopod.lvh.me` domain.
`lvh.me` is a special domain that always resolves to `127.0.0.1`
_Octopod_ will be deployed to a node with the `role=stand` label; please add the label:
```
kubectl label node <your_node> role=stand
```
## What next?
3. You will also need the [_Tiller_][tiller] service on the cluster side, which is required for [_helm 2_][helm] to work. The easiest way to install it is:
```bash
helm init
```
If you have installed [_Tiller_][tiller], you might run into [access privilege problems](https://github.com/helm/helm/issues/5100) with [_Tiller_][tiller].
To give it the necessary privileges, it is enough to run the following commands:
```bash
kubectl create -n kube-system serviceaccount tiller
kubectl --namespace kube-system create clusterrolebinding tiller-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
kubectl --namespace kube-system patch deploy tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
```
4. Downloading the project source code
```bash
git clone --branch master https://github.com/typeable/octopod.git /tmp/octopod
```
5. Creating the `deployment` and `octopod` namespaces
```bash
kubectl create namespace deployment
kubectl create namespace octopod
```
6. Creating the `octopod` [_Service Account_][kubernetes-service-account]
```bash
kubectl create -n octopod serviceaccount octopod
```
7. Creating the secret that will be used for Basic Auth between the _Web UI_ and _Octopod Server_
```bash
username="octopod"
password="password" # please change it to a more secure password
kubectl create secret generic octopod-basic-auth -n octopod --from-literal=auth=$(htpasswd -bn $username $password)
```
8. Creating a ConfigMap with the certificates and keys that will be used for authentication between _octo CLI_ and _Octopod Server_
```bash
mkdir certs
(cd certs && \
openssl req -x509 -newkey rsa:4096 -keyout server_key.pem -out server_cert.pem -nodes -subj "/CN=localhost/O=Server" && \
openssl req -newkey rsa:4096 -keyout client_key.pem -out client_csr.pem -nodes -subj "/CN=Client" && \
openssl x509 -req -in client_csr.pem -CA server_cert.pem -CAkey server_key.pem -out client_cert.pem -set_serial 01 -days 3650)
kubectl create configmap octopod-certs -n octopod --from-file=./certs
```
After this, the `certs` directory will contain the certificates used for authentication between _octo CLI_ and _Octopod Server_. `client_key.pem` and `client_cert.pem` will need to be [passed to _octo CLI_ through environment variables](Octo_user_guide.md).
9. Next, you will need to set up DNS so that subdomains of your domain point to the address of your cluster:
```
*.octo.example.com A 1.2.3.4
```
10. Since authentication between _octo CLI_ and _Octopod Server_ is performed through the SSL certificates we have just created ourselves, the [_ingress-nginx_][ingress-nginx] used in the cluster must support _SSL passthrough_ so that we can tell it "don't touch the certificates".
To achieve this, we need to add `--enable-ssl-passthrough` to the launch arguments of the [_ingress-nginx_][ingress-nginx] controller. This can be done with a command like this one (you will need to look up the exact namespace and deployment name in your particular cluster):
```bash
kubectl edit deploy -n ingress-nginx nginx-ingress-ingress-nginx-controller
```
Among all the other parameters, this one must be present:
```yaml
spec:
...
template:
...
spec:
...
containers:
...
- args:
...
- --enable-ssl-passthrough
```
11. Installing the Octopod infrastructure
Before starting the installation, make sure your cluster has a `default` [_Storage Class_][kubernetes-storage-classes].
For [_minikube_][minikube] you can create the `default` [_Storage Class_][kubernetes-storage-classes] like this:
```bash
cat <<EOF | kubectl apply -f-
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: default
provisioner: k8s.io/minikube-hostpath
reclaimPolicy: Delete
volumeBindingMode: Immediate
EOF
```
```bash
cd /tmp/octopod/charts
helm upgrade --install octopod-infra ./octopod-infra --namespace octopod \
--wait --timeout 600 --debug
```
12. Granting privileges to the `octopod` [_Service Account_][kubernetes-service-account]
1. If you are planning to use helm version 2 to deploy stagings,
you will need to grant the `octopod` [_Service Account_][kubernetes-service-account] the corresponding privileges:
```bash
cd /tmp/octopod/charts
helm install --name octopod-helm-access ./helm-access
```
2. If you are planning to delete [_Persistent Volumes Claims_][kubernetes-pvc] in the `cleanup` script of your [_Control scripts_](Control_scripts.md), you will need to grant the `octopod` [_Service Account_][kubernetes-service-account] the corresponding privileges:
```bash
cd /tmp/octopod/charts
helm install --name octopod-pvc-control ./pvc-control
```
3. If you use [_Cert Manager_][cert-manager] and are planning to delete certificates created by [_cert-manager_][cert-manager] in the `cleanup` script of your [_Control scripts_](Control_scripts.md), you will need to grant the `octopod` [_Service Account_][kubernetes-service-account] the corresponding privileges:
```bash
cd /tmp/octopod/charts
helm install --name octopod-cert-control ./cert-control
```
4. If you are planning to use [_kubedog_][kubedog] in the `check` script of your [_Control scripts_](Control_scripts.md) to check the state of a deployment, you will need to grant the `octopod` [_Service Account_][kubernetes-service-account] the appropriate privileges:
```bash
cd /tmp/octopod/charts
helm install --name octopod-kubedog-access ./kubedog-access
```
5. If you are planning to delete [_Jobs_][kubernetes-job] from your [_Control scripts_](Control_scripts.md), you will need to grant the `octopod` [_Service Account_][kubernetes-service-account] the corresponding privileges:
```bash
cd /tmp/octopod/charts
helm install --name octopod-job-control ./job-control
```
13. Installing the Octopod server
```bash
cd /tmp/octopod/charts
# the name of the project
project_name="MyProject"
# The name of your docker registry that contains the docker images with Octopod Server, octo CLI, Control scripts, and sqitch
registry="typeable"
# the Octopod Server tag
tag="1.3"
# the name of the Octopod Server docker image
image="octopod"
# the name of the octo CLI docker image
octo_image="octo"
# The name of your docker registry that contains the docker image with the Control Scripts you have written
utils_registry="registry_name"
# the name of the Control scripts docker image
utils_image="utils"
# the Control scripts tag
utils_image_tag="1.0"
# the email used to register with Let's Encrypt
acme_registration_email="certbot@example.com"
# This parameter is optional; if left undefined, access to the Web UI will not be restricted by authentication.
# This parameter works in tandem with auth_signin: either define both or neither.
# If defined, it is passed to ingress-nginx and used to restrict access through an external service, e.g. OAuth.
# To restrict access through OAuth you can use OAuth2 Proxy (see the note below).
auth_url="https://oauth.example.com/oauth2/auth"
# This parameter is optional; if left undefined, access to the Web UI will not be restricted by authentication.
# This parameter works in tandem with auth_url: either define both or neither.
# If defined, it is passed to ingress-nginx and used to restrict access through an external service, e.g. OAuth.
# To restrict access through OAuth you can use OAuth2 Proxy (see the note below).
auth_signin="https://oauth.example.com/oauth2/start?rd=/redirect/$http_host$request_uri"
# the domain at which the Web UI will be available
domain="octo.example.com"
# the domain at which Octopod Server will be available for HTTP requests
app_domain="api.octo.example.com"
# the domain at which Octopod Server will be available for WebSocket requests
ws_domain="ws.octo.example.com"
# the domain at which Octopod Server will be available for HTTP requests from octo CLI
power_app_domain="power.octo.example.com"
# the domain that will be used to create deployment domains
base_domain="octo.example.com"
# the timeout in seconds after which a deployment status update is considered failed
status_update_timeout=600
# the chart checksum
# NOTE: on macOS you will need to replace `sha256sum` with `shasum -a 256`
sha256_sum=$(sha256sum octopod/values.yaml octopod/templates/* | awk '{print $1}' | sha256sum | awk '{print $1}')
# the username; must match the username from step 7
username="octopod"
# the password; must match the password from step 7
password="password"
# base64 of the username and password
base64_of_username_and_password=$(echo -n "$username:$password" | base64)
helm upgrade --install octopod ./octopod \
--namespace octopod \
--set "global.deploy_checksum=$sha256_sum" \
--set "global.image_prefix=$registry" \
--set "global.image_tag=$tag" \
--set "global.image=$image" \
--set "global.octo_image=$octo_image" \
--set "global.utils_image_prefix=$utils_registry" \
--set "global.utils_image=$utils_image" \
--set "global.utils_image_tag=$utils_image_tag" \
--set "global.acme_registration_email=$acme_registration_email" \
--set "global.auth_url=$auth_url" \
--set "global.auth_signin=$auth_signin" \
--set "basic_auth_token=$base64_of_username_and_password" \
--set "project_name=$project_name" \
--set "domain=$domain" \
--set "app_domain=$app_domain" \
--set "ws_domain=$ws_domain" \
--set "power_app_domain=$power_app_domain" \
--set "base_domain=$base_domain" \
--set "status_update_timeout=$status_update_timeout" \
--wait --timeout 600 --debug
```
**Note**: OAuth configuration is described in more detail in the [_Security model_](Security_model.md#users-auth-example-with-oauth) document.
[kubectl]: https://kubernetes.io/docs/tasks/tools/install-kubectl
[helm]: https://v2.helm.sh/docs/using_helm/#installing-helm
[ingress-nginx]: https://kubernetes.github.io/ingress-nginx
[cert-manager]: https://cert-manager.io/docs
[lets-encrypt]: https://letsencrypt.org
[lets-encrypt-rate-limits]: https://letsencrypt.org/docs/rate-limits
[kubernetes-service-account]: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account
[kubernetes-pvc]: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims
[kubernetes-storage-classes]: https://kubernetes.io/docs/concepts/storage/storage-classes
[kubernetes-job]: https://kubernetes.io/docs/concepts/workloads/controllers/job
[minikube]: https://kubernetes.io/ru/docs/tasks/tools/install-minikube
[tiller]: https://v2.helm.sh/docs/install
[kubedog]: https://github.com/werf/kubedog
## Helm 3
You can also use the [Helm 3](../../charts/helm3/octopod) chart (beta version!) to install Octopod.
Now you may want to go through the guide on [installing helm charts](Helm-based_deployment_guide.md) with Octopod.


@ -1 +0,0 @@
/target



@ -1,50 +0,0 @@
[package]
name = "helm-based-control-scripts"
version = "0.1.0"
authors = ["Typeable LLC <octopod@typeable.io>"]
edition = "2018"
[dependencies]
clap = "2.33.3"
rand = "0.8.3"
serde = "1.0.123"
serde_derive = "1.0.123"
serde_json = "1.0.62"
tokio = { version = "1", default_features = false, features = ["full"] }
reqwest = { version = "0.11.1", default_features = false, features = ["json", "rustls-tls"] }
[[bin]]
name = "create"
path = "src/bin/create.rs"
[[bin]]
name = "archive"
path = "src/bin/archive.rs"
[[bin]]
name = "check"
path = "src/bin/check.rs"
[[bin]]
name = "cleanup"
path = "src/bin/cleanup.rs"
[[bin]]
name = "archive_check"
path = "src/bin/archive_check.rs"
[[bin]]
name = "tag_check"
path = "src/bin/tag_check.rs"
[[bin]]
name = "init"
path = "src/bin/init.rs"
[[bin]]
name = "info"
path = "src/bin/info.rs"
[[bin]]
name = "notifications"
path = "src/bin/notifications.rs"


@ -1,9 +0,0 @@
# helm-based-control-scripts
An Example of Helm-based control scripts.
## Build Docker image
```bash
docker build -f docker/Dockerfile -t typeable/octopod-helm-example .
```


@ -1,30 +0,0 @@
FROM ekidd/rust-musl-builder:1.46.0 as builder
RUN sudo mkdir /app && \
sudo chown -R rust:rust /app
WORKDIR /app
RUN USER=root cargo new helm-based-control-scripts
COPY Cargo.toml /app/helm-based-control-scripts/
COPY Cargo.lock /app/helm-based-control-scripts/
COPY src /app/helm-based-control-scripts/src
WORKDIR /app/helm-based-control-scripts
RUN cargo build --release
FROM typeable/kube-things as base-utils
FROM busybox
COPY --from=builder /app/helm-based-control-scripts/target/x86_64-unknown-linux-musl/release/create /utils/create
COPY --from=builder /app/helm-based-control-scripts/target/x86_64-unknown-linux-musl/release/create /utils/update
COPY --from=builder /app/helm-based-control-scripts/target/x86_64-unknown-linux-musl/release/archive /utils/archive
COPY --from=builder /app/helm-based-control-scripts/target/x86_64-unknown-linux-musl/release/check /utils/check
COPY --from=builder /app/helm-based-control-scripts/target/x86_64-unknown-linux-musl/release/cleanup /utils/cleanup
COPY --from=builder /app/helm-based-control-scripts/target/x86_64-unknown-linux-musl/release/archive_check /utils/archive_check
COPY --from=builder /app/helm-based-control-scripts/target/x86_64-unknown-linux-musl/release/tag_check /utils/tag_check
COPY --from=builder /app/helm-based-control-scripts/target/x86_64-unknown-linux-musl/release/init /utils/init
COPY --from=builder /app/helm-based-control-scripts/target/x86_64-unknown-linux-musl/release/info /utils/info
COPY --from=builder /app/helm-based-control-scripts/target/x86_64-unknown-linux-musl/release/notifications /utils/notifications
COPY --from=base-utils /utils/kubectl /utils/kubectl
COPY --from=base-utils /utils/helm2 /utils/helm
COPY --from=base-utils /utils/kubedog /utils/kubedog


@ -1,59 +0,0 @@
use clap::{App, Arg};
use std::process::{exit, Command};
fn main() -> std::io::Result<()> {
let matches = App::new("archive")
.version("0.1")
.arg(
Arg::with_name("project-name")
.long("project-name")
.short("p")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("base-domain")
.long("base-domain")
.short("d")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("namespace")
.long("namespace")
.short("s")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("name")
.long("name")
.short("n")
.required(true)
.takes_value(true),
)
.get_matches();
let _project_name = matches
.value_of("project-name")
.expect("could not get project-name");
let _base_domain = matches
.value_of("base-domain")
.expect("could not get base-domain");
let _namespace = matches
.value_of("namespace")
.expect("could not get namepace");
let name = matches.value_of("name").expect("could not get name");
let output = Command::new("helm")
.args(&["delete", &format!("app-{}", name), "--purge"])
.output()
.expect("could not delete app");
let success = output.status.success();
if !success {
exit(1)
}
Ok(())
}


@ -1,59 +0,0 @@
use clap::{App, Arg};
use std::process::{exit, Command};
fn main() -> std::io::Result<()> {
let matches = App::new("archive_check")
.version("0.1")
.arg(
Arg::with_name("project-name")
.long("project-name")
.short("p")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("base-domain")
.long("base-domain")
.short("d")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("namespace")
.long("namespace")
.short("s")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("name")
.long("name")
.short("n")
.required(true)
.takes_value(true),
)
.get_matches();
let _project_name = matches
.value_of("project-name")
.expect("could not get project-name");
let _base_domain = matches
.value_of("base-domain")
.expect("could not get base-domain");
let _namespace = matches
.value_of("namespace")
.expect("could not get namepace");
let name = matches.value_of("name").expect("could not get name");
let output = Command::new("helm")
.args(&["status", name])
.output()
.expect("could not get status");
let success = output.status.success();
if success {
exit(1)
}
Ok(())
}


@ -1,86 +0,0 @@
use clap::{App, Arg};
use serde_json::json;
use std::io::Write;
use std::process::{exit, Command, Stdio};
const KUBEDOG_TIMEOUT: usize = 3;
fn main() -> std::io::Result<()> {
let matches = App::new("check")
.version("0.1")
.arg(
Arg::with_name("project-name")
.long("project-name")
.short("p")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("base-domain")
.long("base-domain")
.short("d")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("namespace")
.long("namespace")
.short("s")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("name")
.long("name")
.short("n")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("tag")
.long("tag")
.short("t")
.required(true)
.takes_value(true),
)
.get_matches();
let _project_name = matches
.value_of("project-name")
.expect("could not get project-name");
let _base_domain = matches
.value_of("base-domain")
.expect("could not get base-domain");
let namespace = matches
.value_of("namespace")
.expect("could not get namepace");
let name = matches.value_of("name").expect("could not get name");
let _tag = matches.value_of("tag").expect("could not get tag");
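// kubedog's `multitrack` mode reads a JSON specification of the resources to
// watch from stdin; here we track the single `app-<name>` Deployment that was
// installed for this Octopod deployment, in the target namespace.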
let kubedog_stdin = json!({
"Deployments": [{"ResourceName": format!("app-{}", name), "Namespace": namespace}]
})
.to_string();
let mut child = Command::new("kubedog")
.args(&["multitrack", "-t", &KUBEDOG_TIMEOUT.to_string()])
.stdin(Stdio::piped())
.spawn()
.expect("failed to call kubedog");
{
let stdin = child.stdin.as_mut().expect("failed to open stdin");
stdin
.write_all(kubedog_stdin.as_bytes())
.expect("failed to write to stdin");
}
let output = child.wait_with_output().expect("failed to read stdout");
let success = output.status.success();
if !success {
exit(1)
}
Ok(())
}


@ -1,50 +0,0 @@
use clap::{App, Arg};
fn main() -> std::io::Result<()> {
let matches = App::new("cleanup")
.version("0.1")
.arg(
Arg::with_name("project-name")
.long("project-name")
.short("p")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("base-domain")
.long("base-domain")
.short("d")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("namespace")
.long("namespace")
.short("s")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("name")
.long("name")
.short("n")
.required(true)
.takes_value(true),
)
.get_matches();
let _project_name = matches
.value_of("project-name")
.expect("could not get project-name");
let _base_domain = matches
.value_of("base-domain")
.expect("could not get base-domain");
let _namespace = matches
.value_of("namespace")
.expect("could not get namepace");
let _name = matches.value_of("name").expect("could not get name");
// nop
Ok(())
}


@ -1,236 +0,0 @@
use clap::{App, Arg};
use rand::{
distributions::{Alphanumeric, Distribution},
thread_rng,
};
use std::convert::TryFrom;
use std::convert::TryInto;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use std::process::{exit, Command};
static REGISTRY: &str = "typeable";
static IMAGE: &str = "octopod-web-app-example";
static GIT_REPOSITORY: &str = "https://github.com/typeable/octopod.git";
struct TmpDirGuard(PathBuf);
impl TmpDirGuard {
pub fn new(tmp_dir: &PathBuf) -> Self {
TmpDirGuard(tmp_dir.clone())
}
}
impl Drop for TmpDirGuard {
fn drop(&mut self) {
match fs::remove_dir_all(&self.0) {
Ok(_) => (),
Err(err) => eprintln!("remove_dir_all error: {:?}", err),
};
}
}
#[derive(Debug, Clone)]
struct Override {
pub key: String,
pub value: String,
}
impl TryFrom<&str> for Override {
type Error = &'static str;
fn try_from(s: &str) -> Result<Self, Self::Error> {
let parts = s.split('=').collect::<Vec<_>>();
if parts.len() == 2 {
Ok(Override {
key: parts[0].to_string(),
value: parts[1].to_string(),
})
} else {
Err("Malformed environment key-value pair, should be similar to FOO=bar")
}
}
}
impl ToString for Override {
fn to_string(&self) -> String {
format!("{}={}", self.key, self.value)
}
}
fn main() -> std::io::Result<()> {
let matches = App::new("create")
.version("0.1")
.arg(
Arg::with_name("project-name")
.long("project-name")
.short("p")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("base-domain")
.long("base-domain")
.short("d")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("namespace")
.long("namespace")
.short("s")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("name")
.long("name")
.short("n")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("tag")
.long("tag")
.short("t")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("app-env-override")
.long("app-env-override")
.short("e")
.required(false)
.multiple(true)
.takes_value(true),
)
.arg(
Arg::with_name("deployment-override")
.long("deployment-override")
.short("o")
.required(false)
.multiple(true)
.takes_value(true),
)
.get_matches();
let _project_name = matches
.value_of("project-name")
.expect("could not get project-name");
let base_domain = matches
.value_of("base-domain")
.expect("could not get base-domain");
let namespace = matches
.value_of("namespace")
.expect("could not get namepace");
let name = matches.value_of("name").expect("could not get name");
let tag = matches.value_of("tag").expect("could not get tag");
let app_env_overrides = matches
.values_of("app-env-override")
.unwrap_or_else(Default::default)
.map(|e| e.try_into().expect("could not get valid key=value"))
.collect::<Vec<Override>>();
let deployment_overrides = matches
.values_of("deployment-override")
.unwrap_or_else(Default::default)
.map(|e| e.try_into().expect("could not get valid key=value"))
.collect::<Vec<Override>>();
let tmp_dir = tmp_dir();
let work_dir = Path::new("/tmp").join(tmp_dir);
let _guard = TmpDirGuard::new(&work_dir);
fs::create_dir(&work_dir)?;
let success = clone_and_prepare_repo(&work_dir);
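// Run helm from inside the freshly cloned repository so that the relative
// chart path `examples/web-app/charts/web-app` used in `command_args` resolves.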
let output = Command::new("helm")
.args(command_args(
base_domain,
namespace,
name,
tag,
app_env_overrides,
deployment_overrides,
))
.current_dir(&work_dir)
.output()
.expect("could not create app");
let success2 = output.status.success();
if !(success && success2) {
exit(1)
}
Ok(())
}
fn tmp_dir() -> String {
const LENGTH: usize = 10;
let rng = thread_rng();
let random_string: String = Alphanumeric
.sample_iter(rng)
.take(LENGTH)
.map(char::from)
.collect();
format!("octopod-{}", random_string)
}
fn clone_and_prepare_repo(work_dir: &PathBuf) -> bool {
let output = Command::new("git")
.args(&["clone", "--recursive", "--depth=1", GIT_REPOSITORY, "."])
.current_dir(work_dir)
.output()
.expect("could not clone repo");
output.status.success()
}
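/// Assembles the `helm upgrade --install` argument list for the example web-app chart.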
fn command_args(
domain: &str,
namespace: &str,
name: &str,
tag: &str,
app_env_overrides: Vec<Override>,
deployment_overrides: Vec<Override>,
) -> Vec<String> {
let mut args = vec![
"upgrade",
"--install",
"--namespace",
namespace,
&format!("app-{}", name),
"examples/web-app/charts/web-app",
"--set",
&format!("image_prefix={}", REGISTRY),
"--set",
&format!("image={}", IMAGE),
"--set",
&format!("image_tag={}", tag),
"--set",
&format!("domain={}.{}", name, domain),
"--wait",
"--timeout",
"300",
"--debug",
]
.iter()
.map(ToString::to_string)
.collect::<Vec<_>>();
    let mut app_args = app_env_overrides
        .into_iter()
        .flat_map(|e| vec!["--set".to_string(), format!("env.{}", e)])
        .collect::<Vec<_>>();
    args.append(&mut app_args);
    let mut deployment_args = deployment_overrides
        .into_iter()
        .flat_map(|e| vec!["--set".to_string(), e.to_string()])
        .collect::<Vec<_>>();
    args.append(&mut deployment_args);
args
}
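For orientation, here is a hypothetical invocation of this `create` control script. The binary name, domain, and override keys are illustrative assumptions, not values taken from the repository:

```bash
# Deploy (or upgrade) the example web app as deployment "my-feature".
./create \
  --project-name octopod \
  --base-domain example.com \
  --namespace deployment \
  --name my-feature \
  --tag v1 \
  --app-env-override FOO=bar \
  --deployment-override replicas=2
```

Each `--app-env-override` becomes `--set env.KEY=value` and each `--deployment-override` becomes a plain `--set KEY=value` on the helm command line.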


@ -1,52 +0,0 @@
use clap::{App, Arg};
fn main() -> std::io::Result<()> {
let matches = App::new("cleanup")
.version("0.1")
.arg(
Arg::with_name("project-name")
.long("project-name")
.short("p")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("base-domain")
.long("base-domain")
.short("d")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("namespace")
.long("namespace")
.short("s")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("name")
.long("name")
.short("n")
.required(true)
.takes_value(true),
)
.get_matches();
let _project_name = matches
.value_of("project-name")
.expect("could not get project-name");
let base_domain = matches
.value_of("base-domain")
.expect("could not get base-domain");
let _namespace = matches
.value_of("namespace")
.expect("could not get namepace");
let name = matches.value_of("name").expect("could not get name");
[("app", format!("https://{}.{}", name, base_domain))]
.iter()
.for_each(|(key, value)| println!("{},{}", key, value));
Ok(())
}
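A hypothetical run of this `cleanup` script (argument values are illustrative). As written, it performs no teardown itself; it only prints a `name,URL` pair for the deployment:

```bash
./cleanup -p octopod -d example.com -s deployment -n my-feature
# prints: app,https://my-feature.example.com
```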


@ -1,9 +0,0 @@
use std::env;
fn main() -> std::io::Result<()> {
let _home = env::var("HOME").expect("could not get $HOME");
// nop
Ok(())
}


@ -1,76 +0,0 @@
use clap::{App, Arg};
fn main() -> std::io::Result<()> {
let matches = App::new("notifications")
.version("0.1")
.arg(
Arg::with_name("project-name")
.long("project-name")
.short("p")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("base-domain")
.long("base-domain")
.short("d")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("namespace")
.long("namespace")
.short("s")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("name")
.long("name")
.short("n")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("tag")
.long("tag")
.short("t")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("old-status")
.long("old-status")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("new-status")
.long("new-status")
.required(true)
.takes_value(true),
)
.get_matches();
let _project_name = matches
.value_of("project-name")
.expect("could not get project-name");
let _base_domain = matches
.value_of("base-domain")
.expect("could not get base-domain");
let _namespace = matches
.value_of("namespace")
.expect("could not get namepace");
let _name = matches.value_of("name").expect("could not get name");
let _tag = matches.value_of("tag").expect("could not get tag");
let _old_status = matches
.value_of("old-status")
.expect("could not get old-status");
let _new_status = matches
.value_of("new-status")
.expect("could not get new-status");
// nop
Ok(())
}
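A hypothetical invocation of this `notifications` script, with illustrative status strings; it validates its arguments and then intentionally does nothing:

```bash
./notifications -p octopod -d example.com -s deployment -n my-feature -t v1 \
  --old-status Running --new-status Updating
```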


@ -1,91 +0,0 @@
use clap::{App, Arg};
use serde_derive::Deserialize;
use std::process::exit;
static REGISTRY: &str = "typeable";
static REPOSITORY: &str = "octopod-web-app-example";
#[derive(Debug, Deserialize)]
struct Resp {
results: Vec<Tag>,
}
#[derive(Debug, Deserialize)]
struct Tag {
name: String,
}
async fn do_request() -> reqwest::Result<Resp> {
let url = format!(
"https://hub.docker.com/v2/repositories/{}/{}/tags",
REGISTRY, REPOSITORY
);
let body = reqwest::get(&url).await?.json::<Resp>().await?;
Ok(body)
}
#[tokio::main]
async fn main() -> std::io::Result<()> {
let matches = App::new("tag_check")
.version("0.1")
.arg(
Arg::with_name("project-name")
.long("project-name")
.short("p")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("base-domain")
.long("base-domain")
.short("d")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("namespace")
.long("namespace")
.short("s")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("name")
.long("name")
.short("n")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("tag")
.long("tag")
.short("t")
.required(true)
.takes_value(true),
)
.get_matches();
let _project_name = matches
.value_of("project-name")
.expect("could not get project-name");
let _base_domain = matches
.value_of("base-domain")
.expect("could not get base-domain");
let _namespace = matches
.value_of("namespace")
.expect("could not get namepace");
let _name = matches.value_of("name").expect("could not get name");
let tag = matches.value_of("tag").expect("could not get tag");
    let tag_found = match do_request().await {
        Ok(resp) => resp.results.iter().any(|t| t.name == tag),
        Err(err) => {
            eprintln!("could not get tags, reason: {:?}", err);
            false
        }
    };
if !tag_found {
exit(1)
}
Ok(())
}
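The script checks the tag against the public Docker Hub API. The same request can be reproduced manually; note that the endpoint returns paginated JSON, and only the first page of `results` is inspected here:

```bash
curl -s https://hub.docker.com/v2/repositories/typeable/octopod-web-app-example/tags
```

`Resp` deserializes just the `results` array of objects with a `name` field and ignores the pagination fields.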


@ -1 +0,0 @@
/target

File diff suppressed because it is too large


@ -1,12 +0,0 @@
[package]
name = "web-app"
version = "0.2.0"
authors = ["Typeable LLC <octopod@typeable.io>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
actix-web = "3.3.2"
actix-rt = "1.1.1"
askama = "0.10.5"


@ -1,5 +0,0 @@
# Build Docker image
```bash
docker build -f docker/Dockerfile -t typeable/octopod-web-app-example:v2 .
```
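
Once built, the image has to be pushed for the `tag_check` control script to find the tag on Docker Hub (this assumes push access to the `typeable` organization):

```bash
docker push typeable/octopod-web-app-example:v2
```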

Some files were not shown because too many files have changed in this diff