I adjusted the chart versions of the various services and solved that problem by using Mender 3.4. However, I'm now facing another problem: mender-deployments is in CrashLoopBackOff. To be honest, the documentation is a mess. I tried several Mender versions starting from 3.1.
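For reference, this is roughly how I'm inspecting the failing service (mender-deployments is the deployment name I see in my cluster; adjust if yours differs):

kubectl get pods
kubectl logs deployment/mender-deployments --tail=100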
Here are the commands I used. It's not a fully automated install script.
curl -sfL https://get.k3s.io | sh -s - --write-kubeconfig-mode 644 --data-dir /mnt
export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
# Install Helm
curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
# Install Cert-Manager
export CERT_MANAGER_CHART_VERSION="v1.10.0"
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install cert-manager jetstack/cert-manager \
--version $CERT_MANAGER_CHART_VERSION \
--set installCRDs=true \
--namespace kube-system
# Provide ClusterIssuer
export LETSENCRYPT_SERVER_URL="https://acme-v02.api.letsencrypt.org/directory"
export LETSENCRYPT_EMAIL="<email>"
cat >issuer-letsencrypt.yml <<EOF
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: letsencrypt
spec:
  acme:
    server: ${LETSENCRYPT_SERVER_URL}
    email: ${LETSENCRYPT_EMAIL}
    privateKeySecretRef:
      name: letsencrypt
    solvers:
      - http01:
          ingress: {}
EOF
# Wait until cert-manager is ready
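# (sketch) one way to actually block until the cert-manager webhook is up; adjust -n to wherever cert-manager was installed
kubectl -n kube-system wait --for=condition=Available deployment/cert-manager-webhook --timeout=180s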
kubectl apply -f issuer-letsencrypt.yml
# Install MongoDB
# Install password generator
sudo apt install pwgen
export MONGODB_ROOT_PASSWORD=$(pwgen 32 1)
export MONGODB_REPLICA_SET_KEY=$(pwgen 32 1)
export MONGODB_CHART_VERSION="12.1.31"
export MONGODB_TAG="5.0.10-debian-11-r7"
cat >mongodb.yml <<EOF
architecture: "replicaset"
replicaCount: 1
arbiter:
  enabled: true
auth:
  rootPassword: ${MONGODB_ROOT_PASSWORD}
  replicaSetKey: ${MONGODB_REPLICA_SET_KEY}
readinessProbe:
  timeoutSeconds: 20
image:
  tag: "${MONGODB_TAG}"
persistence:
  size: "4Gi"
EOF
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
helm upgrade --install mongodb bitnami/mongodb --version $MONGODB_CHART_VERSION -f mongodb.yml
# Get connection string
echo mongodb://root:${MONGODB_ROOT_PASSWORD}@mongodb-0.mongodb-headless.default.svc.cluster.local:27017,mongodb-1.mongodb-headless.default.svc.cluster.local:27017
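# (sketch) before moving on, check the replica set pods are ready; the statefulset names are what the bitnami chart creates for this release name
kubectl rollout status statefulset/mongodb --timeout=300s
kubectl rollout status statefulset/mongodb-arbiter --timeout=300s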
# Install NATS, message broker
export NATS_IMAGE="nats:2.7.4-alpine"
export NATS_CHART_VERSION="0.15.1"
cat >nats.yml <<EOF
cluster:
  enabled: true
  replicas: 2
nats:
  image: "${NATS_IMAGE}"
  jetstream:
    enabled: true
    memStorage:
      enabled: true
      size: "1Gi"
    fileStorage:
      enabled: true
      size: "2Gi"
      storageDirectory: /data/
EOF
helm repo add nats https://nats-io.github.io/k8s/helm/charts/
helm repo update
helm install nats nats/nats --version $NATS_CHART_VERSION -f nats.yml
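# (sketch) Mender 3.x needs JetStream, so I check it actually came up; pod/container names are what the nats chart gives me
kubectl rollout status statefulset/nats --timeout=300s
kubectl logs nats-0 -c nats | grep -i jetstream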
# Install MinIO, the object storage provider
export MINIO_TAG="RELEASE.2021-06-17T00-10-46Z"
export MINIO_CHART_VERSION="4.1.7"
export MINIO_ACCESS_KEY=$(pwgen 32 1)
export MINIO_SECRET_KEY=$(pwgen 32 1)
cat >minio-operator.yml <<EOF
tenants: {}
EOF
helm repo add minio https://operator.min.io/
helm repo update
helm install minio-operator minio/minio-operator --version $MINIO_CHART_VERSION -f minio-operator.yml
cat >minio.yml <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: minio-creds-secret
type: Opaque
data:
  accesskey: $(echo -n $MINIO_ACCESS_KEY | base64)
  secretkey: $(echo -n $MINIO_SECRET_KEY | base64)
---
apiVersion: minio.min.io/v2
kind: Tenant
metadata:
  name: minio
  labels:
    app: minio
spec:
  image: minio/minio:${MINIO_TAG}
  credsSecret:
    name: minio-creds-secret
  pools:
    - servers: 2
      volumesPerServer: 2
      volumeClaimTemplate:
        metadata:
          name: data
        spec:
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 10Gi
          storageClassName: "local-path"
  mountPath: /export
  requestAutoCert: false
EOF
kubectl apply -f minio.yml
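# (sketch) the operator needs a while to create the tenant pods; I watch them like this (the v1.min.io/tenant label is set by the operator)
kubectl get tenant minio
kubectl get pods -l v1.min.io/tenant=minio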
# Create ingress for minio where artifacts will be uploaded
export MINIO_DOMAIN_NAME="<domain>"
cat >minio-ingress.yml <<EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: minio-ingress
  annotations:
    cert-manager.io/issuer: "letsencrypt"
spec:
  tls:
    - hosts:
        - ${MINIO_DOMAIN_NAME}
      secretName: minio-ingress-tls
  rules:
    - host: "${MINIO_DOMAIN_NAME}"
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: minio
                port:
                  number: 80
EOF
kubectl apply -f minio-ingress.yml
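# (sketch) verify cert-manager picked up the ingress and issued the certificate; the Certificate is named after the TLS secret
kubectl get certificate minio-ingress-tls
kubectl describe certificate minio-ingress-tls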
# Mender deployment
# Need two keys: one for device authentication, one for user administration
openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:3072 | openssl rsa -out device_auth.key
openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:3072 | openssl rsa -out useradm.key
helm repo add mender https://charts.mender.io
helm repo update
export MENDER_SERVER_DOMAIN="<domain>"
export MENDER_SERVER_URL="https://${MENDER_SERVER_DOMAIN}"
export MENDER_VERSION="3.4.0"
cat >mender-${MENDER_VERSION}.yml <<EOF
global:
  enterprise: false
  mongodb:
    URL: "mongodb://root:${MONGODB_ROOT_PASSWORD}@mongodb-0.mongodb-headless.default.svc.cluster.local:27017,mongodb-1.mongodb-headless.default.svc.cluster.local:27017"
  nats:
    URL: "nats://nats:4222"
  url: "${MENDER_SERVER_URL}"
api_gateway:
  env:
    SSL: false
device_auth:
  certs:
    key: |-
$(cat device_auth.key | sed -e 's/^/      /g')
useradm:
  certs:
    key: |-
$(cat useradm.key | sed -e 's/^/      /g')
EOF
helm upgrade --install mender mender/mender --version $MENDER_VERSION -f mender-$MENDER_VERSION.yml
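# (sketch) worth checking the private keys survived the heredoc/sed indentation; this shows the values helm actually deployed
helm get values mender | head -n 40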
cat >mender-ingress.yml <<EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: mender-ingress
  annotations:
    cert-manager.io/issuer: "letsencrypt"
spec:
  tls:
    - hosts:
        - ${MENDER_SERVER_DOMAIN}
      secretName: mender-ingress-tls
  rules:
    - host: "${MENDER_SERVER_DOMAIN}"
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: mender-api-gateway
                port:
                  number: 80
EOF
kubectl apply -f mender-ingress.yml
# Create a new user
export USER_EMAIL="<mail>"
export USER_PASSWORD="<password>"
USERADM_POD=$(kubectl get pod -l 'app.kubernetes.io/name=useradm' -o name | head -1)
kubectl exec $USERADM_POD -- useradm create-user --username "${USER_EMAIL}" --password "${USER_PASSWORD}"
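# (sketch) quick smoke test of the new user against the standard useradm login endpoint; returns a JWT on success
curl -sk -X POST -u "${USER_EMAIL}:${USER_PASSWORD}" "${MENDER_SERVER_URL}/api/management/v1/useradm/auth/login"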