kubectl run
kubectl run --image=ubuntu:18.04 tmp-app --command -- tail -f /dev/null
kubectl run -it --rm aks-ssh --image=ubuntu:18.04
kubectl run -it --rm busybox --image=busybox --restart=Never -- sh
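Depending on your kubectl version, the first command creates either a bare pod or a Deployment named tmp-app. Assuming a pod named tmp-app (adjust the name if your kubectl created a Deployment-managed pod instead), you can open a shell inside it like this:
kubectl exec -it tmp-app -- /bin/bash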
aks-mgmt
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: aks-mgmt
  labels:
    app: aks-mgmt
spec:
  replicas: 1
  selector:
    matchLabels:
      app: aks-mgmt
  template:
    metadata:
      labels:
        app: aks-mgmt
    spec:
      containers:
      - image: tadeugr/aks-mgmt
        name: aks-mgmt
        command: ["/bin/bash","-c"]
        args: ["/start.sh; tail -f /dev/null"]
        ports:
        - containerPort: 8080
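To use this manifest, save it to a file (aks-mgmt.yml is just an example name), apply it, find the pod by its label, and open a shell in it (replace POD-NAME with the name returned by the second command):
kubectl apply -f aks-mgmt.yml
kubectl get pods -l app=aks-mgmt
kubectl exec -it POD-NAME -- /bin/bash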
Ubuntu
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-ubuntu
  labels:
    app: my-ubuntu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: my-ubuntu
  template:
    metadata:
      labels:
        app: my-ubuntu
    spec:
      containers:
      - name: my-ubuntu
        image: ubuntu:18.04
        command: ["tail"]
        args: ["-f", "/dev/null"]
Nginx
kubectl -n my-namespace create -f - <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-example
  labels:
    app: my-example
spec:
  replicas: 1
  selector:
    matchLabels:
      app: my-example
  template:
    metadata:
      labels:
        app: my-example
    spec:
      containers:
      - name: my-example
        image: nginx
        ports:
        - containerPort: 80
---
kind: Service
apiVersion: v1
metadata:
  name: my-example
spec:
  selector:
    app: my-example
  ports:
  - protocol: TCP
    port: 80
  type: LoadBalancer
EOF
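Once the Service is up, you can get the external address allocated by the LoadBalancer (same my-namespace as above); the EXTERNAL-IP column may show <pending> for a few minutes:
kubectl -n my-namespace get service my-example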
Bitcoin mining
Create the manifest:
cat > /tmp/bitcoin.yml <<EOF
---
apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-bitcoin-mining
---
apiVersion: v1
kind: LimitRange
metadata:
  name: cpu-limit-range
  namespace: kubernetes-bitcoin-mining
spec:
  limits:
  - default:
      cpu: 1
    defaultRequest:
      cpu: 0.5
    type: Container
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-btc
  namespace: kubernetes-bitcoin-mining
spec:
  selector:
    matchLabels:
      run: my-btc
  replicas: 1
  template:
    metadata:
      labels:
        run: my-btc
    spec:
      containers:
      - name: 2nd
        image: alexellis2/cpu-opt:2018-1-2
        command: ["./cpuminer", "--cpu-priority", "5", "-a", "hodl", "-o", "stratum+tcp://cryptonight.jp.nicehash.com:3355", "-u", "384M7e8n5B4CBYsG5U2AN4AqpoQwmMr7tX"]
EOF
You must read alexellis' documentation regarding each parameter. The most important one right now is -u: it is your wallet address.
Also read about -o, and find your nearest stratum server's address.
Deploy it:
kubectl create -f /tmp/bitcoin.yml
Scale out:
kubectl -n kubernetes-bitcoin-mining \
  scale deployment my-btc --replicas=500
Testing
Double-check that your pods are running and healthy:
kubectl -n kubernetes-bitcoin-mining get pod
Access one of your nodes and make sure "cpuminer" is running and using your wallet address.
ps aux | grep cpuminer
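If you prefer not to SSH into a node, a similar check can be run from inside one of the pods instead (replace POD-NAME with a name from the previous command; this assumes the image ships ps):
kubectl -n kubernetes-bitcoin-mining exec POD-NAME -- ps aux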
Rollback
Delete all resources:
kubectl delete -f /tmp/bitcoin.yml
References
https://github.com/alexellis/mine-with-docker
Inter-process communications (IPC)
Create the manifest:
cat > /tmp/ipc.yml <<'EOF'
---
apiVersion: v1
kind: Namespace
metadata:
  name: multi-container-ipc-nginx-proxy
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: mc3-nginx-conf
  namespace: multi-container-ipc-nginx-proxy
data:
  nginx.conf-PROXY: |-
    user nginx;
    worker_processes 1;
    error_log /var/log/nginx/error.log warn;
    pid /var/run/nginx.pid;
    events {
      worker_connections 1024;
    }
    http {
      include /etc/nginx/mime.types;
      default_type application/octet-stream;
      log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
      access_log /var/log/nginx/access.log main;
      sendfile on;
      keepalive_timeout 65;
      upstream webapp {
        server 127.0.0.1:8080;
      }
      server {
        add_header Custom-Header TestPROXY;
        listen 80;
        server_name localhost;
        location / {
          proxy_pass http://webapp;
          proxy_redirect off;
          #root /usr/share/nginx/html;
          #index index.html index.htm;
        }
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
          root /usr/share/nginx/html;
        }
      }
    }
  nginx.conf-HTTP: |-
    user nginx;
    worker_processes 1;
    error_log /var/log/nginx/error.log warn;
    pid /var/run/nginx.pid;
    events {
      worker_connections 1024;
    }
    http {
      include /etc/nginx/mime.types;
      default_type application/octet-stream;
      log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
      access_log /var/log/nginx/access.log main;
      sendfile on;
      keepalive_timeout 65;
      server {
        add_header Custom-Header TestHTTP;
        listen 8080;
        server_name localhost;
        location / {
          root /usr/share/nginx/html;
          index index.html index.htm;
        }
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
          root /usr/share/nginx/html;
        }
      }
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-nginx
  namespace: multi-container-ipc-nginx-proxy
spec:
  selector:
    matchLabels:
      run: my-nginx
  replicas: 1
  template:
    metadata:
      labels:
        run: my-nginx
    spec:
      volumes:
      - name: nginx-proxy-config
        configMap:
          name: mc3-nginx-conf
      containers:
      - name: proxy
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - name: nginx-proxy-config
          mountPath: /etc/nginx/nginx.conf
          subPath: nginx.conf-PROXY
      - name: nginx
        image: nginx
        ports:
        - containerPort: 8080
        volumeMounts:
        - name: nginx-proxy-config
          mountPath: /etc/nginx/nginx.conf
          subPath: nginx.conf-HTTP
---
apiVersion: v1
kind: Service
metadata:
  name: my-nginx
  namespace: multi-container-ipc-nginx-proxy
  labels:
    run: my-nginx
spec:
  type: LoadBalancer
  ports:
  - name: proxy
    protocol: TCP
    port: 80
    targetPort: 80
  - name: http
    protocol: TCP
    port: 8080
    targetPort: 8080
  selector:
    run: my-nginx
EOF
Deploy it:
kubectl create -f /tmp/ipc.yml
Testing
Get your service's LoadBalancer endpoint:
kubectl \
  -n multi-container-ipc-nginx-proxy \
  describe service my-nginx \
  | grep "LoadBalancer Ingress"
Access your LoadBalancer endpoint in your browser.
Open your browser's network inspector and check the Response Headers. You should see TestPROXY (added by nginx in the proxy container) and TestHTTP (added by nginx in the HTTP container).
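If you prefer the command line, the same headers can be inspected with curl (replace YOUR-ENDPOINT with the LoadBalancer Ingress address from the previous command); -I prints only the response headers. Port 80 goes through the proxy container, while port 8080 hits the HTTP container directly:
curl -I http://YOUR-ENDPOINT
curl -I http://YOUR-ENDPOINT:8080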
References
https://bitbucket.org/devopsbuzz/devops/src/master/kubernetes/deploy/basic/ipc-proxy-000/
https://www.mirantis.com/blog/multi-container-pods-and-container-communication-in-kubernetes/
Shared volumes
Create the manifest:
cat > /tmp/shared-volume.yml <<EOF
---
apiVersion: v1
kind: Namespace
metadata:
  name: multi-container-shared-volume
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-nginx
  namespace: multi-container-shared-volume
spec:
  selector:
    matchLabels:
      run: my-nginx
  replicas: 1
  template:
    metadata:
      labels:
        run: my-nginx
    spec:
      volumes:
      - name: html
        emptyDir: {}
      containers:
      - name: 1st
        image: nginx
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
        ports:
        - containerPort: 80
      - name: 2nd
        image: debian
        volumeMounts:
        - name: html
          mountPath: /html
        command: ["/bin/sh", "-c"]
        args:
          - while true; do
              date >> /html/index.html;
              sleep 1;
            done
        ports:
        - containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: my-nginx
  namespace: multi-container-shared-volume
  labels:
    run: my-nginx
spec:
  type: LoadBalancer
  ports:
  - port: 80
    protocol: TCP
  selector:
    run: my-nginx
EOF
Deploy it:
kubectl create -f /tmp/shared-volume.yml
Testing
Check the index.html file being updated every second:
kubectl \
  -n multi-container-shared-volume \
  exec my-nginx-[PRESS TAB TO AUTOCOMPLETE] \
  -c 1st -- /bin/cat /usr/share/nginx/html/index.html
You must have kubectl autocomplete enabled to autocomplete your pod's name. Otherwise, get your pod's name first by running kubectl -n multi-container-shared-volume get pods.
Get your service's LoadBalancer endpoint:
kubectl \
  -n multi-container-shared-volume \
  describe service my-nginx \
  | grep "LoadBalancer Ingress"
Access your LoadBalancer endpoint in your browser; you should see the list of timestamps that the second container keeps appending to index.html.
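The same page can be fetched from the terminal with curl (YOUR-ENDPOINT is the address returned by the previous command); run it a few times to watch new timestamps appear:
curl http://YOUR-ENDPOINT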
References
https://bitbucket.org/devopsbuzz/devops/src/master/kubernetes/deploy/basic/shared-volumes-000/
https://www.mirantis.com/blog/multi-container-pods-and-container-communication-in-kubernetes/
Spinnaker with Halyard
Keep in mind that there are several ways to deploy and use Spinnaker. For example, you can install it on your host server or run a docker image on any server. You are not obligated to deploy it on Kubernetes and deal with Halyard or Helm. If you are looking for a Quick Start, read this documentation: https://www.spinnaker.io/setup/quickstart/
I’m writing this post because this was the easiest, fastest and more reliable way I found. Also, I wanted an “all Kubernetes” solution, centralizing everything in my cluster.
Hardware requirements
At least 2 vCPU available;
Approximately 13GB of RAM available on the nodes (seriously, less than that is not enough and will result in a timeout during the deploy).
Create Spinnaker accounts
Create the manifest:
cat > /tmp/spinnaker-accounts.yml <<EOF
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: spinnaker-service-account
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: spinnaker-role-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- namespace: default
  kind: ServiceAccount
  name: spinnaker-service-account
EOF
Deploy it:
kubectl create -f /tmp/spinnaker-accounts.yml
Create tiller service account
The tiller account will be used later by Helm.
Create tiller service account:
kubectl -n kube-system create sa tiller
Create tiller cluster role binding:
kubectl create clusterrolebinding \
  tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller
Create Spinnaker namespace
To create Spinnaker namespace, run:
kubectl create namespace spinnaker
Create Spinnaker services
Create the manifest:
cat > /tmp/spinnaker-services.yml <<EOF
---
apiVersion: v1
kind: Service
metadata:
  namespace: spinnaker
  labels:
    app: spin
    stack: gate
  name: spin-gate-np
spec:
  type: LoadBalancer
  ports:
  - name: http
    port: 8084
    protocol: TCP
  selector:
    load-balancer-spin-gate: "true"
---
apiVersion: v1
kind: Service
metadata:
  namespace: spinnaker
  labels:
    app: spin
    stack: deck
  name: spin-deck-np
spec:
  type: LoadBalancer
  ports:
  - name: http
    port: 9000
    protocol: TCP
  selector:
    load-balancer-spin-deck: "true"
EOF
Deploy it:
kubectl create -f /tmp/spinnaker-services.yml
At this point Kubernetes will create Load Balancers and allocate IPs.
Deploy Halyard docker image
Create Halyard deployment:
kubectl create deployment hal \
  --image gcr.io/spinnaker-marketplace/halyard:stable
It will take a few minutes for Kubernetes to download the image and create the pod. You can follow the progress by listing your deployments:
kubectl get deployments
IMPORTANT: Do not proceed until hal is AVAILABLE.
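If you would rather block until the deployment is ready instead of polling, you can wait on the rollout (deployment name hal as created above):
kubectl rollout status deployment hal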
After your Halyard deployment is complete, let's edit the serviceAccountName:
kubectl edit deploy hal
The configuration file will be opened in your text editor.
Add the serviceAccountName to the spec just above the containers:
...
    spec:
      serviceAccountName: spinnaker-service-account
      containers:
      - image: gcr.io/spinnaker-marketplace/halyard:stable
        imagePullPolicy: IfNotPresent
        name: halyard
        resources: {}
...
Save and close the file. Kubernetes will automatically update the deployment and start a new pod with the new credentials.
Wait until Kubernetes finishes Terminating and ContainerCreating; all pods must be Running.
Configure Halyard
Now you need root access to the Halyard container.
At the time of this writing (2018-05-25), the Halyard Docker container does not allow root auth:
kubectl exec -it --user=root hal-<TAB> bash
error: auth info "root" does not exist
So SSH to the node where Halyard was deployed, then connect to its container TTY.
You need to follow the Workaround section of how to access a container TTY to use bash.
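As a rough sketch of that workaround, assuming a Docker-based node (the container name filter below is an assumption; see the section linked above for the full steps): SSH to the node running the hal pod, find the Halyard container, and exec into it as root:
docker ps | grep halyard
docker exec -it -u root CONTAINER-ID bash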
The Halyard container already has kubectl installed; you only need to configure it and run kubectl from inside the container.
At this point you should have:
Spinnaker and Tiller accounts in your Kubernetes cluster.
Spinnaker namespace in your Kubernetes cluster.
Spinnaker services and Load Balancers endpoints in your Kubernetes cluster.
Halyard docker image deployed in a pod.
Root access to your Halyard docker image.
kubectl configured to manage your cluster.
Is everything OK? Let’s move on…
Connected as root in your Halyard container, temporarily allow the spinnaker user to access the root folder:
chown -R root:spinnaker /root
chmod -R 775 /root/
Download and install Helm in your Halyard container.
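For example, a Helm v2 client (required to work with Tiller) can be installed like this; the version and download URL here are assumptions, so adjust them to the v2 release you want:
curl -LO https://storage.googleapis.com/kubernetes-helm/helm-v2.9.1-linux-amd64.tar.gz
tar -zxvf helm-v2.9.1-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin/helm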
Init Helm using the tiller account we created earlier:
helm init --service-account tiller --upgrade
Configure Spinnaker
Configure Docker registry
I'm using Docker Hub, but Spinnaker supports different Docker registries.
Export environment variables:
export ADDRESS=index.docker.io
export REPOSITORIES=YOUR-USER/YOUR-REPO
export USERNAME=YOUR-USER
Replace the values above with your own info.
TIP: this config uses a custom Docker Hub account and repository. You can use any public one if you want to keep it simple for now, for example (no username or password required):
export ADDRESS=index.docker.io
export REPOSITORIES=library/nginx
export USERNAME=
Add Docker Registry provider:
hal config provider docker-registry enable
hal config provider docker-registry account add my-docker-registry \
  --address $ADDRESS \
  --repositories $REPOSITORIES \
  --username $USERNAME \
  --password
Input your password.
Check if everything is OK:
hal config provider docker-registry account list
Configure storage
I'm using AWS S3, but Spinnaker supports different storage providers.
Export your AWS credentials:
export AWS_ACCESS_KEY_ID=ThyFreeFolk
export AWS_SECRET_ACCESS_KEY=YouShallNotPass
export EC2_REGION=ap-southeast-2
export AWS_DEFAULT_REGION=ap-southeast-2
Replace all variables with your info.
The AWS IAM user must have permission to create a bucket. The following policies were attached:
AmazonEC2FullAccess
IAMFullAccess
AmazonEC2ContainerRegistryFullAccess
AmazonS3FullAccess
AmazonVPCFullAccess
AmazonRoute53FullAccess
Add storage:
hal config storage s3 edit \
  --access-key-id $AWS_ACCESS_KEY_ID \
  --secret-access-key \
  --region $EC2_REGION
Then apply your config:
hal config storage edit --type s3
You can access your S3 and see that Halyard created a bucket with the following prefix: spin-
Configure Kubernetes provider
Setup Spinnaker to deploy into Kubernetes:
hal config provider kubernetes enable
hal config provider kubernetes \
  account add my-k8s-account \
  --docker-registries my-docker-registry
hal config deploy edit \
  --type distributed \
  --account-name my-k8s-account
Configure Spinnaker version
First, check which is the latest Spinnaker version available.
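Halyard can list the releases it knows about:
hal version list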
At the time of this writing (2018-05-25), the latest version is 1.7.4:
hal config version edit --version 1.7.4
Configure Spinnaker Dashboard access
You could deploy Spinnaker now, but do not do it yet. If you do, Spinnaker itself will work, but you would need to deal with boring SSH tunneling stuff to access its dashboard.
There is an easier way: use your Load Balancer endpoint to access Spinnaker dashboard.
To do so, first you need to know the endpoints of spin-deck-np and spin-gate-np services.
Describe your services:
kubectl describe svc -n spinnaker
If you have too many services, save the output of the command above to a file:
kubectl describe svc -n spinnaker > /tmp/output
From your services description output (either on the screen or inside /tmp/output), find your endpoints.
Find the spin-deck-np section and get its LoadBalancer Ingress URL.
Find the spin-gate-np section and get its LoadBalancer Ingress URL.
For example:
...
Name: spin-deck-np
...
LoadBalancer Ingress: 1199884489.ap-southeast-2.elb.amazonaws.com
...
...
Name: spin-gate-np
...
LoadBalancer Ingress: 301760506.ap-southeast-2.elb.amazonaws.com
...
Update Halyard spin-deck-np config using your spin-deck-np endpoint:
hal config security ui \
  edit --override-base-url http://YOUR-spin-deck-np-HERE:9000
Do not forget to use port 9000 for spin-deck-np.
Update Halyard spin-gate-np config using your spin-gate-np endpoint:
hal config security api \
  edit --override-base-url http://YOUR-spin-gate-np-HERE:8084
Do not forget to use port 8084 for spin-gate-np .
Deploy Spinnaker
Finally!
To deploy Spinnaker, run the following from your Halyard TTY:
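hal deploy apply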
Go grab a coffee (or tea, water). It will run for quite some time (for me, in a 16G RAM server, it took about 35min).
Open another terminal where you can use kubectl to connect to your cluster (it doesn’t need to be from inside Halyard container) and monitor the progress.
Wait until all pods are READY and RUNNING :
kubectl get pods -n spinnaker
NAME READY STATUS RESTARTS AGE
spin-clouddriver-bootstrap-v000-sdjnl 1/1 Running 0 3m
spin-clouddriver-v000-wg4j2 1/1 Running 0 1m
spin-deck-v000-8gg85 1/1 Running 0 1m
spin-echo-v000-ddc5d 1/1 Running 0 1m
spin-front50-v000-7f2c7 1/1 Running 0 1m
spin-gate-v000-wc8v9 1/1 Running 0 1m
spin-igor-v000-w82d6 1/1 Running 0 1m
spin-orca-bootstrap-v000-crss2 1/1 Running 0 2m
spin-orca-v000-pbfql 1/1 Running 0 1m
spin-redis-bootstrap-v000-fr4q8 1/1 Running 0 3m
spin-redis-v000-5dttx 1/1 Running 0 1m
spin-rosco-v000-t8jwd 1/1 Running 0 1m
Expose Spinnaker ports
Go back to your Halyard TTY (the one you ran hal deploy apply earlier) and run:
hal deploy connect
Forwarding from 127.0.0.1:8084 -> 8084
Forwarding from [::1]:8084 -> 8084
Forwarding from 127.0.0.1:9000 -> 9000
Forwarding from [::1]:9000 -> 9000
Now you can press CTRL+C to exit the command above (the deploy connect is already done).
Testing
At this point you should be fine (a little stressed, but alive).
Access in your browser the spin-deck-np endpoint on port 9000 .
For example (scroll all the way right):
http://1199884489.ap-southeast-2.elb.amazonaws.com:9000
You should see the Spinnaker dashboard.
Click on Action, Create Application to make sure everything is OK.
Back up the Halyard config in a safe place:
/home/spinnaker/.hal/config
When I say a "safe place", it is outside the Halyard container and outside your cluster. If, for any reason, you need to redeploy Spinnaker or rebuild your entire cluster from scratch, the Halyard config will be deleted.
You could restore everything by running all the steps in this post again, but believe me, backing up the Halyard config avoids headaches.
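One way to get the file out of the cluster is kubectl cp (replace HAL-POD-NAME with your actual hal pod name; the local destination path is just an example):
kubectl cp HAL-POD-NAME:/home/spinnaker/.hal/config ./halyard-config-backup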
Troubleshooting
If you cannot see the Spinnaker Dashboard and/or your deployments and pods are not healthy, start all steps from scratch (it can be complex if this is your first time).
If you can see Spinnaker Dashboard but can’t load any other screen or can’t perform any action, chances are you missed exposing Spinnaker ports .
If you need further troubleshooting, learn how to redeploy Spinnaker .
Rollback
Clean up everything:
kubectl delete namespace spinnaker
kubectl delete deployment hal
kubectl delete serviceaccount spinnaker-service-account
kubectl delete clusterrolebinding spinnaker-role-binding
kubectl delete serviceaccount tiller
kubectl delete clusterrolebinding tiller
References
https://www.mirantis.com/blog/how-to-deploy-spinnaker-on-kubernetes-a-quick-and-dirty-guide/
https://blog.spinnaker.io/exposing-spinnaker-to-end-users-4808bc936698
Ubuntu with interface and NoVNC access
Create the deployment yml:
cat > /tmp/ubuntu-novnc.yml <<EOF
---
apiVersion: v1
kind: Namespace
metadata:
  name: ubuntu-vnc
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ubuntu-vnc
  namespace: ubuntu-vnc
spec:
  selector:
    matchLabels:
      run: ubuntu-vnc
  replicas: 1
  template:
    metadata:
      labels:
        run: ubuntu-vnc
    spec:
      containers:
      - name: proxy
        image: chenjr0719/ubuntu-unity-novnc
        env:
        - name: NGROK
          value: "YES"
        - name: PASSWORD
          value: "123456"
        - name: SUDO
          value: "YES"
        ports:
        - containerPort: 6080
---
apiVersion: v1
kind: Service
metadata:
  name: ubuntu-vnc
  namespace: ubuntu-vnc
  labels:
    run: ubuntu-vnc
spec:
  type: LoadBalancer
  ports:
  - name: proxy
    protocol: TCP
    port: 80
    targetPort: 6080
  selector:
    run: ubuntu-vnc
EOF
Then run it (the manifest already defines and uses the ubuntu-vnc namespace, so no -n flag is needed):
kubectl create -f /tmp/ubuntu-novnc.yml
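To find the address to open in your browser, check the Service created in the ubuntu-vnc namespace and use its external IP (or hostname) on port 80:
kubectl -n ubuntu-vnc get service ubuntu-vnc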
References
https://hub.docker.com/r/chenjr0719/ubuntu-unity-novnc/tags/
Unifi
Deploy Unifi controller
SSH to the node which will host the controller.
Create the unifi user:
adduser --disabled-password --uid 999 unifi
usermod -aG docker unifi
Create the folder to store files:
mkdir /storage/storage-001/mnt-unifi
chown unifi:unifi /storage/storage-001/mnt-unifi
Go back to the workstation where your kubectl is configured.
Create namespace:
kubectl create namespace unifi
Deploy the controller:
kubectl create -f - <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: unifi
  namespace: unifi
  labels:
    app: unifi
spec:
  replicas: 1
  selector:
    matchLabels:
      app: unifi
  template:
    metadata:
      labels:
        app: unifi
    spec:
      containers:
      - name: unifi
        image: jacobalberty/unifi:stable
        ports:
        - name: port-8080
          containerPort: 8080
        - name: port-8443
          containerPort: 8443
        # UDP
        - name: port-3478
          containerPort: 3478
        - name: port-10001
          containerPort: 10001
        env:
        - name: TZ
          value: "Pacific/Auckland"
        - name: RUNAS_UID0
          value: "false"
        - name: UNIFI_UID
          value: "1006"
        - name: UNIFI_GID
          value: "1007"
        volumeMounts:
        - name: mnt-unifi
          mountPath: /unifi
      volumes:
      - name: mnt-unifi
        hostPath:
          path: /storage/storage-001/mnt-unifi
      nodeSelector:
        kubernetes.io/hostname: k8snode
EOF
Expose Unifi ports
kubectl create -f - <<EOF
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: unifi
  name: srv-unifi
  namespace: unifi
spec:
  externalTrafficPolicy: Cluster
  ports:
  - name: "port-30080"
    nodePort: 30080
    port: 8080
    protocol: TCP
    targetPort: 8080
  - name: "port-30443"
    nodePort: 30443
    port: 8443
    protocol: TCP
    targetPort: 8443
  - name: "port-30478"
    nodePort: 30478
    port: 3478
    protocol: UDP
    targetPort: 3478
  - name: "port-30001"
    nodePort: 30001
    port: 10001
    protocol: UDP
    targetPort: 10001
  selector:
    app: unifi
  sessionAffinity: None
  type: NodePort
EOF
You have to open all the nodePort ports, with their respective protocols, in your firewall.
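For example, if your nodes use ufw (this is only an illustration; use whatever firewall you actually run):
ufw allow 30080/tcp
ufw allow 30443/tcp
ufw allow 30478/udp
ufw allow 30001/udp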