- install kubectl...
$ curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/v0.14.0/kind-$(uname)-amd64
$ chmod +x ./kind
$ sudo mv ./kind /usr/local/bin/
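The heading above also mentions kubectl; a minimal install sketch for a Linux amd64 host, pulling the latest stable release from the official dl.k8s.io channel:

$ curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
$ chmod +x ./kubectl
$ sudo mv ./kubectl /usr/local/bin/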
QuickStart: Create a simple kind cluster with extraPortMappings and node-labels.
- create cluster
- extraPortMappings allow the local host to make requests to the Ingress controller over ports 80/443
- node-labels restrict the ingress controller to run only on the node(s) matching the label selector
cat <<EOF | kind create cluster --name wslk8s --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  kubeadmConfigPatches:
  - |
    kind: InitConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        node-labels: "ingress-ready=true"
  extraPortMappings:
  - containerPort: 80
    hostPort: 80
    protocol: TCP
  - containerPort: 443
    hostPort: 443
    protocol: TCP
EOF
- test whether it works...
$ kubectl cluster-info
Kubernetes control plane is running at https://127.0.0.1:35537
CoreDNS is running at https://127.0.0.1:35537/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
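To double-check that the node label and the port mappings took effect (the container name wslk8s-control-plane follows kind's <cluster-name>-control-plane convention):

# the control-plane node should carry the ingress-ready=true label
$ kubectl get nodes --show-labels | grep ingress-ready
# the kind node container should publish 80/443 on the host
$ docker port wslk8s-control-plane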
Advanced: create a kind cluster with ingress & local registry enabled
#!/bin/sh
set -o errexit
# with cluster name:
# --name wslk8s
# with registry name:
# kind-registry
# with registry port:
# 5050
# with ingress enabled
# create registry container unless it already exists
reg_name='kind-registry'
reg_host="<TODO: your reg host>"
reg_port='5050'
if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
docker run \
-d --restart=always -p "${reg_port}:5000" --name "${reg_name}" \
registry:2
fi
# create a cluster with the local registry enabled in containerd
cat <<EOF | kind create cluster --name wslk8s --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
containerdConfigPatches:
- |-
  [plugins."io.containerd.grpc.v1.cri".registry.mirrors."${reg_host}:${reg_port}"]
    endpoint = ["http://${reg_host}:${reg_port}"]
nodes:
- role: control-plane
  kubeadmConfigPatches:
  - |
    kind: InitConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        node-labels: "ingress-ready=true"
  extraPortMappings:
  - containerPort: 80
    hostPort: 80
    protocol: TCP
  - containerPort: 443
    hostPort: 443
    protocol: TCP
EOF
# connect the registry to the cluster network if not already connected
if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" = 'null' ]; then
docker network connect "kind" "${reg_name}"
fi
# Document the local registry
# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: local-registry-hosting
  namespace: kube-public
data:
  localRegistryHosting.v1: |
    host: "${reg_host}:${reg_port}"
    help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
EOF
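To try the local registry once the cluster is up, a rough sketch (the nginx image and the registry-test pod name are just examples; <your reg host> is whatever you set reg_host to, and it must resolve to the Docker host where port 5050 is published):

$ docker pull nginx:alpine
$ docker tag nginx:alpine <your reg host>:5050/nginx:alpine
$ docker push <your reg host>:5050/nginx:alpine
# the kind nodes pull through the containerd mirror configured above
$ kubectl run registry-test --image=<your reg host>:5050/nginx:alpine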
The ingress controller is exposed as a Service of type LoadBalancer. It is also another set of pods running on the nodes of your k8s cluster, which handle the evaluation and processing of ingress routes.
- evaluates all the rules
- manages redirections
- entrypoint to cluster
- many third-party implementations
- there is one from kubernetes itself, the K8s nginx ingress controller
- if you are using a cloud service, you would have a cloud load balancer that is specifically implemented by that cloud provider.
To install the K8s nginx ingress controller:
# specific version
# kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.2/deploy/static/provider/cloud/deploy.yaml
# latest version
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml
You will notice that the listening ports of ingress-nginx-controller have changed:
$ kubectl get svc -A
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
ingress-nginx ingress-nginx-controller LoadBalancer 10.108.84.93 localhost 80:31402/TCP,443:32053/TCP 20s
Wait for the ingress-nginx-controller pod to be running.
$ kubectl get po -n ingress-nginx
NAME READY STATUS RESTARTS AGE
ingress-nginx-admission-create-sxwsg 0/1 Completed 0 51s
ingress-nginx-admission-patch-b85nx 0/1 Completed 1 50s
ingress-nginx-controller-b6cb664bc-tnzbm 1/1 Running 0 51s
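Instead of polling manually, you can also block until the controller pod is ready (this is the wait command from the kind ingress guide; the label selector assumes the standard ingress-nginx deploy manifest):

$ kubectl wait --namespace ingress-nginx \
  --for=condition=ready pod \
  --selector=app.kubernetes.io/component=controller \
  --timeout=90s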
Here is a simple example to test ingress on macOS with Docker Desktop.
apple.yaml
kind: Pod
apiVersion: v1
metadata:
  name: apple-app
  labels:
    app: apple
spec:
  containers:
  - name: apple-app
    image: hashicorp/http-echo
    args:
    - "-text=apple"
---
kind: Service
apiVersion: v1
metadata:
  name: apple-service
spec:
  selector:
    app: apple
  ports:
  - port: 5678 # Default port for image
banana.yaml
kind: Pod
apiVersion: v1
metadata:
  name: banana-app
  labels:
    app: banana
spec:
  containers:
  - name: banana-app
    image: hashicorp/http-echo
    args:
    - "-text=banana"
---
kind: Service
apiVersion: v1
metadata:
  name: banana-service
spec:
  selector:
    app: banana
  ports:
  - port: 5678 # Default port for image
ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-ingress
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
    # kubernetes.io/ingress.class: "nginx"
spec:
  rules:
  - host:
    http:
      paths:
      - path: /apple
        pathType: Prefix
        backend:
          service:
            name: apple-service
            port:
              number: 5678
      - path: /banana
        pathType: Prefix
        backend:
          service:
            name: banana-service
            port:
              number: 5678
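Apply the three manifests (assuming you saved them under the file names shown above):

$ kubectl apply -f apple.yaml -f banana.yaml -f ingress.yaml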
To check whether your ingress has got an address:
$ kubectl get ingress -A
NAMESPACE NAME CLASS HOSTS ADDRESS PORTS AGE
default example-ingress <none> * localhost 80 4m17s
$ curl localhost/apple
apple
If your ingress's ADDRESS column stays empty, you can use the following commands to debug:
$ kubectl describe ingress example-ingress
Name: example-ingress
Namespace: default
Address: localhost
Default backend: default-http-backend:80 (<error: endpoints "default-http-backend" not found>)
Rules:
Host Path Backends
---- ---- --------
*
/apple apple-service:5678 (10.1.0.17:5678)
/banana banana-service:5678 (10.1.0.18:5678)
Annotations: ingress.kubernetes.io/rewrite-target: /
kubernetes.io/ingress.class: nginx
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Sync 91s (x2 over 2m) nginx-ingress-controller Scheduled for sync
$ kubectl -n ingress-nginx logs ingress-nginx-controller-xxxxx
...
I0325 07:01:02.452984 8 status.go:299] "updating Ingress status" namespace="default" ingress="example-ingress" currentValue=[] newValue=[{IP: Hostname:localhost Ports:[]}]
I0325 07:01:02.461367 8 event.go:282] Event(v1.ObjectReference{Kind:"Ingress", Namespace:"default", Name:"example-ingress", UID:"e1b49b5e-c5a0-49a0-8e2a-64dd36820261", APIVersion:"networking.k8s.io/v1", ResourceVersion:"296455", FieldPath:""}): type: 'Normal' reason: 'Sync' Scheduled for sync
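If the controller does not pick up the ingress at all, it is also worth checking which IngressClasses exist in the cluster (the CLASS column above is <none>, so class matching is one common cause of an empty ADDRESS):

$ kubectl get ingressclass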
PS. Do NOT add annotations that are not recognized by ingress-nginx, e.g. AWS ALB annotations!
Route with a specific host, sample:
ingress with a specified host
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {name}-ingress-443
spec:
  rules:
  - host: dot-iap-dev.imac
    http:
      paths:
      - pathType: Prefix
        path: /
        backend:
          service:
            name: {name}
            port:
              number: {port}
$ kubectl get ingress -A
NAMESPACE NAME CLASS HOSTS ADDRESS PORTS AGE
umc-dot-dev dot-iap-ingress-443 <none> dot-iap-dev.imac localhost 80 28m
$ curl dot-iap-dev.imac
ok
$ curl localhost
404 Not Found
# because the ingress host is `dot-iap-dev.imac`
# we need to specify that host in the request headers
$ curl -H "host: dot-iap-dev.imac" localhost
ok
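For `curl dot-iap-dev.imac` to work without the explicit Host header, the name has to resolve to the ingress address; a minimal sketch, assuming the ingress listens on 127.0.0.1 (as with kind or Docker Desktop):

$ echo "127.0.0.1 dot-iap-dev.imac" | sudo tee -a /etc/hosts
$ curl dot-iap-dev.imac
ok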
- install dashboard
# check https://github.com/kubernetes/dashboard
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.6.0/aio/deploy/recommended.yaml
- create a simple user
- Creating a Service Account
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
- Creating a ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
- Getting a Bearer Token
$ kubectl -n kubernetes-dashboard create token admin-user --duration=10000h
eyJhbGciOiJSUzI1NiIsImtpZCI...
- to allow access from a remote machine
- if you can successfully access the cluster via kubectl
kubectl proxy
- otherwise, you can use ssh tunnel
- on your workstation, create a tunnel to k8s server
ssh -L 9999:127.0.0.1:8001 -N -f -l <ssh user name> <k8s master host name or ip>
- replace 8001 with 9999 in the dashboard URL
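With the proxy (or tunnel) in place, the dashboard is served under the standard proxy URL; a sketch, using port 8001 for a local kubectl proxy and 9999 when going through the tunnel above:

# local kubectl proxy
http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/
# through the ssh tunnel
http://localhost:9999/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/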
- remote tunnels you may use...
# tunnel 1, access k8s dashboard, 9999 -> 8001, dashboard port
loc_port=9999
ret=`lsof -i:$loc_port`
if [ -z "$ret" ]
then
    nohup ssh -p 2222 -L $loc_port:127.0.0.1:8001 -N -f -l <ssh user> <ssh host> &> /dev/null
fi

sleep 1

# tunnel 2, access kubectl, 9998 -> api server port
loc_port=9998
ret=`lsof -i:$loc_port`
if [ -z "$ret" ]
then
    nohup ssh -p 2222 -L $loc_port:127.0.0.1:45933 -N -f -l <ssh user> <ssh host> &> /dev/null
fi
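The API server port (45933 above) is specific to your cluster; one way to look it up on the k8s host before wiring the second tunnel:

$ kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}'
https://127.0.0.1:45933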