Kops Configuration Procedures

Step 1 - Install kubectl


# Download the latest stable kubectl release:
curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
# Or pin the version used throughout this guide (v1.9.0):
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.9.0/bin/linux/amd64/kubectl
chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
export PATH=$PATH:/usr/local/bin/
kubectl   # prints usage if the install succeeded

Step 2 - Install kops


#Install "kops" on Linux
yum install wget
curl -LO https://github.com/kubernetes/kops/releases/download/$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4)/kops-linux-amd64
chmod +x kops-linux-amd64
sudo mv kops-linux-amd64 /usr/local/bin/kops

# Verify the installation
kops version
Reference - https://github.com/kubernetes/kops

Step 3 - Configure AWS Route 53


#DNS Changes
	Create a Hosted Zone in Route 53: k8.rajeshkumar.xyz
	Note the NS and SOA records it is assigned
	Go to the GoDaddy DNS console (where the parent domain is registered)
	Enter the NS records there so the subdomain delegates to Route 53
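
The hosted zone can also be created from the AWS CLI; a minimal sketch, assuming the same zone name as above (the caller reference is just any unique string you choose):

$ aws route53 create-hosted-zone --name k8.rajeshkumar.xyz --caller-reference k8-$(date +%s)
# The NS records to enter at GoDaddy are in the DelegationSet of the output;
# they can also be listed later:
$ aws route53 list-hosted-zones --query 'HostedZones[?Name==`k8.rajeshkumar.xyz.`]'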
	
$ yum install bind-utils -y
$ dig NS k8.rajeshkumar.xyz

[root@ip-172-31-19-219 ~]# dig NS k8.rajeshkumar.xyz

; <<>> DiG 9.9.4-RedHat-9.9.4-61.el7 <<>> NS k8.rajeshkumar.xyz
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 7101
;; flags: qr rd ra; QUERY: 1, ANSWER: 4, AUTHORITY: 0, ADDITIONAL: 1

;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;k8.rajeshkumar.xyz.            IN      NS

;; ANSWER SECTION:
k8.rajeshkumar.xyz.     60      IN      NS      ns-1392.awsdns-46.org.
k8.rajeshkumar.xyz.     60      IN      NS      ns-1599.awsdns-07.co.uk.
k8.rajeshkumar.xyz.     60      IN      NS      ns-257.awsdns-32.com.
k8.rajeshkumar.xyz.     60      IN      NS      ns-1013.awsdns-62.net.

;; Query time: 339 msec
;; SERVER: 172.31.0.2#53(172.31.0.2)
;; WHEN: Wed Jun 20 04:26:40 UTC 2018
;; MSG SIZE  rcvd: 188


Step 4 - Configure AWS user access and assign permissions


# Create an IAM group with the following permissions:
AmazonEC2FullAccess
AmazonRoute53FullAccess
AmazonS3FullAccess
IAMFullAccess
AmazonVPCFullAccess

# Create an IAM user with "Programmatic access" and assign it to the created group

# Download the "AWS Access Key ID" and "AWS Secret Access Key"
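
The same setup can be scripted with the AWS CLI; a minimal sketch, assuming a group and user both named "kops" (the names are placeholders, pick your own):

aws iam create-group --group-name kops
for p in AmazonEC2FullAccess AmazonRoute53FullAccess AmazonS3FullAccess IAMFullAccess AmazonVPCFullAccess; do
  aws iam attach-group-policy --group-name kops --policy-arn "arn:aws:iam::aws:policy/$p"
done
aws iam create-user --user-name kops
aws iam add-user-to-group --user-name kops --group-name kops
aws iam create-access-key --user-name kops   # prints the Access Key ID and Secret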

Step 5 - Configure the AWS CLI


# Ubuntu - Install and configure the AWS CLI on Linux
$ sudo apt-get install awscli
OR
$ sudo apt-get install python-pip
$ sudo pip install awscli

# RHEL/CentOS - Install and configure the AWS CLI on Linux
$ sudo yum -y update
$ sudo yum -y install python-pip
$ sudo pip install awscli

# Python Script - Install and configure the AWS CLI on Linux
$ curl "https://bootstrap.pypa.io/get-pip.py" -o "get-pip.py"
$ python get-pip.py
$ sudo pip install awscli

$ aws configure
[root@ip-172-31-19-219 ec2-user]# aws configure
AWS Access Key ID [None]: AKIAXXXXXXXXXXXXXXXX
AWS Secret Access Key [None]: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
Default region name [None]: ap-south-1
Default output format [None]:

# Verify AWS Configuration
$ aws ec2 describe-regions
$ aws ec2 describe-availability-zones --region ap-south-1

#Mumbai Region
region - ap-south-1
AZ - ap-south-1a

Step 6 - Create an S3 bucket


# Create and then list a new S3 bucket
$ aws s3 mb s3://cluster1.k8.rajeshkumar.xyz
$ aws s3 ls
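
kops stores the cluster state in this bucket, so the kops docs recommend enabling versioning on it:

$ aws s3api put-bucket-versioning --bucket cluster1.k8.rajeshkumar.xyz --versioning-configuration Status=Enabled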

Step 7 - Set the environment variables




$ export CLUSTER_NAME="cluster1.k8.rajeshkumar.xyz"
$ export KUBERNETES_VERSION="https://storage.googleapis.com/kubernetes-release/release/v1.9.0/"
$ export AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION:-ap-south-1}
$ export AWS_AVAILABILITY_ZONES="$(aws ec2 describe-availability-zones --query 'AvailabilityZones[].ZoneName' --output text | awk -v OFS="," '$1=$1')"
$ export KOPS_STATE_STORE=s3://cluster1.k8.rajeshkumar.xyz

# On the AWS instance, as the ec2-user, give kops a public key to install on
# the cluster nodes (kops looks for ~/.ssh/id_rsa.pub by default; the EC2 key
# is already in authorized_keys, so reuse it):
$ cp ~/.ssh/authorized_keys ~/.ssh/id_rsa.pub

$ kops create cluster --name $CLUSTER_NAME --zones $AWS_AVAILABILITY_ZONES --kubernetes-version $KUBERNETES_VERSION
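
If the key lives elsewhere it can be passed explicitly; --ssh-public-key is a standard kops flag:

$ kops create cluster --name $CLUSTER_NAME --zones $AWS_AVAILABILITY_ZONES --kubernetes-version $KUBERNETES_VERSION --ssh-public-key ~/.ssh/id_rsa.pub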

Check the following resources in the preview output:
	AutoscalingGroup/master-ap-south-1a.masters.cluster1.k8.rajeshkumar.xyz
	AutoscalingGroup/nodes.cluster1.k8.rajeshkumar.xyz
	DHCPOptions/cluster1.k8.rajeshkumar.xyz
	EBSVolume/a.etcd-events.cluster1.k8.rajeshkumar.xyz
	EBSVolume/a.etcd-main.cluster1.k8.rajeshkumar.xyz
	IAMInstanceProfile/masters.cluster1.k8.rajeshkumar.xyz
	IAMInstanceProfile/nodes.cluster1.k8.rajeshkumar.xyz
	IAMInstanceProfileRole/masters.cluster1.k8.rajeshkumar.xyz
	IAMInstanceProfileRole/nodes.cluster1.k8.rajeshkumar.xyz
	IAMRole/masters.cluster1.k8.rajeshkumar.xyz
	IAMRole/nodes.cluster1.k8.rajeshkumar.xyz
	IAMRolePolicy/masters.cluster1.k8.rajeshkumar.xyz
	IAMRolePolicy/nodes.cluster1.k8.rajeshkumar.xyz
	InternetGateway/cluster1.k8.rajeshkumar.xyz
	Keypair/apiserver-aggregator
	Keypair/apiserver-aggregator-ca
	Keypair/apiserver-proxy-client
	Keypair/ca
	Keypair/kops
	Keypair/kube-controller-manager
	Keypair/kube-proxy
	Keypair/kube-scheduler
	Keypair/kubecfg
	Keypair/kubelet
	Keypair/kubelet-api
	Keypair/master
	LaunchConfiguration/master-ap-south-1a.masters.cluster1.k8.rajeshkumar.xyz
	LaunchConfiguration/nodes.cluster1.k8.rajeshkumar.xyz
	ManagedFile/cluster1.k8.rajeshkumar.xyz-addons-bootstrap
	ManagedFile/cluster1.k8.rajeshkumar.xyz-addons-core.addons.k8s.io
	ManagedFile/cluster1.k8.rajeshkumar.xyz-addons-dns-controller.addons.k8s.io-k8s-1.6
	ManagedFile/cluster1.k8.rajeshkumar.xyz-addons-dns-controller.addons.k8s.io-pre-k8s-1.6
	ManagedFile/cluster1.k8.rajeshkumar.xyz-addons-kube-dns.addons.k8s.io-k8s-1.6
	ManagedFile/cluster1.k8.rajeshkumar.xyz-addons-kube-dns.addons.k8s.io-pre-k8s-1.6
	ManagedFile/cluster1.k8.rajeshkumar.xyz-addons-limit-range.addons.k8s.io
	ManagedFile/cluster1.k8.rajeshkumar.xyz-addons-rbac.addons.k8s.io-k8s-1.8
	ManagedFile/cluster1.k8.rajeshkumar.xyz-addons-storage-aws.addons.k8s.io-v1.6.0
	ManagedFile/cluster1.k8.rajeshkumar.xyz-addons-storage-aws.addons.k8s.io-v1.7.0
	Route/0.0.0.0/0
	RouteTable/cluster1.k8.rajeshkumar.xyz
	RouteTableAssociation/ap-south-1a.cluster1.k8.rajeshkumar.xyz
	RouteTableAssociation/ap-south-1b.cluster1.k8.rajeshkumar.xyz
	SSHKey/kubernetes.cluster1.k8.rajeshkumar.xyz-47
	Secret/admin
	Secret/kube
	Secret/kube-proxy
	Secret/kubelet
	Secret/system:controller_manager
	Secret/system:dns
	Secret/system:logging
	Secret/system:monitoring
	Secret/system:scheduler
	SecurityGroup/masters.cluster1.k8.rajeshkumar.xyz
	SecurityGroup/nodes.cluster1.k8.rajeshkumar.xyz
	SecurityGroupRule/all-master-to-master
	SecurityGroupRule/all-master-to-node
	SecurityGroupRule/all-node-to-node
	SecurityGroupRule/https-external-to-master-0.0.0.0/0
	SecurityGroupRule/master-egress
	SecurityGroupRule/node-egress
	SecurityGroupRule/node-to-master-tcp-1-2379
	SecurityGroupRule/node-to-master-tcp-2382-4000
	SecurityGroupRule/node-to-master-tcp-4003-65535
	SecurityGroupRule/node-to-master-udp-1-65535
	SecurityGroupRule/ssh-external-to-master-0.0.0.0/0
	SecurityGroupRule/ssh-external-to-node-0.0.0.0/0
	Subnet/ap-south-1a.cluster1.k8.rajeshkumar.xyz
	Subnet/ap-south-1b.cluster1.k8.rajeshkumar.xyz
	VPC/cluster1.k8.rajeshkumar.xyz
	VPCDHCPOptionsAssociation/cluster1.k8.rajeshkumar.xyz
	
# Preview the changes kops will make
$ kops update cluster --name $CLUSTER_NAME

# Apply the changes
$ kops update cluster --name $CLUSTER_NAME --yes
OR
# $ kops create cluster --name $CLUSTER_NAME --zones $AWS_AVAILABILITY_ZONES --kubernetes-version $KUBERNETES_VERSION --yes

$ kops validate cluster cluster1.k8.rajeshkumar.xyz

[ec2-user@ip-172-31-19-219 ~]$ kops validate cluster
Using cluster from kubectl context: cluster1.k8.rajeshkumar.xyz

Validating cluster cluster1.k8.rajeshkumar.xyz

INSTANCE GROUPS
NAME                    ROLE    MACHINETYPE     MIN     MAX     SUBNETS
master-ap-south-1a      Master  c4.large        1       1       ap-south-1a
nodes                   Node    t2.medium       2       2       ap-south-1a,ap-south-1b

NODE STATUS
NAME                                            ROLE    READY
ip-172-20-37-133.ap-south-1.compute.internal    master  True
ip-172-20-42-67.ap-south-1.compute.internal     node    True
ip-172-20-87-44.ap-south-1.compute.internal     node    True

Your cluster cluster1.k8.rajeshkumar.xyz is ready

# Copy the private key (the EC2 .pem file) to ~/.ssh/id_rsa
$ chmod 600 ~/.ssh/id_rsa
$ ssh -i ~/.ssh/id_rsa admin@api.cluster1.k8.rajeshkumar.xyz
$ ps -eaf
$ ps -eaf | grep docker
$ sudo -s 
$ docker info
	Observe the following containers...
		k8s_dns-controller_dns-controller
		k8s_POD_dns-controller
		k8s_etcd-container_etcd-server-events
		k8s_etcd-container_etcd-server
		k8s_kube-proxy_kube-proxy
		k8s_kube-apiserver_kube-apiserver
		k8s_kube-scheduler_kube-scheduler
		k8s_kube-controller-manager_kube-controller-manager
		k8s_POD_kube-proxy
		k8s_POD_kube-scheduler
		k8s_POD_kube-apiserver
		k8s_POD_etcd-server-events
		k8s_POD_kube-controller-manager
		k8s_POD_etcd-server
		happy_bell
$ ps -eaf | grep worker

# The following resources are created in AWS
keypair:kubernetes.cluster1.k8.rajeshkumar.xyz-47:0a:63:3d:f1:f4:f5:dd:7d:bc:00:55:34:c8:48:c5
internet-gateway:igw-d07794b8   
autoscaling-group:master-ap-south-1a.masters.cluster1.k8.rajeshkumar.xyz       
instance:i-08ea4e96805f6dbb8   
instance:i-01b585e559cd7a2b8   
autoscaling-group:nodes.cluster1.k8.rajeshkumar.xyz     
instance:i-07a88ea4061a213c0   
route53-record:Z26H1FJAGQ3MLK/etcd-events-a.internal.cluster1.k8.rajeshkumar.xyz.      
iam-instance-profile:masters.cluster1.k8.rajeshkumar.xyz      
iam-instance-profile:nodes.cluster1.k8.rajeshkumar.xyz 
iam-role:nodes.cluster1.k8.rajeshkumar.xyz     
iam-role:masters.cluster1.k8.rajeshkumar.xyz   
autoscaling-config:master-ap-south-1a.masters.cluster1.k8.rajeshkumar.xyz-20180620091847 
autoscaling-config:nodes.cluster1.k8.rajeshkumar.xyz-20180620091847  
volume:vol-044579815b3e6658b 
volume:vol-096e93c15b3599f2d    
security-group:sg-1f02e575     
security-group:sg-9003e4fa     
dhcp-options:dopt-67fbd40f
subnet:subnet-1a4e3872
route-table:rtb-2286814a
vpc:vpc-2bc68543
subnet:subnet-3db81f71


	
# The following options can also be passed to kops create cluster:

 --cloud=aws
 --zones="ap-south-1a"
 --dns-zone=rajesh.rajeshkumar.xyz
 --name cluster-1.rajesh.rajeshkumar.xyz
 --kubernetes-version="1.8.1"
 --state=s3://cluster1.k8.rajeshkumar.xyz
 --node-count=2


# Tear down the cluster and all of the AWS resources listed above
$ kops delete cluster --name=cluster1.k8.rajeshkumar.xyz --yes

###############PHASE - 2###############
# Instance sizes for the second build (the defaults apply if not already set)
$ export NODE_SIZE=${NODE_SIZE:-t2.small}
$ export MASTER_SIZE=${MASTER_SIZE:-t2.medium}


$ kops create cluster --name $CLUSTER_NAME --zones $AWS_AVAILABILITY_ZONES --kubernetes-version $KUBERNETES_VERSION --node-size  $NODE_SIZE --master-size $MASTER_SIZE --node-count 1 --yes

$ kops validate cluster cluster1.k8.rajeshkumar.xyz
$ more .kube/config

# How to get instance groups? List the cluster and its instance groups:
$ kops get cluster1.k8.rajeshkumar.xyz
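
To list only the instance groups, kops get ig takes the cluster name via --name:

$ kops get ig --name cluster1.k8.rajeshkumar.xyz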

# Change the number of nodes
# Edit the "nodes" InstanceGroup
$ kops edit ig --name=cluster1.k8.rajeshkumar.xyz nodes
	----- MODIFY minSize (and maxSize) in the editor that opens
$ kops update cluster --name $CLUSTER_NAME --yes
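
Some changes (an instance type, for example) only take effect when the instances are replaced; kops has a rolling-update subcommand for that:

$ kops rolling-update cluster --name $CLUSTER_NAME          # preview
$ kops rolling-update cluster --name $CLUSTER_NAME --yes    # apply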

# To edit your "master" "InstanceGroup"
$ kops edit ig --name=cluster1.k8.rajeshkumar.xyz master-ap-south-1a


-------------------------------------------------------
			BACK TO kubectl
$ kubectl get pods
$ kubectl get deployments
$ kubectl get nodes
$ kubectl get replicasets
$ kubectl get replicationcontrollers
$ kubectl get services

$ kubectl config get-clusters
$ kubectl config current-context

$ kubectl get nodes
$ kubectl describe nodes ip-172-20-34-139.ap-south-1.compute.internal

$ kubectl run hello-nginx --image=nginx --port=80
$ kubectl expose deployment hello-nginx --type=NodePort
# Find the NodePort that was assigned when the service was created:
$ kubectl get services
# As you can see, we have a container running and exposing port 80; Kubernetes exposed it on all nodes on port 31594 (your port will differ)
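
The assigned NodePort can also be read directly with a jsonpath query:

$ kubectl get service hello-nginx -o jsonpath='{.spec.ports[0].nodePort}'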

$ ssh -i ~/.ssh/id_rsa admin@<node-ip>
$ curl localhost:31594

# Next we will expose the service through a LoadBalancer; after all, we deployed
# the whole thing for real traffic, not just for NodePort testing.

# This fails because a service named hello-nginx already exists from the NodePort step:
$ kubectl expose deployment hello-nginx --port 8080 --target-port=80 --type=LoadBalancer
# So give the LoadBalancer service its own name:
$ kubectl expose deployment hello-nginx --name hello-nginx-lc --port 8080 --target-port=80 --type=LoadBalancer
$ kubectl get services
$ kubectl describe services hello-nginx-lc
$ curl aa710abc4748b11e88b510236e1b2082-1881749215.ap-south-1.elb.amazonaws.com:8080
or open the same URL in a browser
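
The ELB hostname in that URL can be read from the service status once AWS has provisioned the load balancer:

$ kubectl get service hello-nginx-lc -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'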

####FINAL######
Now our service is publicly accessible! Let's create an alias, nginx.k8.rajeshkumar.xyz, for our load balancer: go to Route 53,
and in our subdomain, k8.rajeshkumar.xyz, create a new record set of type A with Alias=Yes;
click the alias target and pick your load balancer from the list of ELB classic load balancers.
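
The same record can be created from the CLI; a minimal sketch, where <your-zone-id> is the hosted zone ID from Step 3 and the ELB's canonical hosted zone ID is looked up first:

ELB_DNS=$(kubectl get service hello-nginx-lc -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
ELB_ZONE_ID=$(aws elb describe-load-balancers --query "LoadBalancerDescriptions[?DNSName=='$ELB_DNS'].CanonicalHostedZoneNameID" --output text)
aws route53 change-resource-record-sets --hosted-zone-id <your-zone-id> --change-batch '{
  "Changes": [{
    "Action": "UPSERT",
    "ResourceRecordSet": {
      "Name": "nginx.k8.rajeshkumar.xyz",
      "Type": "A",
      "AliasTarget": {
        "HostedZoneId": "'"$ELB_ZONE_ID"'",
        "DNSName": "'"$ELB_DNS"'",
        "EvaluateTargetHealth": false
      }
    }
  }]
}'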

$ kops delete cluster --name=cluster1.k8.rajeshkumar.xyz --yes

####FINAL######




# Create a cluster in AWS that has HA masters. This cluster
# will be set up with internal networking in a private VPC.
# A bastion instance will be set up to provide instance access.
# (Assumes ZONES, NODE_SIZE and MASTER_SIZE are exported, as above.)
$ kops create cluster k8s-clusters.example.com \
  --node-count 3 \
  --zones $ZONES \
  --node-size $NODE_SIZE \
  --master-size $MASTER_SIZE \
  --master-zones $ZONES \
  --networking weave \
  --topology private \
  --bastion="true" \
  --yes
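
With a private topology the masters are only reachable through the bastion; kops publishes it under bastion.<cluster-name> (see the kops bastion docs). A sketch using SSH agent forwarding:

$ ssh-add ~/.ssh/id_rsa
$ ssh -A admin@bastion.k8s-clusters.example.com
# then hop from the bastion to a master or node by its private IP:
$ ssh admin@<master-private-ip>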


kops edit cluster cluster-1.raj.rajeshkumar.xyz
kops update cluster --yes cluster-1.raj.rajeshkumar.xyz

 * list clusters with: kops get cluster
 * edit this cluster with: kops edit cluster cluster-1.raj.rajeshkumar.xyz
 * edit your node instance group: kops edit ig --name=cluster-1.raj.rajeshkumar.xyz nodes
 * edit your master instance group: kops edit ig --name=cluster-1.raj.rajeshkumar.xyz master-ap-south-1a

 * validate cluster: kops validate cluster
 * list nodes: kubectl get nodes --show-labels
 * ssh to the master: ssh -i ~/.ssh/id_rsa admin@api.cluster-1.raj.rajeshkumar.xyz
The admin user is specific to Debian. If not using Debian please use the appropriate user based on your OS.
 * read about installing addons: https://github.com/kubernetes/kops/blob/master/docs/addons.md


