AKS-Tasks/Projects

Note:

# Kubectl, Azure CLI (Ubuntu 22.04) & AKS cluster with NGINX deploy:
sudo apt update
# Install the latest stable kubectl binary
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
# NOTE: --short was removed in kubectl v1.28; --client alone prints the client version
kubectl version --client
# Install Azure CLI via Microsoft's install script
curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
az version
az login
# Resource group, 2-node AKS cluster and ACR registry
az group create --name nn-RG-1 --location southcentralus
az aks create --resource-group nn-RG-1 --name nn-aks --node-count 2 --generate-ssh-keys
az aks show --name nn-aks --resource-group nn-RG-1
az acr create --resource-group nn-RG-1 --name nnacr786 --sku Standard --location southcentralus
# Merge cluster credentials into ~/.kube/config
az aks get-credentials --resource-group nn-RG-1 --name nn-aks --overwrite-existing
kubectl get nodes
kubectl get deployments --all-namespaces=true
# Grant the AKS kubelet identity pull access to the ACR registry
az aks update -n nn-aks -g nn-RG-1 --attach-acr nnacr786
# kubernetes/website's default branch was renamed from 'master' to 'main'
kubectl create -f https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/controllers/nginx-deployment.yaml
kubectl get deployments
# Cleanup: delete the whole resource group asynchronously
az group delete --name nn-RG-1 --yes --no-wait
# Project: Deploy Springboot Microservices App into AKS cluster using Jenkins Pipeline and Helm
sudo su - jenkins
az login
vi create-aks.sh
sh create-aks.sh
kubectl get nodes
helm create nn-chart    # run this helm command from the root of the GitHub repo code checkout
tree nn-chart
nn-chart/values.yaml -->  repository: myacrrepo531.azurecr.io/xxxxxx    # our ACR image link
nn-chart/values.yaml --> set service.type to LoadBalancer
nn-chart/templates/deployment.yaml --> change containerPort to 8080
Jenkins Portal --> create Maven3 variable under Global tool configuration
Jenkins --> Manage Jenkins --> Credentials --> add the Azure ACR username and password (credentials ID: ACR), taken from the Azure Portal --> registry --> Settings --> Access keys
Run the Jenkins Build Pipeline
helm ls -n helm-deployment
kubectl get pods -n helm-deployment
kubectl get svc -n helm-deployment ---> access the Spring Boot app in a browser via the service's LoadBalancer EXTERNAL-IP and port
az group delete --name resource-group-name --yes --no-wait

Script File (create-aks.sh):
#!/bin/sh
# Creates an AKS cluster, an ACR repo, and a namespace for Helm deployments.
# Abort on the first failing command so later steps never run against a
# half-created environment.
set -e

# Resource group and region
AKS_RESOURCE_GROUP=aks-rg
AKS_REGION=centralus
# Cluster name
AKS_CLUSTER=aks-cluster
# ACR name (must be unique across Azure)
ACR_NAME=myacrrepo531

echo "${AKS_RESOURCE_GROUP}, ${AKS_REGION}, ${AKS_CLUSTER}, ${ACR_NAME}"

# Create resource group
az group create --location "${AKS_REGION}" --name "${AKS_RESOURCE_GROUP}"

# Create AKS cluster with two worker nodes
az aks create --resource-group "${AKS_RESOURCE_GROUP}" --name "${AKS_CLUSTER}" --node-count 2 --generate-ssh-keys

# Create Azure Container Registry
az acr create --resource-group "${AKS_RESOURCE_GROUP}" \
              --name "${ACR_NAME}" \
              --sku Standard \
              --location "${AKS_REGION}"

# Grant the AKS kubelet identity permission to pull images from ACR
az aks update -n "${AKS_CLUSTER}" -g "${AKS_RESOURCE_GROUP}" --attach-acr "${ACR_NAME}"

# Merge cluster credentials into the local kubeconfig
az aks get-credentials --name "${AKS_CLUSTER}" --resource-group "${AKS_RESOURCE_GROUP}"

# Namespace targeted by the Helm deployment pipeline
kubectl create namespace helm-deployment

Jenkins Pipeline:
pipeline {
    agent any

    tools {
        // Maven installation configured under Manage Jenkins -> Global Tool Configuration
        maven 'Maven3'
    }

    environment {
        // ACR registry name created in Azure cloud (also used as the image name)
        registryName = "myacrrepo531"
        // Jenkins credentials ID holding the ACR username/password (from Access keys)
        registryCredential = 'ACR'
        dockerImage = ''
        // ACR login server used for docker push
        registryUrl = 'myacrrepo531.azurecr.io'
    }

    stages {
        stage('checkout') {
            steps {
                checkout([$class: 'GitSCM', branches: [[name: '*/master']], extensions: [], userRemoteConfigs: [[url: 'check_out_from_your_repo_after_forking_my_repo']]])
            }
        }

        stage('Build Jar') {
            steps {
                sh 'mvn clean install'
            }
        }

        stage('Build Docker image') {
            steps {
                script {
                    dockerImage = docker.build registryName
                }
            }
        }

        // Uploading Docker images into ACR
        stage('Upload Image to ACR') {
            steps {
                script {
                    // ACR only accepts TLS connections; pushing over plain http fails
                    docker.withRegistry("https://${registryUrl}", registryCredential) {
                        dockerImage.push("$BUILD_NUMBER")
                    }
                }
            }
        }

        stage('Helm Deploy') {
            steps {
                script {
                    // Upgrade-or-install release "first" from the nn-chart directory
                    // created earlier in the repo; override the image tag with this
                    // build's number so the new image is deployed.
                    sh "helm upgrade first nn-chart --install --namespace helm-deployment --set image.tag=$BUILD_NUMBER"
                }
            }
        }
    }
}
# Azure AKS cluster through Terraform
Terraform, kubectl, Azure CLI to be installed on our Laptop and keep ready below files in VSCode
main.tf, providers.tf, variables.tf, output.tf, terraform.tfvars
az login
# providers.tf
# Azure Resource Manager provider; the empty features block is required.
provider "azurerm" {
  features {}
}

# Pin the provider version so plans are reproducible across machines.
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "3.62.1"
    }
  }
}

# variables.tf
variable "resource_group_name" {
  type        = string
  description = "RG name in Azure"
}
variable "location" {
  type        = string
  description = "Resources location in Azure"
}
variable "cluster_name" {
  type        = string
  description = "AKS name in Azure"
}
# Passed to azurerm_kubernetes_cluster.kubernetes_version; must be a version
# supported by AKS in the chosen region.
variable "kubernetes_version" {
  type        = string
  description = "Kubernetes version"
}
variable "system_node_count" {
  type        = number
  description = "Number of AKS worker nodes"
}
# Used as the ACR resource name; must be globally unique across Azure.
variable "acr_name" {
  type        = string
  description = "ACR name"
}

# terraform.tfvars
# Concrete values for the variables declared in variables.tf.
resource_group_name = "aks_tf_rg"
location            = "CentralUS"
cluster_name        = "my-aks-cluster"
# NOTE(review): 1.26 may no longer be offered by AKS — confirm with
# `az aks get-versions --location CentralUS` before applying.
kubernetes_version  = "1.26.3"
system_node_count   = 2
acr_name            = "myacr321012"

# main.tf
# Resource group that holds the AKS cluster and the container registry.
resource "azurerm_resource_group" "aks-rg" {
  name     = var.resource_group_name
  location = var.location
}

# Let the cluster's kubelet identity pull images from the registry
# (Terraform equivalent of `az aks update --attach-acr`).
resource "azurerm_role_assignment" "role_acrpull" {
  scope                            = azurerm_container_registry.acr.id
  role_definition_name             = "AcrPull"
  principal_id                     = azurerm_kubernetes_cluster.aks.kubelet_identity.0.object_id
  skip_service_principal_aad_check = true
}

# Container registry for application images; the admin user stays disabled
# because pull access is granted via the AcrPull role assignment above.
resource "azurerm_container_registry" "acr" {
  name                = var.acr_name
  resource_group_name = azurerm_resource_group.aks-rg.name
  location            = var.location
  sku                 = "Standard"
  admin_enabled       = false
}

# AKS cluster with one system node pool and a system-assigned managed identity.
resource "azurerm_kubernetes_cluster" "aks" {
  name                = var.cluster_name
  kubernetes_version  = var.kubernetes_version
  location            = var.location
  resource_group_name = azurerm_resource_group.aks-rg.name
  dns_prefix          = var.cluster_name

  default_node_pool {
    name                = "system"
    node_count          = var.system_node_count
    vm_size             = "Standard_DS2_v2"
    type                = "VirtualMachineScaleSets"
    # Spread nodes across availability zones (the region must support zones).
    zones  = [1, 2, 3]
    enable_auto_scaling = false
  }

  identity {
    type = "SystemAssigned"
  }

  network_profile {
    load_balancer_sku = "standard"
    network_plugin    = "kubenet" 
  }
}

# output.tf
# Azure resource ID of the AKS cluster.
output "aks_id" {
  value = azurerm_kubernetes_cluster.aks.id
}
# FQDN of the AKS API server endpoint.
output "aks_fqdn" {
  value = azurerm_kubernetes_cluster.aks.fqdn
}
# Auto-generated resource group that holds the cluster's node resources.
output "aks_node_rg" {
  value = azurerm_kubernetes_cluster.aks.node_resource_group
}
# Azure resource ID of the container registry.
output "acr_id" {
  value = azurerm_container_registry.acr.id
}
# Login server hostname used to tag and push images (e.g. name.azurecr.io).
output "acr_login_server" {
  value = azurerm_container_registry.acr.login_server
}
# Write the cluster's kubeconfig into the working directory so kubectl can use
# it (the notes move it to ~/.kube/config after apply).
resource "local_file" "kubeconfig" {
  depends_on      = [azurerm_kubernetes_cluster.aks]
  filename        = "kubeconfig"
  content         = azurerm_kubernetes_cluster.aks.kube_config_raw
  # kube_config_raw contains cluster credentials; the local provider's default
  # file mode is 0777, so restrict the file to its owner.
  file_permission = "0600"
}

terraform init --> terraform validate --> terraform plan --> terraform apply --> answer "yes" when prompted
# Terraform wrote the kubeconfig into the working directory (local_file resource).
# WARNING: this overwrites any existing ~/.kube/config — back it up first if needed.
mv kubeconfig ~/.kube/config
kubectl get nodes
# kubernetes/website's default branch was renamed from 'master' to 'main'
kubectl create -f https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/controllers/nginx-deployment.yaml
kubectl get deployments
kubectl get pods
# Tear everything down without a confirmation prompt
terraform destroy --auto-approve

Note: