diff --git a/.github/workflows/README.md b/.github/workflows/README.md new file mode 100644 index 0000000..e288a7c --- /dev/null +++ b/.github/workflows/README.md @@ -0,0 +1,98 @@ +# GitHub Actions Workflows + +This directory contains GitHub Actions workflows for the DevOps The Hard Way - Azure tutorial. + +## πŸ“‹ Workflow Files + +### πŸ”§ `main.yml` - Tutorial Example Workflow +**Purpose**: Educational content demonstrating CI/CD pipeline setup +**Status**: ⚠️ **DISABLED** - Tutorial content only + +This workflow is provided as an example for learning purposes and is **not intended to run** in this tutorial repository. + +**Features:** +- Terraform deployment automation +- Azure OIDC authentication +- Static code analysis hooks (commented) +- Terraform documentation generation hooks (commented) + +**To Use This Workflow:** +1. Fork or copy this repository to your own GitHub account +2. Set up Azure OIDC authentication (see tutorial) +3. Enable the workflow by modifying the `on:` triggers +4. Customize the configuration for your environment + +### πŸš€ `deploy-full.yml` - Complete Deployment Pipeline +**Purpose**: Full infrastructure and application deployment +**Status**: βœ… **ACTIVE** - Manual trigger only + +This workflow provides complete deployment automation including: +- Infrastructure provisioning (ACR, VNET, Log Analytics, AKS) +- Docker image building and pushing +- Kubernetes application deployment +- ALB Controller and Gateway setup +- Optional resource cleanup + +**Triggers:** +- Manual execution only (`workflow_dispatch`) +- Environment selection (dev/staging/prod) +- Optional cleanup after deployment + +## πŸŽ“ Educational Notes + +### Why Two Workflows? + +1. **`main.yml`**: Demonstrates traditional CI/CD patterns + - Shows basic Terraform automation + - Includes hooks for advanced features + - Focuses on single component (AKS) + - Educational and reference material + +2. 
**`deploy-full.yml`**: Complete solution approach + - Deploys entire infrastructure stack + - Production-ready patterns + - Multi-environment support + - Practical automation tool + +### Security Considerations + +Both workflows use: +- βœ… Azure OIDC authentication (no stored secrets) +- βœ… Least privilege access patterns +- βœ… Environment-specific configurations +- βœ… Manual approval workflows for production + +### Best Practices Demonstrated + +- **Infrastructure as Code**: All resources defined in Terraform +- **GitOps**: Infrastructure changes through Git workflows +- **Immutable Infrastructure**: Complete rebuilds vs. updates +- **Environment Isolation**: Separate state files and configurations +- **Automated Testing**: Built-in validation and testing steps + +## πŸ”§ Setup Instructions + +### For Tutorial Learning: +1. Study the workflow files as examples +2. Understand the patterns and practices +3. Follow the tutorial documentation + +### For Practical Use: +1. Copy/fork this repository +2. Set up Azure service principal or OIDC +3. Configure GitHub secrets +4. Customize for your environment +5. Enable and run workflows + +## πŸ“š Related Documentation + +- [CI/CD Tutorial](../2-Terraform-AZURE-Services-Creation/5-Run-CICD-For-AKS-Cluster.md) +- [Deployment Scripts](../scripts/README.md) +- [Azure OIDC Setup](../2-Terraform-AZURE-Services-Creation/scripts/5-create-github-oidc.sh) + +## 🀝 Contributing + +These workflows are part of the tutorial content. 
If you find improvements or issues: +- Open an issue for discussion +- Submit a pull request with improvements +- Update related documentation diff --git a/.github/workflows/deploy-full.yml b/.github/workflows/deploy-full.yml new file mode 100644 index 0000000..a605896 --- /dev/null +++ b/.github/workflows/deploy-full.yml @@ -0,0 +1,397 @@ +name: Deploy DevOps The Hard Way - Azure + +on: + workflow_dispatch: + inputs: + environment: + description: 'Environment to deploy' + required: true + default: 'dev' + type: choice + options: + - dev + - staging + - prod + destroy_after_deploy: + description: 'Destroy resources after deployment (for testing)' + required: false + default: false + type: boolean + +env: + PROJECT_NAME: ${{ github.event.inputs.environment || 'devopsthehardway' }} + LOCATION: "uksouth" + ARM_CLIENT_ID: ${{ secrets.AZURE_AD_CLIENT_ID }} + ARM_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + ARM_TENANT_ID: ${{ secrets.AZURE_AD_TENANT_ID }} + ARM_USE_OIDC: true + +jobs: + deploy: + name: πŸš€ Deploy Infrastructure and Application + runs-on: ubuntu-latest + permissions: + contents: read + id-token: write + pull-requests: write + + steps: + - name: πŸ“₯ Checkout Code + uses: actions/checkout@v6 + + - name: πŸ” Azure Login + uses: azure/login@v3 + with: + client-id: ${{ secrets.AZURE_AD_CLIENT_ID }} + tenant-id: ${{ secrets.AZURE_AD_TENANT_ID }} + subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + + - name: πŸ—οΈ Setup Terraform + uses: hashicorp/setup-terraform@v4 + with: + terraform_version: 1.14.8 + terraform_wrapper: false + + - name: 🐳 Set up Docker Buildx + uses: docker/setup-buildx-action@v4 + + - name: ☸️ Setup kubectl + uses: azure/setup-kubectl@v5 + with: + version: 'latest' + + - name: βš™οΈ Setup Helm + uses: azure/setup-helm@v5 + with: + version: 'latest' + + - name: πŸ—οΈ Create Terraform State Storage + run: | + TERRAFORM_RG="${PROJECT_NAME}-terraform-rg" + STORAGE_ACCOUNT="${PROJECT_NAME}tfstate" + + # Create resource 
group for terraform state + az group create --name "$TERRAFORM_RG" --location "$LOCATION" + + # Create storage account + az storage account create \ + --name "$STORAGE_ACCOUNT" \ + --resource-group "$TERRAFORM_RG" \ + --location "$LOCATION" \ + --sku Standard_LRS \ + --encryption-services blob + + # Create storage container + az storage container create \ + --name tfstate \ + --account-name "$STORAGE_ACCOUNT" + + - name: πŸ—οΈ Create Azure AD Group for AKS + run: | + GROUP_NAME="AKS-Admins-${PROJECT_NAME}" + + # Check if group exists + GROUP_ID=$(az ad group list --filter "displayName eq '${GROUP_NAME}'" --query "[0].id" -o tsv) + + if [ -z "$GROUP_ID" ] || [ "$GROUP_ID" = "null" ]; then + echo "Creating new AD group: $GROUP_NAME" + GROUP_ID=$(az ad group create \ + --display-name "$GROUP_NAME" \ + --mail-nickname "aks-admins-${PROJECT_NAME}" \ + --query id -o tsv) + fi + + echo "AKS_ADMINS_GROUP_ID=$GROUP_ID" >> $GITHUB_ENV + echo "Azure AD Group ID: $GROUP_ID" + + - name: πŸ—οΈ Deploy ACR + run: | + cd 2-Terraform-AZURE-Services-Creation/1-acr + + # Update terraform.tfvars + cat > terraform.tfvars << EOF + name = "$PROJECT_NAME" + location = "$LOCATION" + + tags = { + "Purpose" = "azure-devops-hardway" + "Environment" = "${{ github.event.inputs.environment || 'dev' }}" + "DeployedBy" = "GitHub-Actions" + "Project" = "$PROJECT_NAME" + } + EOF + + terraform init \ + -backend-config="resource_group_name=${PROJECT_NAME}-terraform-rg" \ + -backend-config="storage_account_name=${PROJECT_NAME}tfstate" \ + -backend-config="container_name=tfstate" \ + -backend-config="key=acr-terraform.tfstate" + + terraform plan -out=tfplan + terraform apply tfplan + + - name: πŸ—οΈ Deploy VNET + run: | + cd 2-Terraform-AZURE-Services-Creation/2-vnet + + # Update terraform.tfvars + cat > terraform.tfvars << EOF + name = "$PROJECT_NAME" + location = "$LOCATION" + network_address_space = "192.168.0.0/16" + aks_subnet_address_name = "aks" + aks_subnet_address_prefix = "192.168.0.0/24" 
+ subnet_address_name = "appgw" + subnet_address_prefix = "192.168.4.0/24" + + tags = { + "Purpose" = "azure-devops-hardway" + "Environment" = "${{ github.event.inputs.environment || 'dev' }}" + "DeployedBy" = "GitHub-Actions" + "Project" = "$PROJECT_NAME" + } + EOF + + terraform init \ + -backend-config="resource_group_name=${PROJECT_NAME}-terraform-rg" \ + -backend-config="storage_account_name=${PROJECT_NAME}tfstate" \ + -backend-config="container_name=tfstate" \ + -backend-config="key=vnet-terraform.tfstate" + + terraform plan -out=tfplan + terraform apply tfplan + + - name: πŸ—οΈ Deploy Log Analytics + run: | + cd 2-Terraform-AZURE-Services-Creation/3-log-analytics + + # Update terraform.tfvars + cat > terraform.tfvars << EOF + name = "$PROJECT_NAME" + location = "$LOCATION" + + tags = { + "Purpose" = "azure-devops-hardway" + "Environment" = "${{ github.event.inputs.environment || 'dev' }}" + "DeployedBy" = "GitHub-Actions" + "Project" = "$PROJECT_NAME" + } + EOF + + terraform init \ + -backend-config="resource_group_name=${PROJECT_NAME}-terraform-rg" \ + -backend-config="storage_account_name=${PROJECT_NAME}tfstate" \ + -backend-config="container_name=tfstate" \ + -backend-config="key=la-terraform.tfstate" + + terraform plan -out=tfplan + terraform apply tfplan + + - name: πŸ—οΈ Deploy AKS Cluster + run: | + cd 2-Terraform-AZURE-Services-Creation/4-aks + + # Generate SSH key for AKS + ssh-keygen -t rsa -b 4096 -f ~/.ssh/aks_key -N "" -C "github-actions" + SSH_PUBLIC_KEY=$(cat ~/.ssh/aks_key.pub) + + # Update terraform.tfvars + cat > terraform.tfvars << EOF + name = "$PROJECT_NAME" + location = "$LOCATION" + + kubernetes_version = "1.33" + agent_count = 2 + vm_size = "Standard_DS2_v2" + ssh_public_key = "$SSH_PUBLIC_KEY" + aks_admins_group_object_id = "$AKS_ADMINS_GROUP_ID" + + tags = { + "Purpose" = "azure-devops-hardway" + "Environment" = "${{ github.event.inputs.environment || 'dev' }}" + "DeployedBy" = "GitHub-Actions" + "Project" = "$PROJECT_NAME" + } + 
EOF + + terraform init \ + -backend-config="resource_group_name=${PROJECT_NAME}-terraform-rg" \ + -backend-config="storage_account_name=${PROJECT_NAME}tfstate" \ + -backend-config="container_name=tfstate" \ + -backend-config="key=aks-terraform.tfstate" + + terraform plan -out=tfplan + terraform apply tfplan + + # Get AKS credentials + az aks get-credentials \ + --resource-group "${PROJECT_NAME}-rg" \ + --name "${PROJECT_NAME}aks" \ + --overwrite-existing + + - name: 🐳 Build and Push Docker Image + run: | + cd 3-Docker + + # Login to ACR + az acr login --name "${PROJECT_NAME}azurecr" + + # Build and push image + docker build --platform linux/amd64 \ + -t "${PROJECT_NAME}azurecr.azurecr.io/thomasthorntoncloud:v2" . + + docker push "${PROJECT_NAME}azurecr.azurecr.io/thomasthorntoncloud:v2" + + - name: ☸️ Deploy Application to Kubernetes + run: | + cd 4-kubernetes_manifest + + # Update deployment.yml with correct image + sed -i "s/devopsthehardwayazurecr.azurecr.io/${PROJECT_NAME}azurecr.azurecr.io/g" deployment.yml + + # Deploy application + kubectl apply -f deployment.yml + + # Wait for deployment to be ready + kubectl wait --for=condition=available --timeout=300s deployment/thomasthornton -n thomasthorntoncloud + + - name: 🌐 Install ALB Controller + run: | + cd 4-kubernetes_manifest + + # Update script with correct resource names + sed -i "s/devopsthehardway-rg/${PROJECT_NAME}-rg/g" scripts/1-alb-controller-install-k8s.sh + sed -i "s/devopsthehardwayaks/${PROJECT_NAME}aks/g" scripts/1-alb-controller-install-k8s.sh + sed -i "s/devopsthehardway-vnet/${PROJECT_NAME}-vnet/g" scripts/1-alb-controller-install-k8s.sh + + # Install ALB Controller + chmod +x scripts/1-alb-controller-install-k8s.sh + ./scripts/1-alb-controller-install-k8s.sh + + # Wait for ALB Controller to be ready + kubectl wait --for=condition=available --timeout=300s deployment/alb-controller -n azure-alb-system + + - name: 🌐 Create Gateway Resources + run: | + cd 4-kubernetes_manifest + + # Update script 
with correct resource names + sed -i "s/devopsthehardwayjuly25-rg/${PROJECT_NAME}-rg/g" scripts/2-gateway-api-resources.sh + sed -i "s/devopsthehardway-alb/${PROJECT_NAME}-alb/g" scripts/2-gateway-api-resources.sh + + # Create gateway resources + chmod +x scripts/2-gateway-api-resources.sh + ./scripts/2-gateway-api-resources.sh + + # Wait for gateway to get IP + sleep 60 + + - name: πŸ§ͺ Test Application + run: | + # Get gateway IP + GATEWAY_IP=$(kubectl get gateway gateway-01 -n thomasthorntoncloud -o jsonpath='{.status.addresses[0].value}' 2>/dev/null || echo "") + + if [ -n "$GATEWAY_IP" ]; then + echo "🌐 Application URL: http://$GATEWAY_IP" + + # Test application + for i in {1..10}; do + if curl -s -f "http://$GATEWAY_IP" > /dev/null; then + echo "βœ… Application is responding correctly!" + break + else + echo "⏳ Waiting for application to be ready (attempt $i/10)..." + sleep 30 + fi + done + else + echo "⚠️ Gateway IP not yet available" + kubectl get gateway gateway-01 -n thomasthorntoncloud -o yaml + fi + + - name: πŸ“Š Deployment Summary + run: | + echo "## πŸŽ‰ Deployment Summary" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "### πŸ“‹ Resources Created:" >> $GITHUB_STEP_SUMMARY + echo "- βœ… Azure Container Registry" >> $GITHUB_STEP_SUMMARY + echo "- βœ… Virtual Network with subnets" >> $GITHUB_STEP_SUMMARY + echo "- βœ… Log Analytics workspace" >> $GITHUB_STEP_SUMMARY + echo "- βœ… AKS cluster with auto-scaling" >> $GITHUB_STEP_SUMMARY + echo "- βœ… Docker image built and pushed" >> $GITHUB_STEP_SUMMARY + echo "- βœ… Application deployed to Kubernetes" >> $GITHUB_STEP_SUMMARY + echo "- βœ… ALB Controller and Gateway configured" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + + GATEWAY_IP=$(kubectl get gateway gateway-01 -n thomasthorntoncloud -o jsonpath='{.status.addresses[0].value}' 2>/dev/null || echo "Pending") + echo "### 🌐 Application Access:" >> $GITHUB_STEP_SUMMARY + echo "**URL:** http://$GATEWAY_IP" >> 
$GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "### πŸ—οΈ Project Details:" >> $GITHUB_STEP_SUMMARY + echo "- **Project Name:** $PROJECT_NAME" >> $GITHUB_STEP_SUMMARY + echo "- **Environment:** ${{ github.event.inputs.environment || 'dev' }}" >> $GITHUB_STEP_SUMMARY + echo "- **Location:** $LOCATION" >> $GITHUB_STEP_SUMMARY + echo "- **Kubernetes Version:** 1.33" >> $GITHUB_STEP_SUMMARY + + cleanup: + name: πŸ—‘οΈ Cleanup Resources (if requested) + runs-on: ubuntu-latest + needs: deploy + if: ${{ github.event.inputs.destroy_after_deploy == 'true' }} + permissions: + contents: read + id-token: write + + steps: + - name: πŸ“₯ Checkout Code + uses: actions/checkout@v6 + + - name: πŸ” Azure Login + uses: azure/login@v3 + with: + client-id: ${{ secrets.AZURE_AD_CLIENT_ID }} + tenant-id: ${{ secrets.AZURE_AD_TENANT_ID }} + subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + + - name: πŸ—οΈ Setup Terraform + uses: hashicorp/setup-terraform@v4 + with: + terraform_version: 1.14.8 + terraform_wrapper: false + + - name: ☸️ Setup kubectl + uses: azure/setup-kubectl@v5 + with: + version: 'latest' + + - name: βš™οΈ Setup Helm + uses: azure/setup-helm@v5 + with: + version: 'latest' + + - name: πŸ—‘οΈ Run Cleanup Script + run: | + # Make cleanup script executable + chmod +x scripts/cleanup-all.sh + + # Set environment variables for cleanup + export PROJECT_NAME="${PROJECT_NAME}" + export LOCATION="${LOCATION}" + + # Run cleanup with auto-confirmation + echo "DELETE" | ./scripts/cleanup-all.sh + + - name: πŸ“Š Cleanup Summary + run: | + echo "## πŸ—‘οΈ Cleanup Summary" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "### πŸ“‹ Resources Cleaned Up:" >> $GITHUB_STEP_SUMMARY + echo "- πŸ—‘οΈ Kubernetes deployments and services" >> $GITHUB_STEP_SUMMARY + echo "- πŸ—‘οΈ ALB Controller and Gateway resources" >> $GITHUB_STEP_SUMMARY + echo "- πŸ—‘οΈ AKS cluster and node pools" >> $GITHUB_STEP_SUMMARY + echo "- πŸ—‘οΈ Virtual Network and 
subnets" >> $GITHUB_STEP_SUMMARY + echo "- πŸ—‘οΈ Log Analytics workspace" >> $GITHUB_STEP_SUMMARY + echo "- πŸ—‘οΈ Azure Container Registry" >> $GITHUB_STEP_SUMMARY + echo "- πŸ—‘οΈ Resource groups (deletion in progress)" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Note:** Resource group deletions are running in the background and may take 10-15 minutes to complete." diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 676f1ad..5ea0b21 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -1,40 +1,115 @@ -name: CI +# TUTORIAL WORKFLOW - NOT ACTIVE +# This workflow is part of the DevOps The Hard Way tutorial content +# It is disabled to prevent accidental runs in the tutorial repository +# Copy this to their own repositories and modify as needed +name: Terraform-Deploy (Tutorial Example) + +# This workflow only runs manually and is intended as tutorial content on: workflow_dispatch: + inputs: + tutorial_mode: + description: 'This is a tutorial workflow - copy to your own repo to use' + required: true + default: 'tutorial-only' + type: choice + options: + - tutorial-only jobs: - build: + tutorial-info: + name: ⚠️ Tutorial Workflow Information + runs-on: ubuntu-latest + if: ${{ github.event.inputs.tutorial_mode == 'tutorial-only' }} + + steps: + - name: Tutorial Information + run: | + echo "πŸŽ“ This is a TUTORIAL workflow from DevOps The Hard Way - Azure" + echo "" + echo "πŸ“‹ This workflow is provided as example content for learning purposes." + echo "πŸ“‹ It is not intended to run in the tutorial repository." + echo "" + echo "βœ… To use this workflow:" + echo "1. Copy this repository to your own GitHub account" + echo "2. Set up Azure OIDC authentication secrets" + echo "3. Modify the workflow for your specific needs" + echo "4. Update the terraform.tfvars and backend configuration" + echo "" + echo "πŸ“š For full instructions, see the tutorial documentation." 
+ echo "" + echo "❌ This workflow will not deploy any resources in tutorial mode." + + terraform: + name: Terraform-Deploy runs-on: ubuntu-latest + if: ${{ github.event.inputs.tutorial_mode != 'tutorial-only' }} + permissions: + contents: write + id-token: write # Required for OIDC + + env: + ARM_CLIENT_ID: ${{ secrets.AZURE_AD_CLIENT_ID }} + ARM_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + ARM_TENANT_ID: ${{ secrets.AZURE_AD_TENANT_ID }} + ARM_USE_OIDC: true + tf_resource_group_name: "thomasthorntoncloud" + tf_storage_account_name: "thomasthorntontfstate" + tf_state_container: "devopsthehardwaygithub" + tf_state_key: "terraform.tfstate" + + steps: - - name: Checkout - uses: actions/checkout@v2 - - - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws-region: us-west-1 - - - name: Setup Terraform - uses: hashicorp/setup-terraform@v1 - - - name: Terraform Init - working-directory: Terraform-AWS-Services/elasticsearch/elasticsearch_configuration/ - run: | - terraform init \ - -backend-config "bucket=terraform-states-monitoring-platform" \ - -backend-config "key=elasticsearch-terraform.tfstate" - terraform workspace new dev || terraform workspace select dev - - name: Terraform Format - working-directory: Terraform-AWS-Services/elasticsearch/elasticsearch_configuration/ - run: terraform fmt - - - name: Terraform Plan - working-directory: Terraform-AWS-Services/elasticsearch/elasticsearch_configuration/ - run: terraform plan -var="environment=development" -var="elasticsearch_password=${{ secrets.ELASTICSEARCH_PASSWORD }}" - - - name: Terraform Apply - working-directory: Terraform-AWS-Services/elasticsearch/elasticsearch_configuration/ - run: terraform apply -var="environment=development" -var="elasticsearch_password=${{ secrets.ELASTICSEARCH_PASSWORD }}" -auto-approve + - name: Checkout Code + uses: 
actions/checkout@v6 + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v4 + with: + terraform_version: 1.14.8 + terraform_wrapper: true + + # Add in tutorial 6-Terarform-Docs + # - name: Render terraform docs and push changes back to PR + # uses: terraform-docs/gh-actions@v1.3.0 + # with: + # working-dir: ./2-Terraform-AZURE-Services-Creation/1-acr, ./2-Terraform-AZURE-Services-Creation/2-vnet, ./2-Terraform-AZURE-Services-Creation/3-log-analytics, ./2-Terraform-AZURE-Services-Creation/4-aks + # output-file: README.md + # output-method: inject + # git-push: "true" + + - name: Terraform Init + run: terraform init + working-directory: ./2-Terraform-AZURE-Services-Creation/4-aks + + # Add in tutorial 5-Terraform-Static-Code-Analysis + # - name: tfsec + # uses: aquasecurity/tfsec-pr-commenter-action@v1.3.0 + # with: + # tfsec_args: --soft-fail + # github_token: ${{ github.token }} + + - name: Terraform Format + if: github.event_name == 'pull_request' + run: terraform fmt + working-directory: ./2-Terraform-AZURE-Services-Creation/4-aks + + - name: Auto Commit Changes + uses: stefanzweifel/git-auto-commit-action@v7 + if: github.event_name == 'pull_request' + with: + commit_message: "Terraform fmt" + file_pattern: "*.tf *.tfvars" + commit_user_name: "github-actions[bot]" + + - name: Terraform Plan + run: terraform plan -no-color -input=false + working-directory: ./2-Terraform-AZURE-Services-Creation/4-aks + env: + DEPLOYMENT_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + + - name: Terraform Apply + if: github.ref == 'refs/heads/main' && github.event_name != 'pull_request' + run: terraform apply -auto-approve -input=false + working-directory: ./2-Terraform-AZURE-Services-Creation/4-aks diff --git a/.gitignore b/.gitignore index f50f9ea..b811bc9 100644 --- a/.gitignore +++ b/.gitignore @@ -28,5 +28,6 @@ override.tf.json # # !example_override.tf -# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan -# example: *tfplan* 
+# Ignore tfplan binary output files +tfplan +*tfplan* diff --git a/1-Azure/1-Configure-Terraform-Remote-Storage.md b/1-Azure/1-Configure-Terraform-Remote-Storage.md new file mode 100644 index 0000000..67ae344 --- /dev/null +++ b/1-Azure/1-Configure-Terraform-Remote-Storage.md @@ -0,0 +1,368 @@ +# πŸ—„οΈ Configure Storage Account for Terraform State File + +> **Estimated Time:** ⏱️ **10-15 minutes** + +## 🎯 **Learning Objectives** + +By the end of this tutorial, you will: +- [ ] **Understand Terraform state management** and remote storage benefits +- [ ] **Create Azure Storage Account** with security best practices +- [ ] **Configure blob container** for Terraform state files +- [ ] **Implement security measures** for production-ready storage +- [ ] **Validate storage setup** through Azure Portal and CLI + +## πŸ“‹ **Prerequisites** + +**βœ… Required Knowledge:** +- [ ] Basic understanding of Terraform fundamentals +- [ ] Familiarity with Azure Resource Groups and Storage Accounts +- [ ] Command-line interface operations + +**πŸ”§ Required Tools:** +- [ ] Azure CLI installed and configured +- [ ] Azure subscription with Contributor permissions +- [ ] Terminal/command line access +- [ ] Text editor for script customization + +**πŸ—οΈ Infrastructure Dependencies:** +- [ ] Active Azure subscription +- [ ] Azure CLI authenticated (`az login` completed) +- [ ] Sufficient permissions to create resources + +## οΏ½ **Step-by-Step Implementation** + +### **Step 1: Understand Remote State Benefits** ⏱️ *3 minutes* + +1. **πŸ“š Why Remote State Storage?** + + **🎯 Benefits of Remote State:** + - [ ] **Team Collaboration** - Multiple developers can share state + - [ ] **State Locking** - Prevents concurrent modifications + - [ ] **Security** - Centralized, encrypted storage + - [ ] **Backup & Recovery** - Automatic versioning and backup + - [ ] **Consistency** - Single source of truth for infrastructure state + +2. 
**πŸ”’ Azure Blob Storage Advantages** + - [ ] **Encryption at rest** - Data automatically encrypted + - [ ] **Access control** - Fine-grained permissions with Azure RBAC + - [ ] **Versioning** - Built-in state file versioning + - [ ] **Geo-redundancy** - High availability across regions + - [ ] **Cost-effective** - Pay-as-you-use storage model + +### **Step 2: Customize Configuration Script** ⏱️ *5 minutes* + +3. **πŸ“‚ Navigate to Azure Scripts Directory** + ```bash + cd 1-Azure/scripts + ls -la + ``` + **βœ… Expected Files:** + - `1-create-terraform-storage.sh` + - `2-create-azure-ad-group.sh` + +4. **πŸ“ Review and Customize Storage Script** + ```bash + # View the script content + cat 1-create-terraform-storage.sh + ``` + +5. **βš™οΈ Update Configuration Variables** + ```bash + # Edit the script with your preferred editor + nano 1-create-terraform-storage.sh + # or + code 1-create-terraform-storage.sh + ``` + + **🎯 Key Variables to Customize:** + ```bash + # Find and update these lines: + RESOURCE_GROUP_NAME="devopsthehardway-rg" + STORAGE_ACCOUNT_NAME="devopsthehardwaysa" + CONTAINER_NAME="tfstate" + LOCATION="uksouth" + ``` + + **πŸ’‘ Naming Guidelines:** + - **Storage Account:** 3-24 characters, lowercase letters and numbers only + - **Resource Group:** Descriptive name for your project + - **Location:** Choose region closest to your development team + +6. **πŸ” Understand Script Operations** + + **πŸ“‹ Script Workflow:** + ```bash + # The script will: + # 1. Create Resource Group + az group create --name $RESOURCE_GROUP_NAME --location $LOCATION + + # 2. Create Storage Account with security settings + az storage account create \ + --name $STORAGE_ACCOUNT_NAME \ + --resource-group $RESOURCE_GROUP_NAME \ + --location $LOCATION \ + --sku Standard_LRS \ + --encryption-services blob \ + --https-only true \ + --allow-blob-public-access false + + # 3. 
Create Blob Container + az storage container create \ + --name $CONTAINER_NAME \ + --account-name $STORAGE_ACCOUNT_NAME + ``` + +### **Step 3: Execute Storage Setup** ⏱️ *5 minutes* + +7. **πŸš€ Run the Storage Creation Script** + ```bash + # Make script executable + chmod +x 1-create-terraform-storage.sh + + # Execute the script + ./1-create-terraform-storage.sh + ``` + **⏱️ Execution Time:** 2-3 minutes + + **βœ… Expected Output:** + ```json + { + "id": "/subscriptions/.../resourceGroups/devopsthehardway-rg", + "location": "uksouth", + "name": "devopsthehardway-rg", + "properties": { + "provisioningState": "Succeeded" + } + } + ``` + +8. **πŸ“‹ Capture Backend Configuration** + ```bash + # The script should output Terraform backend configuration + # Save this for use in your Terraform configurations: + + # Example output: + # terraform { + # backend "azurerm" { + # resource_group_name = "devopsthehardway-rg" + # storage_account_name = "devopsthehardwaysa" + # container_name = "tfstate" + # key = "terraform.tfstate" + # } + # } + ``` + +### **Step 4: Verify and Secure Setup** ⏱️ *4 minutes* + +9. **πŸ” Verify Through Azure Portal** + - Navigate to [Azure Portal](https://portal.azure.com) + - Search for your resource group + - Verify storage account creation + - Check blob container exists + +10. **πŸ“‹ Verify Using Azure CLI** + ```bash + # Check resource group + az group show --name devopsthehardway-rg --output table + + # Verify storage account + az storage account show --name devopsthehardwaysa --resource-group devopsthehardway-rg --output table + + # List containers + az storage container list --account-name devopsthehardwaysa --output table + ``` + +11. 
**πŸ”’ Implement Additional Security (Recommended)** + ```bash + # Add resource lock to prevent accidental deletion + az lock create \ + --name "TerraformStorageLock" \ + --lock-type CanNotDelete \ + --resource-group devopsthehardway-rg \ + --resource-name devopsthehardwaysa \ + --resource-type Microsoft.Storage/storageAccounts + + # Enable soft delete for blobs + az storage blob service-properties delete-policy update \ + --account-name devopsthehardwaysa \ + --enable true \ + --days-retained 7 + ``` + +## βœ… **Validation Steps** + +**πŸ” Infrastructure Validation:** +- [ ] Resource group created successfully +- [ ] Storage account deployed with correct configuration +- [ ] Blob container accessible and properly configured +- [ ] Security settings applied (HTTPS-only, encryption enabled) +- [ ] Access permissions properly configured + +**πŸ”§ Technical Validation:** +```bash +# Comprehensive validation script +echo "πŸ—„οΈ Validating Terraform storage setup..." + +# Check if resource group exists +RG_NAME="devopsthehardway-rg" +SA_NAME="devopsthehardwaysa" + +if az group show --name $RG_NAME &>/dev/null; then + echo "βœ… Resource group exists" + + # Check storage account + if az storage account show --name $SA_NAME --resource-group $RG_NAME &>/dev/null; then + echo "βœ… Storage account exists" + + # Check encryption settings + ENCRYPTION=$(az storage account show --name $SA_NAME --resource-group $RG_NAME --query "encryption.services.blob.enabled" -o tsv) + echo "πŸ”’ Blob encryption enabled: $ENCRYPTION" + + # Check HTTPS enforcement + HTTPS_ONLY=$(az storage account show --name $SA_NAME --resource-group $RG_NAME --query "enableHttpsTrafficOnly" -o tsv) + echo "πŸ” HTTPS-only enabled: $HTTPS_ONLY" + + # Check container + CONTAINER_COUNT=$(az storage container list --account-name $SA_NAME --output tsv | wc -l) + echo "πŸ“¦ Containers created: $CONTAINER_COUNT" + + echo "βœ… Terraform storage validation complete!" 
+ else + echo "❌ Storage account not found" + exit 1 + fi +else + echo "❌ Resource group not found" + exit 1 +fi +``` + +**πŸ“Š Security Checklist:** +- [ ] **Encryption** - Blob storage encryption enabled +- [ ] **HTTPS** - Secure transport enforced +- [ ] **Public Access** - Blob public access disabled +- [ ] **Access Control** - Proper RBAC permissions +- [ ] **Resource Lock** - Protection against accidental deletion + +## 🚨 **Troubleshooting Guide** + +**❌ Common Setup Issues:** +```bash +# Problem: Storage account name already exists globally +# Solution: Storage account names must be globally unique +az storage account check-name --name $STORAGE_ACCOUNT_NAME + +# Problem: Insufficient permissions +# Solution: Verify your Azure CLI permissions +az role assignment list --assignee $(az account show --query user.name -o tsv) + +# Problem: Region not available +# Solution: Check available locations +az account list-locations --output table +``` + +**πŸ”§ Configuration Issues:** +```bash +# Problem: Container creation fails +# Solution: Verify storage account exists and permissions +az storage account show --name $SA_NAME --resource-group $RG_NAME + +# Problem: Script execution fails +# Solution: Check script permissions and syntax +chmod +x 1-create-terraform-storage.sh +bash -n 1-create-terraform-storage.sh # Syntax check + +# Problem: Backend configuration not working +# Solution: Verify storage account key access +az storage account keys list --resource-group $RG_NAME --account-name $SA_NAME +``` + +**🧹 Cleanup Commands:** +```bash +# Remove resource lock before deletion +az lock delete --name "TerraformStorageLock" --resource-group $RG_NAME + +# Delete storage account (careful!) +az storage account delete --name $SA_NAME --resource-group $RG_NAME --yes + +# Delete resource group (removes everything) +az group delete --name $RG_NAME --yes --no-wait +``` + +## πŸ’‘ **Knowledge Check** + +**🎯 Terraform State Concepts:** +1. 
What happens if multiple people run Terraform simultaneously without remote state? +2. How does Terraform state locking prevent conflicts? +3. What information is stored in the Terraform state file? +4. How does remote state improve team collaboration? + +**πŸ“ Answers:** +1. **Concurrent execution** can cause state corruption and conflicting infrastructure changes +2. **State locking** uses blob leases to ensure only one Terraform operation runs at a time +3. **State file** contains resource mappings, metadata, dependencies, and current configuration +4. **Remote state** provides shared access, consistency, security, and automated backup + +**πŸ” Advanced Concepts:** +- **State Encryption:** How is sensitive data protected in state files? +- **Versioning:** How can you roll back to previous state versions? +- **Migration:** How would you migrate from local to remote state? + +## 🎯 **Next Steps** + +**βœ… Upon Completion:** +- [ ] Secure Azure Storage Account created for Terraform state +- [ ] Blob container configured with proper permissions +- [ ] Security best practices implemented +- [ ] Backend configuration ready for Terraform projects +- [ ] Understanding of remote state management benefits + +**➑️ Continue to:** [Create Azure AD Group for AKS Admins](./2-Create-Azure-AD-Group-AKS-Admins.md) + +--- + +## πŸ“š **Additional Resources** + +- πŸ”— [Terraform Backend Configuration](https://www.terraform.io/language/settings/backends/azurerm) +- πŸ”— [Azure Storage Security Guide](https://docs.microsoft.com/en-us/azure/storage/common/storage-security-guide) +- πŸ”— [Terraform State Best Practices](https://www.terraform.io/language/state) +- πŸ”— [Azure CLI Storage Commands](https://docs.microsoft.com/en-us/cli/azure/storage) + +**🎯 Pro Tips:** +- Use **separate storage accounts** for different environments (dev/staging/prod) +- Enable **soft delete and versioning** for production workloads +- Implement **network restrictions** to limit storage account access +- 
Consider **customer-managed keys** for additional encryption control + +## πŸ” Verification +To ensure everything was set up correctly: + +1. Log into the [Azure Portal](https://portal.azure.com). +2. Navigate to your newly created Resource Group. +3. Verify the presence of the Storage Account. +4. Within the Storage Account, check for the Blob container. +5. It should look similar to this: + +![](images/storage-account.png) + +## 🧠 Knowledge Check +After running the script, try to answer these questions: +1. Why is it important to use remote state storage for Terraform? +2. What are the benefits of using Azure Blob Storage for this purpose? +3. How would you access this state file in your Terraform configurations? + +## πŸ’‘ Pro Tip +Consider implementing these additional security measures for production environments: +1. Enable soft delete and versioning for your blob storage to protect against accidental deletion +2. Set up a resource lock to prevent accidental deletion of the storage account +3. Use Managed Identities instead of storage account keys for authentication +4. Configure network rules to restrict access to specific networks +5. 
Set up Azure Key Vault to store sensitive backend configuration
+
+Example of adding a resource lock:
+```bash
+az lock create --name LockTerraformStorage --lock-type CanNotDelete \
+  --resource-group devopshardway-rg \
+  --resource-name devopshardwaysa \
+  --resource-type Microsoft.Storage/storageAccounts
+```
\ No newline at end of file
diff --git a/1-Azure/2-Create-Azure-AD-Group-AKS-Admins.md b/1-Azure/2-Create-Azure-AD-Group-AKS-Admins.md
new file mode 100644
index 0000000..e5c4c86
--- /dev/null
+++ b/1-Azure/2-Create-Azure-AD-Group-AKS-Admins.md
@@ -0,0 +1,348 @@
+# πŸ‘₯ Create Azure AD Group for AKS Admins
+
+> **Estimated Time:** ⏱️ **8-12 minutes**
+
+## 🎯 **Learning Objectives**
+
+By the end of this tutorial, you will:
+- [ ] **Understand Azure AD integration** with AKS for authentication
+- [ ] **Create Azure AD security group** for AKS administrators
+- [ ] **Configure group membership** for cluster access
+- [ ] **Implement RBAC best practices** for Kubernetes access control
+- [ ] **Validate group setup** through Azure Portal
+
+## πŸ“‹ **Prerequisites**
+
+**βœ… Required Knowledge:**
+- [ ] Basic understanding of Azure Active Directory concepts
+- [ ] Familiarity with Role-Based Access Control (RBAC)
+- [ ] Understanding of Kubernetes authentication principles
+
+**πŸ”§ Required Tools:**
+- [ ] Azure CLI installed and authenticated
+- [ ] Sufficient Azure AD permissions (User Administrator or Global Administrator)
+- [ ] Access to Azure Portal for verification
+- [ ] Completed: [Configure Terraform Remote Storage](./1-Configure-Terraform-Remote-Storage.md)
+
+**πŸ—οΈ Infrastructure Dependencies:**
+- [ ] Active Azure subscription with Azure AD tenant
+- [ ] Azure CLI session authenticated with appropriate permissions
+- [ ] Access to create and manage Azure AD groups
+
+## πŸš€ **Step-by-Step Implementation**
+
+### **Step 1: Understand Azure AD AKS Integration** ⏱️ *3 minutes*
+
+1. 
**πŸ“š Why Azure AD Groups for AKS?** + + **🎯 Benefits of Azure AD Integration:** + - [ ] **Centralized Identity Management** - Single source of truth for user authentication + - [ ] **Group-Based Access Control** - Manage permissions at scale + - [ ] **Enterprise Security** - MFA, Conditional Access, PIM integration + - [ ] **Audit and Compliance** - Comprehensive access logging + - [ ] **Simplified Management** - No separate Kubernetes user accounts + +2. **πŸ”’ RBAC Architecture** + ``` + Azure AD User β†’ Azure AD Group β†’ AKS RBAC β†’ Kubernetes Resources + ``` + - **Azure AD User:** Individual user accounts + - **Azure AD Group:** Collection of users with similar access needs + - **AKS RBAC:** Kubernetes role bindings to Azure AD groups + - **Kubernetes Resources:** Pods, services, deployments, etc. + +3. **🎯 Access Levels Planning** + - **AKS Admins:** Full cluster access (cluster-admin role) + - **Developers:** Namespace-specific access + - **Viewers:** Read-only access to specific resources + +### **Step 2: Execute Group Creation Script** ⏱️ *4 minutes* + +4. **πŸ“‚ Navigate to Scripts Directory** + ```bash + cd 1-Azure/scripts + ls -la + ``` + **βœ… Expected Files:** + - `1-create-terraform-storage.sh` + - `2-create-azure-ad-group.sh` + +5. **πŸ“ Review Script Contents** + ```bash + # Examine the script before execution + cat 2-create-azure-ad-group.sh + ``` + + **πŸ” Script Operations:** + ```bash + # The script will: + # 1. Create Azure AD Group + az ad group create \ + --display-name "devopsthehardway-aks-group" \ + --mail-nickname "devopsthehardway-aks-group" \ + --description "AKS administrators group for DevOps The Hard Way tutorial" + + # 2. Get current user ID + CURRENT_USER_ID=$(az ad signed-in-user show --query id -o tsv) + + # 3. Add current user to group + az ad group member add \ + --group "devopsthehardway-aks-group" \ + --member-id $CURRENT_USER_ID + + # 4. 
Output group ID for later use + GROUP_ID=$(az ad group show --group "devopsthehardway-aks-group" --query id -o tsv) + echo "Azure AD Group ID: $GROUP_ID" + ``` + +6. **πŸš€ Execute the Group Creation Script** + ```bash + # Make script executable + chmod +x 2-create-azure-ad-group.sh + + # Run the script + ./2-create-azure-ad-group.sh + ``` + **⏱️ Execution Time:** 30-60 seconds + + **βœ… Expected Output:** + ```json + { + "displayName": "devopsthehardway-aks-group", + "id": "12345678-1234-1234-1234-123456789012", + "mailNickname": "devopsthehardway-aks-group" + } + + Azure AD Group ID: 12345678-1234-1234-1234-123456789012 + ``` + +7. **πŸ“‹ Save Group ID for Future Use** + ```bash + # Copy the Group ID output - you'll need this for AKS configuration + # Example: 12345678-1234-1234-1234-123456789012 + + # Optional: Save to environment variable for current session + export AKS_ADMIN_GROUP_ID="12345678-1234-1234-1234-123456789012" + echo "Group ID saved: $AKS_ADMIN_GROUP_ID" + ``` + +### **Step 3: Verify and Enhance Group Setup** ⏱️ *4 minutes* + +8. **πŸ” Verify Through Azure Portal** + - Navigate to [Azure Portal](https://portal.azure.com) + - Go to **Azure Active Directory > Groups** + - Search for `devopsthehardway-aks-group` + - Verify group exists and current user is a member + +9. **πŸ“‹ Verify Using Azure CLI** + ```bash + # Check if group exists + az ad group show --group "devopsthehardway-aks-group" --output table + + # List group members + az ad group member list --group "devopsthehardway-aks-group" --output table + + # Verify current user is a member + az ad group member check --group "devopsthehardway-aks-group" --member-id $(az ad signed-in-user show --query id -o tsv) + ``` + +10. 
**πŸ‘₯ Add Additional Users (Optional)** + ```bash + # Example: Add another user to the admin group + # First, get the user's object ID + USER_EMAIL="colleague@yourdomain.com" + USER_OBJECT_ID=$(az ad user show --id $USER_EMAIL --query id -o tsv) + + # Add user to the group + az ad group member add --group "devopsthehardway-aks-group" --member-id $USER_OBJECT_ID + + # Verify addition + az ad group member list --group "devopsthehardway-aks-group" --query "[].{DisplayName:displayName,UserPrincipalName:userPrincipalName}" --output table + ``` + +11. **πŸ”’ Implement Security Best Practices (Recommended)** + ```bash + # Enable security features for the group (if available in your tenant) + # Note: Some features require Azure AD Premium + + # Check group security settings + az ad group show --group "devopsthehardway-aks-group" --query "{securityEnabled:securityEnabled,mailEnabled:mailEnabled}" + + # The group is automatically created as a security group + # Additional security configurations are typically done through Azure Portal + ``` + +## βœ… **Validation Steps** + +**πŸ” Group Creation Validation:** +- [ ] Azure AD group created with correct name +- [ ] Current user added as group member +- [ ] Group ID captured for future AKS configuration +- [ ] Group visible in Azure Portal +- [ ] Security group type enabled + +**πŸ”§ Technical Validation:** +```bash +# Comprehensive validation script +echo "πŸ‘₯ Validating Azure AD group setup..." 
+ +GROUP_NAME="devopsthehardway-aks-group" +CURRENT_USER_ID=$(az ad signed-in-user show --query id -o tsv) + +# Check if group exists +if az ad group show --group "$GROUP_NAME" &>/dev/null; then + echo "βœ… Azure AD group exists" + + # Get group details + GROUP_ID=$(az ad group show --group "$GROUP_NAME" --query id -o tsv) + echo "πŸ“Š Group ID: $GROUP_ID" + + # Check if current user is a member + if az ad group member check --group "$GROUP_NAME" --member-id $CURRENT_USER_ID --query value -o tsv | grep -q "true"; then + echo "βœ… Current user is group member" + else + echo "❌ Current user is not a group member" + fi + + # Count group members + MEMBER_COUNT=$(az ad group member list --group "$GROUP_NAME" --query "length(@)") + echo "πŸ‘₯ Group members: $MEMBER_COUNT" + + echo "βœ… Azure AD group validation complete!" +else + echo "❌ Azure AD group not found" + exit 1 +fi +``` + +**πŸ“Š Security Checklist:** +- [ ] **Group Type** - Security group enabled +- [ ] **Membership** - Appropriate users added +- [ ] **Naming** - Descriptive and consistent naming convention +- [ ] **Documentation** - Group ID recorded for AKS configuration +- [ ] **Principle of Least Privilege** - Only necessary users have admin access + +## 🚨 **Troubleshooting Guide** + +**❌ Common Permission Issues:** +```bash +# Problem: Insufficient permissions to create groups +# Solution: Verify Azure AD role assignments +az role assignment list --assignee $(az ad signed-in-user show --query id -o tsv) --query "[?contains(roleDefinitionName, 'Administrator')]" + +# Problem: User Administrator role needed +# Solution: Request User Administrator or Global Administrator role +az ad directory-role list --query "[?displayName=='User Administrator' || displayName=='Global Administrator']" + +# Problem: Cannot add users to group +# Solution: Verify you have appropriate permissions +az ad group member add --group "$GROUP_NAME" --member-id $CURRENT_USER_ID --debug +``` + +**πŸ”§ Group Management Issues:** 
+```bash +# Problem: Group already exists +# Solution: Check existing group or use different name +az ad group list --filter "displayName eq 'devopsthehardway-aks-group'" --output table + +# Problem: User not found +# Solution: Verify user exists and check email address +az ad user show --id "user@domain.com" + +# Problem: Group ID not captured +# Solution: Retrieve group ID manually +GROUP_ID=$(az ad group show --group "devopsthehardway-aks-group" --query id -o tsv) +echo "Group ID: $GROUP_ID" +``` + +**🧹 Cleanup Commands:** +```bash +# Remove user from group +az ad group member remove --group "devopsthehardway-aks-group" --member-id $USER_OBJECT_ID + +# Delete the group (careful!) +az ad group delete --group "devopsthehardway-aks-group" +``` + +## πŸ’‘ **Knowledge Check** + +**🎯 Azure AD Integration Concepts:** +1. What are the benefits of using Azure AD groups vs individual user assignments? +2. How does Azure AD integration improve AKS security? +3. What's the difference between authentication and authorization in AKS? +4. How do Azure AD Conditional Access policies apply to AKS? + +**πŸ“ Answers:** +1. **Groups provide scalability**, easier management, consistent permissions, and simplified auditing +2. **Centralized identity management**, enterprise security features, MFA enforcement, and comprehensive logging +3. **Authentication** verifies user identity; **authorization** determines what actions are permitted in Kubernetes +4. **Conditional Access** can enforce device compliance, location restrictions, and risk-based access controls + +**πŸ” Advanced Applications:** +- **PIM Integration:** How would you implement just-in-time access for AKS admins? +- **Multiple Groups:** How would you design RBAC for different team roles? +- **Automation:** How could you automate group membership based on organizational changes? 
+ +## 🎯 **Next Steps** + +**βœ… Upon Completion:** +- [ ] Azure AD group successfully created for AKS administrators +- [ ] Current user added as group member +- [ ] Group ID captured for AKS Terraform configuration +- [ ] Understanding of Azure AD RBAC integration +- [ ] Ready to configure AKS cluster with Azure AD authentication + +**➑️ Continue to:** [Create Azure Container Registry](../2-Terraform-AZURE-Services-Creation/1-Create-ACR.md) + +--- + +## πŸ“š **Additional Resources** + +- πŸ”— [AKS Azure AD Integration](https://docs.microsoft.com/en-us/azure/aks/azure-ad-integration-cli) +- πŸ”— [Azure AD Groups Management](https://docs.microsoft.com/en-us/azure/active-directory/fundamentals/active-directory-groups-create-azure-portal) +- πŸ”— [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) +- πŸ”— [Azure AD Privileged Identity Management](https://docs.microsoft.com/en-us/azure/active-directory/privileged-identity-management/) + +**🎯 Pro Tips:** +- **Use descriptive group names** that indicate purpose and access level +- **Implement regular access reviews** to ensure appropriate group membership +- **Consider multiple groups** for different access levels (admin, developer, viewer) +- **Document group purposes** and access levels for team reference + +## πŸ” Verification + +To ensure the group was created successfully: + +1. Log into the [Azure Portal](https://portal.azure.com) +2. Navigate to **Azure Active Directory > Groups** +3. Search for `devopsthehardway-aks-group` +4. Verify that your user account is listed as a member: + +![](images/azure-ad-group.png) + +## 🧠 Knowledge Check + +After running the script, consider these questions: + +1. Why is it beneficial to use Azure AD groups for AKS admin access? +2. How does this group-based access improve security compared to individual user access? +3. In what ways might you further modify the AD group for different levels of access? 
+ +## πŸ’‘ Pro Tip + +Consider implementing these best practices for production environments: + +1. Create multiple AD groups with different levels of access (e.g., read-only, developer, admin) +2. Integrate with Privileged Identity Management (PIM) for just-in-time access +3. Implement regular access reviews to ensure appropriate access +4. Use Conditional Access policies to enforce multi-factor authentication + +Example of adding another user to the group: + +```bash +# Get object ID of user to add +USER_OBJECTID=$(az ad user show --id user@example.com --query id -o tsv) + +# Add user to the AKS admin group +az ad group member add --group devopsthehardway-aks-group --member-id $USER_OBJECTID +``` \ No newline at end of file diff --git a/1-Azure/README.md b/1-Azure/README.md new file mode 100644 index 0000000..117baf9 --- /dev/null +++ b/1-Azure/README.md @@ -0,0 +1,35 @@ +# Azure Setup for DevOps The Hard Way + +## Overview +This directory contains the foundational Azure setup needed for the DevOps The Hard Way - Azure project. These steps establish the core Azure resources that will be used throughout the tutorial. + +## Labs in this Section + +### [1. Configure Terraform Remote Storage](./1-Configure-Terraform-Remote-Storage.md) +Set up an Azure Storage Account to securely store your Terraform state files, which is essential for team collaboration and state management. + +### [2. Create Azure AD Group for AKS Admins](./2-Create-Azure-AD-Group-AKS-Admins.md) +Create an Azure Active Directory group to manage administrative access to your Kubernetes clusters with proper RBAC controls. 
+
+## Scripts
+
+The `scripts` directory contains shell scripts that automate the setup process:
+
+- [`1-create-terraform-storage.sh`](./scripts/1-create-terraform-storage.sh): Creates a resource group, storage account, and blob container for Terraform state
+- [`2-create-azure-ad-group.sh`](./scripts/2-create-azure-ad-group.sh): Creates an Azure AD Group for AKS administrators
+
+## Pre-requisites
+
+Before starting these labs, ensure you have:
+
+1. An Azure account with appropriate permissions
+2. Azure CLI installed and configured (`az login`)
+3. Basic familiarity with Azure services and Terraform concepts
+
+## Best Practices Applied
+
+- Resource naming conventions
+- Security-enhanced storage configuration
+- RBAC-based access control
+- Infrastructure as Code for reproducibility
+- Proper error handling in scripts
diff --git a/1-Azure/images/azure-ad-group.png b/1-Azure/images/azure-ad-group.png
new file mode 100644
index 0000000..2638d25
Binary files /dev/null and b/1-Azure/images/azure-ad-group.png differ
diff --git a/1-Azure/images/storage-account.png b/1-Azure/images/storage-account.png
new file mode 100644
index 0000000..a60eb87
Binary files /dev/null and b/1-Azure/images/storage-account.png differ
diff --git a/1-Azure/scripts/1-create-terraform-storage.sh b/1-Azure/scripts/1-create-terraform-storage.sh
new file mode 100755
index 0000000..70b6642
--- /dev/null
+++ b/1-Azure/scripts/1-create-terraform-storage.sh
@@ -0,0 +1,77 @@
+#!/bin/sh
+
+# Configuration
+RESOURCE_GROUP_NAME="devopshardway-rg"
+STORAGE_ACCOUNT_NAME="devopshardwaysa"
+LOCATION="uksouth"
+CONTAINER_NAME="tfstate"
+
+# Error handling function
+handle_error() {
+    echo "ERROR: $1"
+    exit 1
+}
+
+# Verify Azure CLI is installed and user is logged in
+if ! command -v az &> /dev/null; then
+    handle_error "Azure CLI is not installed. Please install it first: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli"
+fi
+
+# Check if user is logged in
+echo "Verifying Azure CLI login status..."
+az account show &> /dev/null || handle_error "You are not logged in to Azure CLI. Please run 'az login' first." + +# Check if Resource Group exists +echo "Checking if resource group $RESOURCE_GROUP_NAME exists..." +RESOURCE_GROUP_EXISTS=$(az group exists --name $RESOURCE_GROUP_NAME) + +if [ "$RESOURCE_GROUP_EXISTS" = "true" ]; then + echo "Resource group $RESOURCE_GROUP_NAME already exists." +else + # Create Resource Group + echo "Creating resource group $RESOURCE_GROUP_NAME in $LOCATION..." + az group create -l $LOCATION -n $RESOURCE_GROUP_NAME --tags "Purpose=azure-devops-hardway" || handle_error "Failed to create resource group" +fi + +# Check if Storage Account exists +echo "Checking if storage account $STORAGE_ACCOUNT_NAME exists..." +STORAGE_ACCOUNT_EXISTS=$(az storage account check-name --name $STORAGE_ACCOUNT_NAME --query 'nameAvailable' --output tsv) + +if [ "$STORAGE_ACCOUNT_EXISTS" = "false" ]; then + echo "Storage account $STORAGE_ACCOUNT_NAME is already created in resource group $RESOURCE_GROUP_NAME." +else + # Create Storage Account with improved security settings + echo "Creating storage account $STORAGE_ACCOUNT_NAME..." + az storage account create \ + -n $STORAGE_ACCOUNT_NAME \ + -g $RESOURCE_GROUP_NAME \ + -l $LOCATION \ + --sku Standard_LRS \ + --encryption-services blob \ + --min-tls-version TLS1_2 \ + --allow-blob-public-access false \ + --tags "Purpose=azure-devops-hardway" || handle_error "Failed to create storage account" + + # Create Storage Account blob container + echo "Creating blob container $CONTAINER_NAME..." + az storage container create \ + --name $CONTAINER_NAME \ + --account-name $STORAGE_ACCOUNT_NAME \ + --auth-mode login || handle_error "Failed to create blob container" + + # Output the access key (in a real environment, consider using managed identities instead) + echo "Retrieving storage account key..." 
+ ACCOUNT_KEY=$(az storage account keys list --resource-group $RESOURCE_GROUP_NAME --account-name $STORAGE_ACCOUNT_NAME --query '[0].value' -o tsv) + + echo "Configuration for terraform backend:" + echo "terraform {" + echo " backend \"azurerm\" {" + echo " resource_group_name = \"$RESOURCE_GROUP_NAME\"" + echo " storage_account_name = \"$STORAGE_ACCOUNT_NAME\"" + echo " container_name = \"$CONTAINER_NAME\"" + echo " key = \"terraform.tfstate\"" + echo " }" + echo "}" + + echo "Setup complete!" +fi \ No newline at end of file diff --git a/1-Azure/scripts/2-create-azure-ad-group.sh b/1-Azure/scripts/2-create-azure-ad-group.sh new file mode 100755 index 0000000..6c3c220 --- /dev/null +++ b/1-Azure/scripts/2-create-azure-ad-group.sh @@ -0,0 +1,58 @@ +#!/bin/sh + +# Configuration +AZURE_AD_GROUP_NAME="devopsthehardway-aks-group" + +# Error handling function +handle_error() { + echo "ERROR: $1" + exit 1 +} + +# Verify Azure CLI is installed and user is logged in +if ! command -v az &> /dev/null; then + handle_error "Azure CLI is not installed. Please install it first: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli" +fi + +# Check if user is logged in +echo "Verifying Azure CLI login status..." +az account show &> /dev/null || handle_error "You are not logged in to Azure CLI. Please run 'az login' first." + +echo "Retrieving current user Object ID..." +CURRENT_USER_OBJECTID=$(az ad signed-in-user show --query id -o tsv) || handle_error "Failed to retrieve current user Object ID" + +# Check if Azure AD Group exists +echo "Checking if Azure AD Group $AZURE_AD_GROUP_NAME exists..." +GROUP_EXISTS=$(az ad group list --filter "displayName eq '$AZURE_AD_GROUP_NAME'" --query "[].displayName" -o tsv) + +if [ "$GROUP_EXISTS" = "$AZURE_AD_GROUP_NAME" ]; then + echo "Azure AD group $AZURE_AD_GROUP_NAME already exists." +else + # Create Azure AD Group with description + echo "Creating Azure AD group $AZURE_AD_GROUP_NAME..." 
+ az ad group create \ + --display-name $AZURE_AD_GROUP_NAME \ + --mail-nickname $AZURE_AD_GROUP_NAME \ + --description "Administrators for AKS clusters with full kubectl access" || handle_error "Failed to create Azure AD group" +fi + +# Check if Current User is already a member of the Azure AD Group +echo "Checking if current user is a member of $AZURE_AD_GROUP_NAME..." +USER_IN_GROUP=$(az ad group member check --group $AZURE_AD_GROUP_NAME --member-id $CURRENT_USER_OBJECTID --query value -o tsv) + +if [ "$USER_IN_GROUP" = "true" ]; then + echo "Current user is already a member of the Azure AD group $AZURE_AD_GROUP_NAME." +else + # Add Current az login user to Azure AD Group + echo "Adding current user to Azure AD group $AZURE_AD_GROUP_NAME..." + az ad group member add --group $AZURE_AD_GROUP_NAME --member-id $CURRENT_USER_OBJECTID || handle_error "Failed to add current user to Azure AD group" +fi + +echo "Retrieving Azure AD Group ID..." +AZURE_GROUP_ID=$(az ad group show --group $AZURE_AD_GROUP_NAME --query id -o tsv) || handle_error "Failed to retrieve Azure AD Group ID" + +echo "βœ… Setup complete!" +echo "===========================================================================" +echo " AZURE AD GROUP ID: $AZURE_GROUP_ID" +echo " You'll need this ID for AKS Terraform configurations" +echo "===========================================================================" \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/1-Create-ACR.md b/2-Terraform-AZURE-Services-Creation/1-Create-ACR.md new file mode 100644 index 0000000..613752b --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/1-Create-ACR.md @@ -0,0 +1,227 @@ +# Create an Azure Container Registry Repository + +## 🎯 **Tutorial Overview** +**Estimated Time:** ⏱️ **15-20 minutes** +**Prerequisites Level:** Basic Azure and Terraform knowledge + +In this lab, you'll create a repository in Azure Container Registry (ACR) to store the Docker image for the thomasthornton.cloud app. 
+ +### πŸ“‹ **Learning Objectives** +By the end of this tutorial, you will: +- [ ] Understand Azure Container Registry fundamentals +- [ ] Create ACR infrastructure using Terraform +- [ ] Configure Terraform backend for state management +- [ ] Apply Azure resource tagging best practices +- [ ] Validate ACR deployment and functionality + +### ⚠️ **Important Notes** +- Ensure your ACR name is globally unique (Azure requirement) +- Standard SKU provides good balance of features and cost +- Backend state storage must be configured before running Terraform + +## πŸ› οΈ Create the ACR Terraform Configuration + +### βœ… **Prerequisites Checklist** +Before starting, ensure you have: +- [ ] Terraform installed (version 1.14.8 or later) +- [ ] Azure CLI installed and configured (`az login` completed) +- [ ] Storage account for Terraform state already created (from 1-Azure section) +- [ ] Basic understanding of Terraform and ACR concepts +- [ ] Text editor or IDE for configuration files + +### πŸ“š **Background Knowledge** +**What is Azure Container Registry?** +- Private Docker registry service in Azure +- Stores and manages container images +- Integrates with Azure Kubernetes Service (AKS) +- Provides security scanning and access control + +## πŸš€ **Step-by-Step Implementation** + +### **Step 1: Review and Customize Configuration** ⏱️ *5 minutes* +1. **πŸ“ Update Terraform Variables** + - Open the [terraform.tfvars](https://github.com/thomast1906/DevOps-The-Hard-Way-Azure/tree/main/2-Terraform-AZURE-Services-Creation/1-acr/terraform.tfvars) file + - Ensure all values are accurate for your environment and **globally unique** + + ```hcl + # Example customization + name = "mycompanyacr2024" # Must be globally unique! + location = "uksouth" # Change if desired + ``` + + **βœ… Validation:** Check that your ACR name is unique by running: + ```bash + az acr check-name --name "your-acr-name" + ``` + +### **Step 2: Understand the Infrastructure** ⏱️ *5 minutes* +2. 
**πŸ” Review Terraform Configuration** + + **πŸ“š What this configuration creates:** + - [ ] **Resource Group** - Container for all ACR resources + - [ ] **Container Registry** - Private Docker image repository + - [ ] **Terraform Backend** - Remote state storage in Azure + - [ ] **Resource Tags** - Metadata for organization and cost tracking + + **🎯 Key Features:** + - **SKU:** Standard (good balance of features and cost) + - **Admin Access:** Enabled for development scenarios + - **Location:** UK South (configurable) + - **Retention:** Configured for compliance + + Review the [ACR Terraform configuration](https://github.com/thomast1906/DevOps-The-Hard-Way-Azure/tree/main/2-Terraform-AZURE-Services-Creation/1-acr) to understand: + - [ ] Resource definitions and relationships + - [ ] Output values for use in other modules + - [ ] Variable usage and defaults + +### **Step 3: Deploy the Infrastructure** ⏱️ *10 minutes* +3. **πŸ—οΈ Create the ACR** + + **πŸ“‚ Navigate to the configuration directory:** + ```bash + cd 2-Terraform-AZURE-Services-Creation/1-acr + ``` + + **πŸ”§ Initialize Terraform:** + ```bash + terraform init + ``` + **βœ… Expected Result:** Terraform downloads providers and configures backend + + **πŸ“‹ Plan the deployment:** + ```bash + terraform plan + ``` + **βœ… Expected Result:** Shows resources to be created (should show ~2-3 resources) + + **πŸš€ Apply the configuration:** + ```bash + terraform apply + ``` + **βœ… Expected Result:** ACR and resource group created successfully + +## βœ… **Validation & Testing** + +### **Step 4: Verify Deployment** ⏱️ *5 minutes* +**πŸ” Validate your ACR deployment:** + +1. 
**Azure Portal Verification:**
+   - [ ] Log into the [Azure Portal](https://portal.azure.com)
+   - [ ] Navigate to [Container Registries](https://portal.azure.com/#browse/Microsoft.ContainerRegistry%2Fregistries)
+   - [ ] Locate your newly created ACR
+   - [ ] Verify the following properties:
+     - **Status:** Available
+     - **SKU:** Standard
+     - **Location:** Matches your configuration
+     - **Admin user:** Disabled
+
+2. **Azure CLI Verification:**
+   ```bash
+   # Check ACR exists and is accessible
+   az acr list --query "[?name=='your-acr-name']" --output table
+
+   # Verify login capability
+   az acr login --name your-acr-name
+   ```
+   **βœ… Expected Result:** Login successful message
+
+3. **Terraform State Verification:**
+   ```bash
+   # View created resources
+   terraform show
+
+   # Check outputs
+   terraform output
+   ```
+
+**πŸ“Έ Expected Result:**
+![ACR in Azure Portal](images/1-acr.png)
+
+### **πŸ§ͺ Functionality Test**
+**Test ACR basic functionality:**
+```bash
+# Pull a test image and push to your ACR
+docker pull hello-world
+docker tag hello-world your-acr-name.azurecr.io/hello-world:test
+docker push your-acr-name.azurecr.io/hello-world:test
+```
+
+## 🚨 **Troubleshooting Guide**
+
+### **Common Issues & Solutions**
+
+| ❌ **Problem** | πŸ”§ **Solution** |
+|----------------|-----------------|
+| ACR name already exists | Choose a globally unique name (try adding numbers/date) |
+| Authentication failed | Run `az login` and verify subscription access |
+| Terraform backend error | Ensure storage account exists and you have access |
+| Permission denied | Verify Azure CLI is logged in with correct permissions |
+| Plan shows no changes | Check if resources already exist or variables are correct |
+
+### **πŸ†˜ Detailed Troubleshooting**
+
+**Issue: "ACR name not available"**
+```bash
+# Check name availability
+az acr check-name --name "your-proposed-name"
+
+# Generate unique name
+echo "mycompanyacr$(date +%Y%m%d)"
+```
+
+**Issue: "Backend initialization failed"**
+```bash +# Verify storage account exists +az storage account show --name "your-storage-account" --resource-group "your-rg" + +# Check access keys +az storage account keys list --account-name "your-storage-account" +``` + +## πŸŽ“ **Knowledge Check Questions** + +Test your understanding: + +- [ ] **Question 1:** Why is it beneficial to use Terraform for creating cloud resources like ACR? +
+ πŸ’‘ Answer + Infrastructure as Code provides version control, repeatability, and team collaboration. Changes are tracked, and infrastructure can be recreated consistently. +
+ +- [ ] **Question 2:** How does storing Terraform state in Azure Storage help in team environments? +
+ πŸ’‘ Answer + Remote state allows multiple team members to work on the same infrastructure, provides state locking to prevent conflicts, and ensures state persistence. +
+ +- [ ] **Question 3:** What are the advantages of using ACR over public registries? +
+ πŸ’‘ Answer + Private access control, integration with Azure services, geo-replication, vulnerability scanning, and compliance with corporate policies. +
+
+## 🎯 **Achievement Unlocked!**
+πŸ† **Container Registry Master** - You've successfully created your first Azure Container Registry using Infrastructure as Code!
+
+### **What You've Accomplished:**
+- [x] Created ACR infrastructure with Terraform
+- [x] Configured remote state management
+- [x] Applied Azure resource tagging
+- [x] Validated deployment functionality
+- [x] Gained troubleshooting skills
+
+### **Next Steps:**
+- [ ] Proceed to [Create VNET](./2-Create-VNET.md)
+- [ ] Learn about [Docker image creation](../3-Docker/1-Create-Docker-Image.md)
+
+## πŸ’‘ **Pro Tips & Best Practices**
+Consider implementing these additional security and operational best practices for your ACR:
+
+1. **Enhanced Security**:
+   - Enable content trust for image signing: `admin_enabled = false` (already set)
+   - Configure private link endpoints to restrict network access
+   - Use Managed Identity for authentication instead of admin credentials
+
+2. **Cost Optimisation**:
+   - Monitor image usage and implement retention policies
+   - Use Premium SKU only if you need geo-replication or other advanced features
diff --git a/2-Terraform-AZURE-Services-Creation/1-acr/README.md b/2-Terraform-AZURE-Services-Creation/1-acr/README.md
new file mode 100644
index 0000000..561d86a
--- /dev/null
+++ b/2-Terraform-AZURE-Services-Creation/1-acr/README.md
@@ -0,0 +1,37 @@
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 1.14.0, < 2.0.0 |
+| [azurerm](#requirement\_azurerm) | >= 4.68.0, < 5.0.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [azurerm](#provider\_azurerm) | >= 4.68.0, < 5.0.0 |
+
+## Modules
+
+No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [azurerm_container_registry.acr](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/container_registry) | resource | +| [azurerm_resource_group.acr_resource_group](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/resource_group) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [location](#input\_location) | Azure Location of resources | `string` | `"uksouth"` | no | +| [name](#input\_name) | Name for resources | `string` | `"devopsthehardway"` | no | +| [tags](#input\_tags) | n/a | `map(string)` | n/a | yes | + +## Outputs + +No outputs. + \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/1-acr/acr.tf b/2-Terraform-AZURE-Services-Creation/1-acr/acr.tf new file mode 100644 index 0000000..f054631 --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/1-acr/acr.tf @@ -0,0 +1,18 @@ +resource "azurerm_resource_group" "acr_resource_group" { + name = "${var.name}-rg" + location = var.location + + tags = var.tags + +} + +resource "azurerm_container_registry" "acr" { + name = "${var.name}azurecr" + resource_group_name = azurerm_resource_group.acr_resource_group.name + location = azurerm_resource_group.acr_resource_group.location + sku = "Standard" + admin_enabled = false + + tags = var.tags + +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/1-acr/providers.tf b/2-Terraform-AZURE-Services-Creation/1-acr/providers.tf new file mode 100644 index 0000000..bf1ef20 --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/1-acr/providers.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.14.0, < 2.0.0" + backend "azurerm" { + resource_group_name = "devopshardway-rg" + storage_account_name = "devopshardwaysa" + container_name = "tfstate" + key = "acr-terraform.tfstate" + } + + required_providers { + azurerm = { + source 
= "hashicorp/azurerm" + version = ">= 4.68.0, < 5.0.0" + } + } +} + +provider "azurerm" { + features {} + subscription_id = "04109105-f3ca-44ac-a3a7-66b4936112c3" + +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/1-acr/terraform.tfvars b/2-Terraform-AZURE-Services-Creation/1-acr/terraform.tfvars new file mode 100644 index 0000000..de58302 --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/1-acr/terraform.tfvars @@ -0,0 +1,9 @@ +name = "devopsthehardway" +location = "uksouth" + +tags = { + "Purpose" = "azure-devops-hardway" + "Environment" = "DevOps" + "DeployedBy" = "Terraform" + "Project" = "devopsthehardway" +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/1-acr/variables.tf b/2-Terraform-AZURE-Services-Creation/1-acr/variables.tf new file mode 100644 index 0000000..713e2be --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/1-acr/variables.tf @@ -0,0 +1,15 @@ +variable "name" { + type = string + default = "devopsthehardway" + description = "Name for resources" +} + +variable "location" { + type = string + default = "uksouth" + description = "Azure Location of resources" +} + +variable "tags" { + type = map(string) +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/2-Create-VNET.md b/2-Terraform-AZURE-Services-Creation/2-Create-VNET.md new file mode 100644 index 0000000..7967d32 --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/2-Create-VNET.md @@ -0,0 +1,296 @@ +# 🌐 Create an Azure VNET + +> **Estimated Time:** ⏱️ **25-30 minutes** + +## 🎯 **Learning Objectives** + +By the end of this tutorial, you will: +- [ ] **Create Azure VNET** with properly segmented subnets +- [ ] **Configure Network Security Groups** for enhanced security +- [ ] **Deploy Application Gateway for Containers** for load balancing +- [ ] **Understand Azure networking** concepts and best practices +- [ ] **Validate network infrastructure** through Azure Portal + +## πŸ“‹ **Prerequisites** 
+ +**βœ… Required Knowledge:** +- [ ] Basic understanding of Azure networking concepts (VNETs, subnets, NSGs) +- [ ] Familiarity with Terraform configuration files +- [ ] Azure CLI authentication completed + +**πŸ”§ Required Tools:** +- [ ] Terraform CLI installed and configured +- [ ] Azure CLI with active subscription +- [ ] Access to Azure subscription with Contributor permissions +- [ ] Completed: [Configure Terraform Remote Storage](../1-Azure/1-Configure-Terraform-Remote-Storage.md) + +**πŸ—οΈ Infrastructure Dependencies:** +- [ ] Azure Storage Account for Terraform state (from previous tutorial) +- [ ] Resource group for VNET resources + +## πŸš€ **Step-by-Step Implementation** + +### **Step 1: Review Terraform Configuration** ⏱️ *8 minutes* + +1. **πŸ“‚ Navigate to VNET Directory** + ```bash + cd 2-Terraform-AZURE-Services-Creation/2-vnet + ``` + +2. **πŸ“‹ Review terraform.tfvars Configuration** + ```bash + cat terraform.tfvars + ``` + **πŸ” Key Variables to Verify:** + - [ ] `location` - Azure region (default: uksouth) + - [ ] `resource_group_name` - Target resource group + - [ ] `vnet_address_space` - CIDR block for VNET + - [ ] `subnet_configurations` - Subnet definitions + +3. 
**πŸ—οΈ Understand Infrastructure Components** + + **πŸ“„ vnet.tf - Virtual Network:** + ```hcl + # Creates VNET with address space + resource "azurerm_virtual_network" "vnet" { + name = var.vnet_name + address_space = [var.vnet_address_space] + location = var.location + resource_group_name = var.resource_group_name + } + + # Creates subnets for different workloads + resource "azurerm_subnet" "subnets" { + for_each = var.subnet_configurations + name = each.key + resource_group_name = var.resource_group_name + virtual_network_name = azurerm_virtual_network.vnet.name + address_prefixes = [each.value.address_prefix] + } + ``` + + **πŸ›‘οΈ nsg.tf - Network Security Groups:** + ```hcl + # Creates NSG with security rules + resource "azurerm_network_security_group" "nsg" { + name = "${var.vnet_name}-nsg" + location = var.location + resource_group_name = var.resource_group_name + } + + # Associates NSG with subnets + resource "azurerm_subnet_network_security_group_association" "nsg_association" { + for_each = var.subnet_configurations + subnet_id = azurerm_subnet.subnets[each.key].id + network_security_group_id = azurerm_network_security_group.nsg.id + } + ``` + + **βš–οΈ alb.tf - Application Load Balancer:** + ```hcl + # Creates Application Gateway for Containers + resource "azurerm_application_load_balancer" "alb" { + name = "${var.vnet_name}-alb" + location = var.location + resource_group_name = var.resource_group_name + } + ``` + +### **Step 2: Initialize Terraform** ⏱️ *3 minutes* + +4. **πŸ”§ Initialize Terraform Backend** + ```bash + terraform init + ``` + **βœ… Expected Output:** + ``` + Initializing the backend... + Successfully configured the backend "azurerm"! + Initializing provider plugins... + Terraform has been successfully initialized! + ``` + +5. **πŸ“‹ Validate Configuration** + ```bash + terraform validate + ``` + **βœ… Expected:** "Success! The configuration is valid." + +### **Step 3: Plan and Deploy Infrastructure** ⏱️ *10 minutes* + +6. 
**πŸ“Š Review Deployment Plan** + ```bash + terraform plan + ``` + **⏱️ Planning Time:** 1-2 minutes + **πŸ” Review Output:** Look for resource creation count (typically 8-12 resources) + +7. **πŸš€ Deploy Infrastructure** + ```bash + terraform apply + ``` + **⏱️ Deployment Time:** 5-8 minutes + **πŸ’‘ Tip:** Type `yes` when prompted to confirm deployment + + **βœ… Expected Completion Message:** + ``` + Apply complete! Resources: 10 added, 0 changed, 0 destroyed. + ``` + +### **Step 4: Verify Deployment** ⏱️ *5 minutes* + +8. **🌐 Check Azure Portal** + - Navigate to [Azure Portal](https://portal.azure.com) + - Go to your resource group + - Verify these resources are created: + - [ ] Virtual Network with correct address space + - [ ] Subnets with proper CIDR blocks + - [ ] Network Security Group with associations + - [ ] Application Load Balancer + +9. **πŸ“‹ Verify Using Azure CLI** + ```bash + # List VNET details + az network vnet list --resource-group --output table + + # Check subnets + az network vnet subnet list --vnet-name --resource-group --output table + + # Verify NSG associations + az network nsg list --resource-group --output table + ``` + +## βœ… **Validation Steps** + +**πŸ” Infrastructure Validation:** +- [ ] VNET created with correct address space (e.g., 10.0.0.0/16) +- [ ] Subnets properly segmented with non-overlapping CIDR blocks +- [ ] NSG created and associated with all subnets +- [ ] Application Load Balancer deployed successfully +- [ ] All resources in the specified region + +**πŸ”§ Technical Validation:** +```bash +# Comprehensive validation script +echo "πŸ” Validating VNET deployment..." 
+ +# Check if VNET exists +VNET_NAME=$(terraform output -raw vnet_name 2>/dev/null || echo "devops-vnet") +RG_NAME=$(terraform output -raw resource_group_name 2>/dev/null || echo "devops-rg") + +if az network vnet show --name $VNET_NAME --resource-group $RG_NAME &>/dev/null; then + echo "βœ… VNET exists" + + # Check subnet count + SUBNET_COUNT=$(az network vnet subnet list --vnet-name $VNET_NAME --resource-group $RG_NAME --query "length(@)") + echo "πŸ“Š Subnets created: $SUBNET_COUNT" + + # Check NSG associations + NSG_COUNT=$(az network nsg list --resource-group $RG_NAME --query "length(@)") + echo "πŸ›‘οΈ NSGs created: $NSG_COUNT" + + # Check ALB + ALB_COUNT=$(az network application-gateway list --resource-group $RG_NAME --query "length(@)" 2>/dev/null || echo "0") + echo "βš–οΈ Load Balancers: $ALB_COUNT" + + echo "βœ… VNET validation complete!" +else + echo "❌ VNET validation failed" + exit 1 +fi +``` + +**πŸ“Š Resource Inventory:** +- [ ] **Virtual Network** - Main network container +- [ ] **Subnets** - Network segments for different workloads +- [ ] **Network Security Groups** - Firewall rules for traffic control +- [ ] **ALB Subnet Association** - Load balancer network binding +- [ ] **ALB Frontend Configuration** - Load balancer front-end setup + +## 🚨 **Troubleshooting Guide** + +**❌ Common Terraform Issues:** +```bash +# Problem: Backend initialization fails +# Solution: Verify storage account and container exist +az storage account show --name --resource-group + +# Problem: Address space conflicts +# Solution: Check for overlapping CIDR blocks +terraform plan | grep "address_prefixes" + +# Problem: Permission errors +# Solution: Verify Azure CLI authentication and permissions +az account show +az role assignment list --assignee $(az account show --query user.name -o tsv) +``` + +**πŸ”§ Network Configuration Issues:** +```bash +# Problem: Subnet creation fails +# Solution: Verify address space doesn't overlap +az network vnet show --name 
--resource-group --query "addressSpace" + +# Problem: NSG association fails +# Solution: Check if subnet is already associated with another NSG +az network vnet subnet show --name --vnet-name --resource-group --query "networkSecurityGroup" + +# Problem: ALB deployment fails +# Solution: Verify subnet has sufficient address space +az network vnet subnet list --vnet-name --resource-group --query "[].{Name:name,AddressPrefix:addressPrefix,AvailableIPs:availableIpAddressCount}" +``` + +**🧹 Cleanup Commands:** +```bash +# Remove specific resources if deployment fails partially +terraform destroy -target=azurerm_application_load_balancer.alb +terraform destroy -target=azurerm_network_security_group.nsg + +# Complete cleanup +terraform destroy +``` + +## πŸ’‘ **Knowledge Check** + +**🎯 Architecture Questions:** +1. What's the difference between a VNET and a subnet? +2. Why do we use Network Security Groups? +3. How does Application Gateway for Containers differ from traditional load balancers? +4. What are the benefits of subnet segmentation? + +**πŸ“ Answers:** +1. **VNET** is a logical network container; **subnets** are segments within the VNET for workload isolation +2. **NSGs** act as virtual firewalls, controlling inbound/outbound traffic at subnet and NIC levels +3. **AGfC** provides Layer 7 load balancing with advanced routing, SSL termination, and WAF capabilities +4. **Segmentation** improves security, simplifies management, and enables granular access control + +**πŸ” Technical Deep Dive:** +- **Address Planning:** How would you design subnets for a multi-tier application? +- **Security:** What NSG rules would you implement for a web application? +- **Scalability:** How does proper network design support future growth? 
+ +## 🎯 **Next Steps** + +**βœ… Upon Completion:** +- [ ] Azure VNET successfully created and validated +- [ ] Network Security Groups configured and associated +- [ ] Application Load Balancer deployed +- [ ] Understanding of Azure networking fundamentals +- [ ] Ready for Log Analytics workspace creation + +**➑️ Continue to:** [Create Log Analytics](./3-Create-Log-Analytics.md) + +--- + +## πŸ“š **Additional Resources** + +- πŸ”— [Azure VNET Documentation](https://docs.microsoft.com/en-us/azure/virtual-network/) +- πŸ”— [Network Security Groups Best Practices](https://docs.microsoft.com/en-us/azure/virtual-network/network-security-groups-overview) +- πŸ”— [Application Gateway for Containers](https://docs.microsoft.com/en-us/azure/application-gateway/) +- πŸ”— [Azure Network Architecture](https://docs.microsoft.com/en-us/azure/architecture/networking/) + +**🎯 Pro Tips:** +- Use **consistent naming conventions** for network resources +- Plan **address spaces** carefully to avoid conflicts with on-premises networks +- Implement **least privilege** principle in NSG rules +- Consider **Network Watcher** for monitoring and diagnostics diff --git a/2-Terraform-AZURE-Services-Creation/2-vnet/README.md b/2-Terraform-AZURE-Services-Creation/2-vnet/README.md new file mode 100644 index 0000000..d2c5654 --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/2-vnet/README.md @@ -0,0 +1,50 @@ + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.11 | +| [azurerm](#requirement\_azurerm) | >= 4.27.0 | + +## Providers + +| Name | Version | +|------|---------| +| [azurerm](#provider\_azurerm) | >= 4.27.0 | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [azurerm_application_load_balancer.alb](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/application_load_balancer) | resource | +| [azurerm_application_load_balancer_frontend.example](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/application_load_balancer_frontend) | resource | +| [azurerm_application_load_balancer_subnet_association.alb](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/application_load_balancer_subnet_association) | resource | +| [azurerm_network_security_group.nsg](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/network_security_group) | resource | +| [azurerm_subnet.aks_subnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/subnet) | resource | +| [azurerm_subnet.app_gwsubnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/subnet) | resource | +| [azurerm_subnet_network_security_group_association.aks_subnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/subnet_network_security_group_association) | resource | +| [azurerm_subnet_network_security_group_association.app_gwsubnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/subnet_network_security_group_association) | resource | +| [azurerm_virtual_network.virtual_network](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/virtual_network) | resource | +| [azurerm_resource_group.resource_group](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/resource_group) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aks\_subnet\_address\_name](#input\_aks\_subnet\_address\_name) | AKS Subnet Address Name | `string` | n/a | yes | +| 
[aks\_subnet\_address\_prefix](#input\_aks\_subnet\_address\_prefix) | AKS Subnet Address Space | `string` | n/a | yes | +| [location](#input\_location) | Azure Location of resources | `string` | `"uksouth"` | no | +| [name](#input\_name) | Name for resources | `string` | `"devopsthehardway"` | no | +| [network\_address\_space](#input\_network\_address\_space) | Azure VNET Address Space | `string` | n/a | yes | +| [subnet\_address\_name](#input\_subnet\_address\_name) | Subnet Address Name | `string` | n/a | yes | +| [subnet\_address\_prefix](#input\_subnet\_address\_prefix) | Subnet Address Space | `string` | n/a | yes | +| [tags](#input\_tags) | n/a | `map(string)` | n/a | yes | + +## Outputs + +No outputs. + \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/2-vnet/alb.tf b/2-Terraform-AZURE-Services-Creation/2-vnet/alb.tf new file mode 100644 index 0000000..c21fb83 --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/2-vnet/alb.tf @@ -0,0 +1,19 @@ +# Azure Application Load Balancer for Containers +resource "azurerm_application_load_balancer" "alb" { + name = "devopsthehardway-alb" + location = var.location + resource_group_name = data.azurerm_resource_group.resource_group.name + + tags = var.tags +} + +resource "azurerm_application_load_balancer_subnet_association" "alb" { + name = "alb-subnet-association" + application_load_balancer_id = azurerm_application_load_balancer.alb.id + subnet_id = azurerm_subnet.app_gwsubnet.id +} + +resource "azurerm_application_load_balancer_frontend" "example" { + name = "alb-frontend" + application_load_balancer_id = azurerm_application_load_balancer.alb.id +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/2-vnet/data.tf b/2-Terraform-AZURE-Services-Creation/2-vnet/data.tf new file mode 100644 index 0000000..2c8d541 --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/2-vnet/data.tf @@ -0,0 +1,3 @@ +data "azurerm_resource_group" "resource_group" { + name = 
"${var.name}-rg" +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/2-vnet/nsg.tf b/2-Terraform-AZURE-Services-Creation/2-vnet/nsg.tf new file mode 100644 index 0000000..cee847c --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/2-vnet/nsg.tf @@ -0,0 +1,53 @@ +resource "azurerm_network_security_group" "nsg" { + name = "devopsthehardway-nsg" + location = var.location + resource_group_name = data.azurerm_resource_group.resource_group.name + tags = var.tags + + security_rule { + name = "Allow-HTTP-Inbound" + priority = 100 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "80" + source_address_prefix = "Internet" + destination_address_prefix = "*" + } + + security_rule { + name = "Allow-HTTPS-Inbound" + priority = 110 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "443" + source_address_prefix = "Internet" + destination_address_prefix = "*" + } + + # Required for Azure Load Balancer health probes (AGC infrastructure) + security_rule { + name = "Allow-AzureLoadBalancer-Inbound" + priority = 120 + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = "AzureLoadBalancer" + destination_address_prefix = "*" + } +} + +resource "azurerm_subnet_network_security_group_association" "aks_subnet" { + subnet_id = azurerm_subnet.aks_subnet.id + network_security_group_id = azurerm_network_security_group.nsg.id +} + +resource "azurerm_subnet_network_security_group_association" "app_gwsubnet" { + subnet_id = azurerm_subnet.app_gwsubnet.id + network_security_group_id = azurerm_network_security_group.nsg.id +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/2-vnet/providers.tf b/2-Terraform-AZURE-Services-Creation/2-vnet/providers.tf new file mode 100644 index 0000000..541026e --- /dev/null +++ 
b/2-Terraform-AZURE-Services-Creation/2-vnet/providers.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.14.0, < 2.0.0" + backend "azurerm" { + resource_group_name = "devopshardway-rg" + storage_account_name = "devopshardwaysa" + container_name = "tfstate" + key = "vnet-terraform.tfstate" + } + + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = ">= 4.68.0, < 5.0.0" + } + } +} + +provider "azurerm" { + features {} + subscription_id = "04109105-f3ca-44ac-a3a7-66b4936112c3" + +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/2-vnet/terraform.tfvars b/2-Terraform-AZURE-Services-Creation/2-vnet/terraform.tfvars new file mode 100644 index 0000000..4fdeff9 --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/2-vnet/terraform.tfvars @@ -0,0 +1,14 @@ +name = "devopsthehardway" +location = "uksouth" +network_address_space = "192.168.0.0/16" +aks_subnet_address_name = "aks" +aks_subnet_address_prefix = "192.168.0.0/24" +subnet_address_name = "appgw" +subnet_address_prefix = "192.168.4.0/24" + +tags = { + "Purpose" = "azure-devops-hardway" + "Environment" = "DevOps" + "DeployedBy" = "Terraform" + "Project" = "devopsthehardway" +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/2-vnet/variables.tf b/2-Terraform-AZURE-Services-Creation/2-vnet/variables.tf new file mode 100644 index 0000000..cbc32ee --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/2-vnet/variables.tf @@ -0,0 +1,40 @@ +variable "name" { + type = string + default = "devopsthehardway" + description = "Name for resources" +} + +variable "location" { + type = string + default = "uksouth" + description = "Azure Location of resources" +} + +variable "network_address_space" { + type = string + description = "Azure VNET Address Space" +} + +variable "aks_subnet_address_name" { + type = string + description = "AKS Subnet Address Name" +} + +variable "aks_subnet_address_prefix" { + type = string + description = "AKS 
Subnet Address Space" +} + +variable "subnet_address_name" { + type = string + description = "Subnet Address Name" +} + +variable "subnet_address_prefix" { + type = string + description = "Subnet Address Space" +} + +variable "tags" { + type = map(string) +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/2-vnet/vnet.tf b/2-Terraform-AZURE-Services-Creation/2-vnet/vnet.tf new file mode 100644 index 0000000..1b50580 --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/2-vnet/vnet.tf @@ -0,0 +1,34 @@ + +resource "azurerm_virtual_network" "virtual_network" { + name = "${var.name}-vnet" + location = var.location + resource_group_name = data.azurerm_resource_group.resource_group.name + address_space = [var.network_address_space] + + tags = var.tags + +} + +resource "azurerm_subnet" "aks_subnet" { + name = var.aks_subnet_address_name + resource_group_name = data.azurerm_resource_group.resource_group.name + virtual_network_name = azurerm_virtual_network.virtual_network.name + address_prefixes = [var.aks_subnet_address_prefix] +} + +resource "azurerm_subnet" "app_gwsubnet" { + name = var.subnet_address_name + resource_group_name = data.azurerm_resource_group.resource_group.name + virtual_network_name = azurerm_virtual_network.virtual_network.name + address_prefixes = [var.subnet_address_prefix] + + # Required delegation for Application Gateway (Service Networking) + delegation { + name = "delegation" + + service_delegation { + name = "Microsoft.ServiceNetworking/trafficControllers" + actions = ["Microsoft.Network/virtualNetworks/subnets/join/action"] + } + } +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/3-Create-Log-Analytics.md b/2-Terraform-AZURE-Services-Creation/3-Create-Log-Analytics.md new file mode 100644 index 0000000..79d8708 --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/3-Create-Log-Analytics.md @@ -0,0 +1,301 @@ +# πŸ“Š Create an Azure Log Analytics Workspace + +> **Estimated Time:** ⏱️ 
**15-20 minutes** + +## 🎯 **Learning Objectives** + +By the end of this tutorial, you will: +- [ ] **Deploy Log Analytics Workspace** for centralized logging +- [ ] **Enable Container Insights** for AKS monitoring +- [ ] **Configure monitoring solutions** for comprehensive observability +- [ ] **Understand logging architecture** and data retention policies +- [ ] **Validate workspace deployment** through Azure Portal + +## πŸ“‹ **Prerequisites** + +**βœ… Required Knowledge:** +- [ ] Basic understanding of Azure monitoring concepts +- [ ] Familiarity with logging and observability principles +- [ ] Terraform configuration fundamentals + +**πŸ”§ Required Tools:** +- [ ] Terraform CLI installed and configured +- [ ] Azure CLI with active subscription +- [ ] Access to Azure subscription with Contributor permissions +- [ ] Completed: [Create Azure VNET](./2-Create-VNET.md) + +**πŸ—οΈ Infrastructure Dependencies:** +- [ ] Azure Storage Account for Terraform state +- [ ] Resource group for Log Analytics resources +- [ ] VNET infrastructure (for future AKS integration) + +## πŸš€ **Step-by-Step Implementation** + +### **Step 1: Review Configuration and Setup** ⏱️ *5 minutes* + +1. **πŸ“‚ Navigate to Log Analytics Directory** + ```bash + cd 2-Terraform-AZURE-Services-Creation/3-log-analytics + ``` + +2. **πŸ“‹ Review terraform.tfvars Configuration** + ```bash + cat terraform.tfvars + ``` + **πŸ” Key Variables to Verify:** + - [ ] `location` - Azure region (should match VNET) + - [ ] `resource_group_name` - Target resource group + - [ ] `log_analytics_workspace_name` - Unique workspace name + - [ ] `sku` - Pricing tier (typically "PerGB2018") + - [ ] `retention_in_days` - Data retention period (30-730 days) + +3. 
**πŸ—οΈ Understand Infrastructure Components** + + **πŸ“„ la.tf - Log Analytics Workspace:** + ```hcl + # Creates Log Analytics Workspace + resource "azurerm_log_analytics_workspace" "law" { + name = var.log_analytics_workspace_name + location = var.location + resource_group_name = var.resource_group_name + sku = var.sku + retention_in_days = var.retention_in_days + + tags = var.tags + } + + # Enables Container Insights solution + resource "azurerm_log_analytics_solution" "container_insights" { + solution_name = "ContainerInsights" + location = var.location + resource_group_name = var.resource_group_name + workspace_resource_id = azurerm_log_analytics_workspace.law.id + workspace_name = azurerm_log_analytics_workspace.law.name + + plan { + publisher = "Microsoft" + product = "OMSGallery/ContainerInsights" + } + } + ``` + + **🎯 Configuration Highlights:** + - [ ] **PerGB2018 SKU** - Pay-per-GB ingestion model + - [ ] **Flexible retention** - Configurable data retention (30-730 days) + - [ ] **Container Insights** - Specialized monitoring for Kubernetes + - [ ] **Resource tagging** - Consistent labeling for management + +### **Step 2: Deploy Log Analytics Infrastructure** ⏱️ *8 minutes* + +4. **πŸ”§ Initialize Terraform Backend** + ```bash + terraform init + ``` + **βœ… Expected Output:** + ``` + Initializing the backend... + Successfully configured the backend "azurerm"! + Terraform has been successfully initialized! + ``` + +5. **πŸ“‹ Validate Configuration** + ```bash + terraform validate + ``` + **βœ… Expected:** "Success! The configuration is valid." + +6. **πŸ“Š Review Deployment Plan** + ```bash + terraform plan + ``` + **⏱️ Planning Time:** 30-60 seconds + **πŸ” Review Output:** Look for 2 resources to be created: + - `azurerm_log_analytics_workspace.law` + - `azurerm_log_analytics_solution.container_insights` + +7. 
**πŸš€ Deploy Infrastructure** + ```bash + terraform apply + ``` + **⏱️ Deployment Time:** 3-5 minutes + **πŸ’‘ Tip:** Type `yes` when prompted to confirm deployment + + **βœ… Expected Completion Message:** + ``` + Apply complete! Resources: 2 added, 0 changed, 0 destroyed. + + Outputs: + workspace_id = "/subscriptions/.../resourceGroups/.../providers/Microsoft.OperationalInsights/workspaces/..." + workspace_key = "" + ``` + +### **Step 3: Verify and Test Deployment** ⏱️ *5 minutes* + +8. **🌐 Check Azure Portal** + - Navigate to [Azure Portal](https://portal.azure.com) + - Go to your resource group + - Verify these resources are created: + - [ ] Log Analytics workspace with correct name + - [ ] Container Insights solution enabled + - [ ] Workspace in "Running" state + +9. **πŸ“‹ Verify Using Azure CLI** + ```bash + # List Log Analytics workspaces + az monitor log-analytics workspace list --resource-group --output table + + # Check workspace details + az monitor log-analytics workspace show --workspace-name --resource-group + + # Verify Container Insights solution + az monitor log-analytics solution list --resource-group --output table + ``` + +10. **πŸ” Test Workspace Connectivity** + ```bash + # Get workspace details from Terraform output + WORKSPACE_ID=$(terraform output -raw workspace_id) + echo "Workspace ID: $WORKSPACE_ID" + + # Test workspace is accessible + az monitor log-analytics workspace show --ids $WORKSPACE_ID --query "{Name:name,State:provisioningState,Sku:sku.name}" + ``` + +## βœ… **Validation Steps** + +**πŸ” Infrastructure Validation:** +- [ ] Log Analytics workspace created with correct configuration +- [ ] Container Insights solution deployed and active +- [ ] Workspace in "Succeeded" provisioning state +- [ ] Correct SKU and retention settings applied +- [ ] Resource tags properly applied + +**πŸ”§ Technical Validation:** +```bash +# Comprehensive validation script +echo "πŸ“Š Validating Log Analytics deployment..." 
+ +# Get workspace details from Terraform +WORKSPACE_NAME=$(terraform output -raw workspace_name 2>/dev/null || echo "devops-law") +RG_NAME=$(terraform output -raw resource_group_name 2>/dev/null || echo "devops-rg") + +# Check if workspace exists and is running +WORKSPACE_STATE=$(az monitor log-analytics workspace show --workspace-name $WORKSPACE_NAME --resource-group $RG_NAME --query "provisioningState" -o tsv 2>/dev/null) + +if [ "$WORKSPACE_STATE" = "Succeeded" ]; then + echo "βœ… Log Analytics workspace is running" + + # Check retention settings + RETENTION=$(az monitor log-analytics workspace show --workspace-name $WORKSPACE_NAME --resource-group $RG_NAME --query "retentionInDays" -o tsv) + echo "πŸ“… Data retention: $RETENTION days" + + # Check SKU + SKU=$(az monitor log-analytics workspace show --workspace-name $WORKSPACE_NAME --resource-group $RG_NAME --query "sku.name" -o tsv) + echo "πŸ’° Pricing tier: $SKU" + + # Check Container Insights solution + SOLUTION_COUNT=$(az monitor log-analytics solution list --resource-group $RG_NAME --query "length(@)") + echo "πŸ”§ Solutions installed: $SOLUTION_COUNT" + + echo "βœ… Log Analytics validation complete!" 
+else + echo "❌ Log Analytics validation failed - State: $WORKSPACE_STATE" + exit 1 +fi +``` + +**πŸ“Š Monitoring Readiness:** +- [ ] **Data Collection** - Workspace ready to receive logs +- [ ] **Query Interface** - KQL queries can be executed +- [ ] **Container Insights** - AKS monitoring solution active +- [ ] **Alerting Capability** - Ready for alert rule creation +- [ ] **Dashboard Integration** - Compatible with Azure dashboards + +## 🚨 **Troubleshooting Guide** + +**❌ Common Deployment Issues:** +```bash +# Problem: Workspace name already exists globally +# Solution: Log Analytics workspace names must be globally unique +terraform plan | grep "already exists" + +# Problem: Insufficient permissions +# Solution: Verify contributor access to subscription +az role assignment list --assignee $(az account show --query user.name -o tsv) --query "[?roleDefinitionName=='Contributor']" + +# Problem: Solution deployment fails +# Solution: Check if ContainerInsights is supported in region +az provider show --namespace Microsoft.OperationsManagement --query "resourceTypes[?resourceType=='solutions'].locations" +``` + +**πŸ”§ Configuration Issues:** +```bash +# Problem: Retention period invalid +# Solution: Verify retention is between 30-730 days +terraform plan | grep "retention_in_days" + +# Problem: SKU not supported +# Solution: Check available SKUs for your region +az monitor log-analytics workspace list-usages --workspace-name --resource-group + +# Problem: Resource group not found +# Solution: Verify resource group exists +az group show --name +``` + +**🧹 Cleanup Commands:** +```bash +# Remove specific solution if deployment fails +terraform destroy -target=azurerm_log_analytics_solution.container_insights + +# Remove workspace +terraform destroy -target=azurerm_log_analytics_workspace.law + +# Complete cleanup +terraform destroy +``` + +## πŸ’‘ **Knowledge Check** + +**🎯 Monitoring Questions:** +1. 
What's the difference between Log Analytics and Application Insights? +2. Why is Container Insights specifically important for AKS? +3. How does data retention impact costs in Log Analytics? +4. What types of queries can you run in Log Analytics? + +**πŸ“ Answers:** +1. **Log Analytics** provides infrastructure and application logs; **Application Insights** focuses on application performance monitoring +2. **Container Insights** provides Kubernetes-specific metrics, logs, and visualizations for pod, node, and cluster health +3. **Longer retention** increases storage costs; typical retention is 30-90 days for cost optimization +4. **KQL queries** can analyze logs, metrics, performance data, and create custom alerts and dashboards + +**πŸ” Technical Deep Dive:** +- **Query Language:** How would you write a KQL query to find failed pods? +- **Alerting:** What metrics would you monitor for AKS cluster health? +- **Cost Management:** How can you optimize Log Analytics costs? + +## 🎯 **Next Steps** + +**βœ… Upon Completion:** +- [ ] Log Analytics workspace successfully deployed +- [ ] Container Insights solution enabled +- [ ] Monitoring infrastructure ready for AKS +- [ ] Understanding of Azure monitoring concepts +- [ ] Ready for AKS cluster creation + +**➑️ Continue to:** [Create AKS Cluster & IAM Roles](./4-Create-AKS-Cluster-IAM-Roles.md) + +--- + +## πŸ“š **Additional Resources** + +- πŸ”— [Azure Log Analytics Documentation](https://docs.microsoft.com/en-us/azure/azure-monitor/logs/) +- πŸ”— [Container Insights Overview](https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-overview) +- πŸ”— [KQL Query Language](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/) +- πŸ”— [Log Analytics Pricing](https://azure.microsoft.com/en-us/pricing/details/monitor/) + +**🎯 Pro Tips:** +- Set up **data retention policies** based on compliance requirements +- Use **workspace-based pricing** for predictable costs +- Create **custom 
dashboards** for real-time monitoring +- Implement **automated alerts** for proactive issue detection diff --git a/2-Terraform-AZURE-Services-Creation/3-log-analytics/README.md b/2-Terraform-AZURE-Services-Creation/3-log-analytics/README.md new file mode 100644 index 0000000..05450a5 --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/3-log-analytics/README.md @@ -0,0 +1,38 @@ + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.11 | +| [azurerm](#requirement\_azurerm) | >= 4.27.0 | + +## Providers + +| Name | Version | +|------|---------| +| [azurerm](#provider\_azurerm) | >= 4.27.0 | + +## Modules + +No modules. + +## Resources + +| Name | Type | +|------|------| +| [azurerm_log_analytics_solution.Log_Analytics_Solution_ContainerInsights](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/log_analytics_solution) | resource | +| [azurerm_log_analytics_workspace.Log_Analytics_WorkSpace](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/log_analytics_workspace) | resource | +| [azurerm_resource_group.resource_group](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/resource_group) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [location](#input\_location) | Azure Location of resources | `string` | `"uksouth"` | no | +| [name](#input\_name) | Name for resources | `string` | `"devopsthehardway"` | no | +| [tags](#input\_tags) | n/a | `map(string)` | n/a | yes | + +## Outputs + +No outputs. 
+ \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/3-log-analytics/data.tf b/2-Terraform-AZURE-Services-Creation/3-log-analytics/data.tf new file mode 100644 index 0000000..6515a91 --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/3-log-analytics/data.tf @@ -0,0 +1,3 @@ +data "azurerm_resource_group" "resource_group" { + name = "${var.name}-rg" +} diff --git a/2-Terraform-AZURE-Services-Creation/3-log-analytics/la.tf b/2-Terraform-AZURE-Services-Creation/3-log-analytics/la.tf new file mode 100644 index 0000000..432617b --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/3-log-analytics/la.tf @@ -0,0 +1,23 @@ +resource "azurerm_log_analytics_workspace" "Log_Analytics_WorkSpace" { + # The WorkSpace name has to be unique across the whole of azure, not just the current subscription/tenant. + name = "${var.name}-la" + location = var.location + resource_group_name = data.azurerm_resource_group.resource_group.name + sku = "PerGB2018" + + tags = var.tags + +} + +resource "azurerm_log_analytics_solution" "Log_Analytics_Solution_ContainerInsights" { + solution_name = "ContainerInsights" + location = azurerm_log_analytics_workspace.Log_Analytics_WorkSpace.location + resource_group_name = data.azurerm_resource_group.resource_group.name + workspace_resource_id = azurerm_log_analytics_workspace.Log_Analytics_WorkSpace.id + workspace_name = azurerm_log_analytics_workspace.Log_Analytics_WorkSpace.name + + plan { + publisher = "Microsoft" + product = "OMSGallery/ContainerInsights" + } +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/3-log-analytics/providers.tf b/2-Terraform-AZURE-Services-Creation/3-log-analytics/providers.tf new file mode 100644 index 0000000..a9edeed --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/3-log-analytics/providers.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.14.0, < 2.0.0" + backend "azurerm" { + resource_group_name = "devopshardway-rg" + storage_account_name = 
"devopshardwaysa" + container_name = "tfstate" + key = "la-terraform.tfstate" + } + + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = ">= 4.68.0, < 5.0.0" + } + } +} + +provider "azurerm" { + features {} + subscription_id = "04109105-f3ca-44ac-a3a7-66b4936112c3" + +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/3-log-analytics/terraform.tfvars b/2-Terraform-AZURE-Services-Creation/3-log-analytics/terraform.tfvars new file mode 100644 index 0000000..de58302 --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/3-log-analytics/terraform.tfvars @@ -0,0 +1,9 @@ +name = "devopsthehardway" +location = "uksouth" + +tags = { + "Purpose" = "azure-devops-hardway" + "Environment" = "DevOps" + "DeployedBy" = "Terraform" + "Project" = "devopsthehardway" +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/3-log-analytics/variables.tf b/2-Terraform-AZURE-Services-Creation/3-log-analytics/variables.tf new file mode 100644 index 0000000..713e2be --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/3-log-analytics/variables.tf @@ -0,0 +1,15 @@ +variable "name" { + type = string + default = "devopsthehardway" + description = "Name for resources" +} + +variable "location" { + type = string + default = "uksouth" + description = "Azure Location of resources" +} + +variable "tags" { + type = map(string) +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/4-Create-AKS-Cluster-IAM-Roles.md b/2-Terraform-AZURE-Services-Creation/4-Create-AKS-Cluster-IAM-Roles.md new file mode 100644 index 0000000..16eaf25 --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/4-Create-AKS-Cluster-IAM-Roles.md @@ -0,0 +1,306 @@ +# Create An AKS Cluster and IAM Roles + +## 🎯 **Tutorial Overview* +**Estimated Time:** ⏱️ **25-35 minutes** +**Prerequisites Level:** Kubernetes and Azure IAM knowledge required + +In this lab, you'll create an Azure Kubernetes Service (AKS) cluster with 
advanced security configurations and set up the necessary Identity and Access Management (IAM) roles. + +### πŸ“‹ **Learning Objectives** +By the end of this tutorial, you will: +- [ ] Deploy a production-ready AKS cluster with auto-scaling +- [ ] Configure Azure RBAC for Kubernetes access control +- [ ] Set up managed identities for secure authentication +- [ ] Implement network policies for enhanced security +- [ ] Understand AKS availability zones and high availability +- [ ] Validate cluster functionality and access + +### ⚠️ **Important Notes** +- This creates a Standard Load Balancer (may incur costs) +- SSH key is required for node access +- Azure AD group must exist for admin access +- Network configuration depends on previous VNET lab + +## πŸ› οΈ Create the AKS Terraform Configuration + +### βœ… **Prerequisites Checklist** +Before starting, ensure you have: +- [ ] **Completed previous labs:** + - [ ] Azure Container Registry (ACR) created + - [ ] Virtual Network (VNET) configured + - [ ] Log Analytics workspace deployed +- [ ] **Azure AD Group** created for AKS administrators +- [ ] **SSH Key Pair** generated for node access +- [ ] Basic understanding of Kubernetes concepts +- [ ] Azure CLI authenticated with sufficient permissions + +### 🧠 **Background Knowledge** +**What is Azure Kubernetes Service (AKS)?** +- Managed Kubernetes service in Azure +- Automated updates, scaling, and node management +- Integrated with Azure services (AAD, ACR, etc.) +- Built-in security and compliance features + +**Key Components:** +- **Node Pools:** Groups of VMs running your workloads +- **Managed Identity:** Secure authentication without secrets +- **RBAC:** Role-based access control integration +- **Network Policies:** Micro-segmentation for pods + +## πŸš€ **Step-by-Step Implementation** + +### **Step 1: Prepare Configuration** ⏱️ *10 minutes* + +1. 
**πŸ“ Review and Customize Variables** + - Open the [terraform.tfvars](https://github.com/thomast1906/DevOps-The-Hard-Way-Azure/tree/main/2-Terraform-AZURE-Services-Creation/4-aks/terraform.tfvars) file + - **Critical configurations to verify:** + + ```hcl + # Core cluster settings + name = "your-unique-name" + kubernetes_version = "1.35" + location = "uksouth" + + # Node configuration + agent_count = 2 # Start with 2 nodes + vm_size = "Standard_DS2_v2" # Suitable for learning + + # Security settings + ssh_public_key = "ssh-rsa AAAAB3N..." # Your SSH public key + aks_admins_group_object_id = "guid" # Azure AD group ID + ``` + + **πŸ”‘ Generate SSH Key (if needed):** + ```bash + ssh-keygen -t rsa -b 4096 -f ~/.ssh/aks_key -C "your-email@example.com" + cat ~/.ssh/aks_key.pub # Copy this to terraform.tfvars + ``` + +### **Step 2: Understand Infrastructure Components** ⏱️ *8 minutes* + +2. **πŸ—οΈ Review Terraform Configuration** + + Study the [AKS Terraform configuration](https://github.com/thomast1906/DevOps-The-Hard-Way-Azure/tree/main/2-Terraform-AZURE-Services-Creation/4-aks) components: + + **πŸ“„ aks.tf - Main Cluster Configuration:** + - [ ] **AKS Cluster** (`azurerm_kubernetes_cluster`) + - Kubernetes 1.35 with latest features + - Auto-scaling: 1-3 nodes for cost optimization + - Availability zones for high availability + - Azure RBAC integration + - Network policies for security + + **πŸ“„ managed_identity.tf - Security Setup:** + - [ ] **User Assigned Identity** for cluster authentication + - [ ] **Role Assignments** for ACR and network access + + **πŸ“„ rbac.tf - Access Control:** + - [ ] **Azure AD Integration** for user authentication + - [ ] **Admin Group Assignment** for cluster management + + **🎯 Key Features Enabled:** + - **Auto-scaling:** Responds to workload demands + - **Availability Zones:** Multi-zone deployment for HA + - **Azure RBAC:** Centralized access management + - **Network Policies:** Pod-to-pod communication control + - **Managed 
Identity:** Passwordless authentication + +### **Step 3: Deploy AKS Cluster** ⏱️ *15 minutes* + +3. **πŸš€ Create the AKS Infrastructure** + + **πŸ“‚ Navigate to AKS directory:** + ```bash + cd 2-Terraform-AZURE-Services-Creation/4-aks + ``` + + **πŸ”§ Initialize Terraform:** + ```bash + terraform init + ``` + **βœ… Expected:** Backend configured, providers downloaded + + **πŸ“‹ Plan deployment:** + ```bash + terraform plan + ``` + **βœ… Expected:** ~8-12 resources to be created + **⚠️ Review:** Ensure no unexpected changes + + **πŸš€ Deploy cluster:** + ```bash + terraform apply + ``` + **⏱️ Duration:** 10-15 minutes (AKS cluster creation is slow) + **βœ… Expected:** All resources created successfully + +## βœ… **Validation & Testing** + +### **Step 4: Verify AKS Deployment** ⏱️ *12 minutes* + +**πŸ” Comprehensive validation of your AKS cluster:** + +1. **Azure Portal Verification:** + - [ ] Navigate to [Kubernetes Services](https://portal.azure.com/#browse/Microsoft.ContainerService%2FmanagedClusters) + - [ ] Locate your AKS cluster + - [ ] Verify cluster status: **Running** + - [ ] Check node pool: **Ready** with correct node count + - [ ] Confirm Kubernetes version: **1.35** + +2. **Get Cluster Credentials:** + ```bash + # Download cluster credentials + az aks get-credentials --resource-group "your-rg-name" --name "your-aks-name" + + # Verify kubectl context + kubectl config current-context + ``` + **βœ… Expected:** Context points to your AKS cluster + +3. **Cluster Functionality Tests:** + ```bash + # Check cluster info + kubectl cluster-info + + # List nodes + kubectl get nodes -o wide + + # Check system pods + kubectl get pods -n kube-system + + # Verify RBAC is working + kubectl auth can-i get pods --as=system:serviceaccount:default:default + ``` + +4. 
**Network Policy Validation:** + ```bash + # Check if network policies are supported + kubectl get networkpolicies --all-namespaces + + # Verify CNI plugin + kubectl get daemonset -n kube-system + ``` + +**πŸ“Έ Expected Azure Portal View:** +![AKS Cluster in Portal](images/4-aks.png) + +### **πŸ§ͺ Advanced Functionality Tests** +```bash +# Test auto-scaling (optional) +kubectl create deployment test-scale --image=nginx --replicas=10 +kubectl get pods -w + +# Test Azure integration +kubectl create secret generic test-secret --from-literal=key=value +kubectl get secrets + +# Clean up test resources +kubectl delete deployment test-scale +kubectl delete secret test-secret +``` + +## 🚨 **Troubleshooting Guide** + +### **Common Issues & Solutions** + +| ❌ **Problem** | πŸ”§ **Solution** | +|----------------|-----------------| +| SSH key format error | Ensure SSH key starts with `ssh-rsa` and is one line | +| Azure AD group not found | Verify group Object ID (not display name) | +| Insufficient quota | Check regional vCPU limits in Azure Portal | +| Network connectivity issues | Verify VNET and subnet configurations | +| RBAC authentication failed | Confirm user is in specified Azure AD group | + +### **πŸ†˜ Detailed Troubleshooting** + +**Issue: "Invalid SSH public key"** +```bash +# Validate SSH key format +ssh-keygen -l -f ~/.ssh/your_key.pub + +# Regenerate if needed +ssh-keygen -t rsa -b 4096 -f ~/.ssh/aks_key -N "" +``` + +**Issue: "Quota exceeded"** +```bash +# Check current usage +az vm list-usage --location "uksouth" --query "[?name.value=='cores']" + +# Request quota increase in Azure Portal +``` + +**Issue: "Cannot connect to cluster"** +```bash +# Verify credentials +az aks get-credentials --resource-group "rg-name" --name "cluster-name" --overwrite-existing + +# Check Azure AD authentication +az aks get-versions --location "uksouth" +``` + +## πŸŽ“ **Knowledge Check Questions** + +Test your understanding: + +- [ ] **Question 1:** Why is auto-scaling important 
for AKS clusters? +
+ πŸ’‘ Answer + Auto-scaling automatically adjusts node count based on resource demands, optimizing costs by scaling down during low usage and ensuring performance during peak loads. +
+ +- [ ] **Question 2:** What's the benefit of Azure RBAC integration? +
+ πŸ’‘ Answer + Centralizes access management through Azure AD, eliminates need for separate Kubernetes RBAC, provides audit trails, and integrates with enterprise identity systems. +
+ +- [ ] **Question 3:** Why use availability zones for AKS? +
+ οΏ½ Answer + Distributes nodes across multiple data centers within a region, providing protection against single zone failures and improving overall availability. +
+ +## 🎯 **Achievement Unlocked!** +πŸ† **Kubernetes Orchestrator** - You've successfully deployed a production-ready AKS cluster with advanced security! + +### **What You've Accomplished:** +- [x] Deployed AKS cluster with Kubernetes 1.35 +- [x] Configured auto-scaling and high availability +- [x] Implemented Azure RBAC and managed identities +- [x] Set up network policies for security +- [x] Validated cluster functionality +- [x] Mastered AKS troubleshooting + +### **Next Steps:** +- [ ] Proceed to [CI/CD Setup](./5-Run-CICD-For-AKS-Cluster.md) +- [ ] Learn about [Docker containerization](../3-Docker/1-Create-Docker-Image.md) + +## πŸ’‘ **Pro Tips & Best Practices** +5. Check the IAM settings to confirm the role assignments + +Example screenshot of created resources: + +![](images/4-aks.png) + +## 🧠 Knowledge Check + +After creating the AKS cluster and IAM roles, consider these questions: +1. Why is it important to use managed identities with AKS? +2. How does Azure RBAC enhance the security of your AKS cluster compared to basic RBAC? +3. What are the benefits of using federated identity credentials? +4. How does auto-scaling help with cost optimization and performance? +5. Why are availability zones important for production workloads? +6. What security benefits do network policies provide? + +## πŸ’‘ Pro Tips + +1. **Security Best Practices**: + - Enable Azure Policy for Kubernetes to enforce organisational standards and assess compliance at scale + - Regularly review and audit RBAC permissions + - Monitor cluster logs through the integrated Log Analytics workspace + +2. 
**Cost Optimisation**: + - Auto-scaling will automatically adjust node count based on demand + - Use spot instances for non-critical workloads to reduce costs + - Monitor resource usage through Azure Monitor \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/4-aks/README.md b/2-Terraform-AZURE-Services-Creation/4-aks/README.md new file mode 100644 index 0000000..b80be58 --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/4-aks/README.md @@ -0,0 +1,54 @@ + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.11 | +| [azurerm](#requirement\_azurerm) | >= 4.27.0 | + +## Providers + +| Name | Version | +|------|---------| +| [azurerm](#provider\_azurerm) | >= 4.27.0 | + +## Modules + +No modules. + +## Resources + +| Name | Type | +|------|------| +| [azurerm_federated_identity_credential.alb_federated_identity](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/federated_identity_credential) | resource | +| [azurerm_kubernetes_cluster.k8s](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster) | resource | +| [azurerm_role_assignment.acr_pull](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.appgwcontainerfix2](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.appgwcontainerfix3](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.node_infrastructure_update_scale_set](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_user_assigned_identity.alb_identity](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/user_assigned_identity) | resource | +| 
[azurerm_container_registry.acr](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/container_registry) | data source | +| [azurerm_log_analytics_workspace.workspace](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/log_analytics_workspace) | data source | +| [azurerm_resource_group.node_resource_group](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/resource_group) | data source | +| [azurerm_resource_group.resource_group](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/resource_group) | data source | +| [azurerm_subnet.akssubnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/subnet) | data source | +| [azurerm_subnet.appgwsubnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/subnet) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [agent\_count](#input\_agent\_count) | n/a | `any` | n/a | yes | +| [aks\_admins\_group\_object\_id](#input\_aks\_admins\_group\_object\_id) | n/a | `any` | n/a | yes | +| [kubernetes\_cluster\_rbac\_enabled](#input\_kubernetes\_cluster\_rbac\_enabled) | n/a | `string` | `"true"` | no | +| [kubernetes\_version](#input\_kubernetes\_version) | n/a | `any` | n/a | yes | +| [location](#input\_location) | Azure Location of resources | `string` | `"uksouth"` | no | +| [name](#input\_name) | Name for resources | `string` | `"devopsthehardway"` | no | +| [ssh\_public\_key](#input\_ssh\_public\_key) | n/a | `any` | n/a | yes | +| [tags](#input\_tags) | n/a | `map(string)` | n/a | yes | +| [vm\_size](#input\_vm\_size) | n/a | `any` | n/a | yes | + +## Outputs + +No outputs. 
+ \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/4-aks/aks.tf b/2-Terraform-AZURE-Services-Creation/4-aks/aks.tf new file mode 100644 index 0000000..0c0d87d --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/4-aks/aks.tf @@ -0,0 +1,59 @@ +resource "azurerm_kubernetes_cluster" "k8s" { + name = "${var.name}aks" + location = var.location + resource_group_name = data.azurerm_resource_group.resource_group.name + dns_prefix = "${var.name}dns" + kubernetes_version = var.kubernetes_version + oidc_issuer_enabled = true + workload_identity_enabled = true + node_resource_group = "${var.name}-node-rg" + automatic_upgrade_channel = "patch" + local_account_disabled = false + + linux_profile { + admin_username = "ubuntu" + + ssh_key { + key_data = var.ssh_public_key + } + } + + default_node_pool { + name = "agentpool" + node_count = var.agent_count + vm_size = var.vm_size + vnet_subnet_id = data.azurerm_subnet.akssubnet.id + type = "VirtualMachineScaleSets" + orchestrator_version = var.kubernetes_version + auto_scaling_enabled = true + min_count = 1 + max_count = 3 + max_pods = 30 + os_disk_size_gb = 30 + zones = ["1", "2", "3"] + } + + identity { + type = "SystemAssigned" + } + + oms_agent { + log_analytics_workspace_id = data.azurerm_log_analytics_workspace.workspace.id + } + + network_profile { + load_balancer_sku = "standard" + network_plugin = "azure" + network_policy = "azure" + dns_service_ip = "10.2.0.10" + service_cidr = "10.2.0.0/24" + } + + azure_active_directory_role_based_access_control { + azure_rbac_enabled = true + admin_group_object_ids = [var.aks_admins_group_object_id] + } + + tags = var.tags + +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/4-aks/data.tf b/2-Terraform-AZURE-Services-Creation/4-aks/data.tf new file mode 100644 index 0000000..d30371e --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/4-aks/data.tf @@ -0,0 +1,32 @@ +data "azurerm_resource_group" "resource_group" { + name = 
"${var.name}-rg" +} + +data "azurerm_subnet" "akssubnet" { + name = "aks" + virtual_network_name = "${var.name}-vnet" + resource_group_name = data.azurerm_resource_group.resource_group.name +} + +data "azurerm_subnet" "appgwsubnet" { + name = "appgw" + virtual_network_name = "${var.name}-vnet" + resource_group_name = data.azurerm_resource_group.resource_group.name +} + +data "azurerm_log_analytics_workspace" "workspace" { + name = "${var.name}-la" + resource_group_name = data.azurerm_resource_group.resource_group.name +} + +data "azurerm_container_registry" "acr" { + name = "${var.name}azurecr" + resource_group_name = data.azurerm_resource_group.resource_group.name +} + +data "azurerm_resource_group" "node_resource_group" { + name = azurerm_kubernetes_cluster.k8s.node_resource_group + depends_on = [ + azurerm_kubernetes_cluster.k8s + ] +} diff --git a/2-Terraform-AZURE-Services-Creation/4-aks/managed_identity.tf b/2-Terraform-AZURE-Services-Creation/4-aks/managed_identity.tf new file mode 100644 index 0000000..04bf1a7 --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/4-aks/managed_identity.tf @@ -0,0 +1,20 @@ +resource "azurerm_user_assigned_identity" "alb_identity" { + location = var.location + resource_group_name = data.azurerm_resource_group.resource_group.name + name = "azure-alb-identity" +} + +resource "azurerm_federated_identity_credential" "alb_federated_identity" { + name = "azure-alb-identity" + resource_group_name = data.azurerm_resource_group.resource_group.name + audience = ["api://AzureADTokenExchange"] + issuer = azurerm_kubernetes_cluster.k8s.oidc_issuer_url + parent_id = azurerm_user_assigned_identity.alb_identity.id + subject = "system:serviceaccount:azure-alb-system:alb-controller-sa" + + depends_on = [ + azurerm_user_assigned_identity.alb_identity, + azurerm_kubernetes_cluster.k8s + + ] +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/4-aks/providers.tf b/2-Terraform-AZURE-Services-Creation/4-aks/providers.tf 
new file mode 100644 index 0000000..c6014a5 --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/4-aks/providers.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.14.0, < 2.0.0" + backend "azurerm" { + resource_group_name = "devopshardway-rg" + storage_account_name = "devopshardwaysa" + container_name = "tfstate" + key = "aks-terraform.tfstate" + } + + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = ">= 4.68.0, < 5.0.0" + } + } +} + +provider "azurerm" { + features {} + subscription_id = "04109105-f3ca-44ac-a3a7-66b4936112c3" + +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/4-aks/rbac.tf b/2-Terraform-AZURE-Services-Creation/4-aks/rbac.tf new file mode 100644 index 0000000..e9430be --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/4-aks/rbac.tf @@ -0,0 +1,43 @@ +resource "azurerm_role_assignment" "node_infrastructure_update_scale_set" { + principal_id = azurerm_kubernetes_cluster.k8s.kubelet_identity[0].object_id + scope = data.azurerm_resource_group.node_resource_group.id + role_definition_name = "Virtual Machine Contributor" + depends_on = [ + azurerm_kubernetes_cluster.k8s + ] +} + +resource "azurerm_role_assignment" "acr_pull" { + principal_id = azurerm_kubernetes_cluster.k8s.kubelet_identity[0].object_id + scope = data.azurerm_container_registry.acr.id + role_definition_name = "acrpull" + depends_on = [ + azurerm_kubernetes_cluster.k8s + ] +} + +#fixing for "The client '62119122-6287-4620-98b4-bf86535e2ece' with object id '62119122-6287-4620-98b4-bf86535e2ece' does not have authorization to perform action 'Microsoft.ServiceNetworking/register/action' over scope '/subscriptions/XXXXX' or the scope is invalid. 
(As part of App Gw for containers - managed by ALB controller setup)" + +# Delegate AppGw for Containers Configuration Manager role to RG containing Application Gateway for Containers resource +# az role assignment create --assignee-object-id $principalId --assignee-principal-type ServicePrincipal --scope $resourceGroupId --role "fbc52c3f-28ad-4303-a892-8a056630b8f1" +resource "azurerm_role_assignment" "appgwcontainerfix2" { + principal_id = azurerm_user_assigned_identity.alb_identity.principal_id + scope = data.azurerm_resource_group.resource_group.id + role_definition_name = "AppGw for Containers Configuration Manager" + depends_on = [ + azurerm_kubernetes_cluster.k8s, + azurerm_user_assigned_identity.alb_identity + ] +} + +# Delegate Network Contributor permission for join to association subnet +# az role assignment create --assignee-object-id $principalId --assignee-principal-type ServicePrincipal --scope $ALB_SUBNET_ID --role "4d97b98b-1d4f-4787-a291-c67834d212e7" +resource "azurerm_role_assignment" "appgwcontainerfix3" { + principal_id = azurerm_user_assigned_identity.alb_identity.principal_id + scope = data.azurerm_subnet.appgwsubnet.id + role_definition_name = "Network Contributor" + depends_on = [ + azurerm_kubernetes_cluster.k8s, + azurerm_user_assigned_identity.alb_identity + ] +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/4-aks/terraform.tfvars b/2-Terraform-AZURE-Services-Creation/4-aks/terraform.tfvars new file mode 100644 index 0000000..4bed0ab --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/4-aks/terraform.tfvars @@ -0,0 +1,15 @@ +name = "devopsthehardway" +location = "uksouth" + +kubernetes_version = "1.35" +agent_count = 3 +vm_size = "Standard_DS2_v2" +ssh_public_key = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDrt/GYkYpuQYRxM3lgjOr3Wqx8g5nQIbrg6Mr53wZGb35+ft+PibDMqxXZ7xq7fC3YuLnnO022IPgEjkF9fP03ZmfUeLjJJvw8YcutN9DD/2cx93BpKFPNUsqEB+za1iJ16kMsCojy35c1R64O+rw20D6iP96rmDAyIc5FR03y00eyAzQ8vo7/u9+VPwpdGEI7QCokZROcj6iNVz1V/1t6G4AEufPLokdj8J0gla/dN+tvnSLRQVBTDiD4jmVGImpWFqqKaH6R9SSXmRzj0uhvJUmSiZAZCb1caPEYgPEvNITuGQFdykPoY/4Z/3B+x/ipEQbWy8yL7bDFSXZTYhVKlPVyPbUtN5QFt7QtCtg84xDAZ6GA6AnONTtMxX2jvdzB9yh1ZsteNrOZ/Jo3ecuie573syQfG23Tu6qTqak8O7ZTOLY9iPx2ego3KvTWH/Q3lIvjnlpfCQtFtSgkNxjalMBk+NwwEgZHWRREOHwJmQIKVN0gSitN1KXobrqwxNk= tamops@Synth" +aks_admins_group_object_id = "e97b6454-3fa1-499e-8e5c-5d631e9ca4d1" + +tags = { + "Purpose" = "azure-devops-hardway" + "Environment" = "DevOps" + "DeployedBy" = "Terraform" + "Project" = "devopsthehardway" +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/4-aks/variables.tf b/2-Terraform-AZURE-Services-Creation/4-aks/variables.tf new file mode 100644 index 0000000..6c462cf --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/4-aks/variables.tf @@ -0,0 +1,34 @@ +variable "name" { + type = string + default = "devopsthehardway" + description = "Name for resources" +} + +variable "location" { + type = string + default = "uksouth" + description = "Azure Location of resources" +} + +variable "kubernetes_cluster_rbac_enabled" { + default = "true" +} + +variable "kubernetes_version" { +} + +variable "agent_count" { +} + +variable "vm_size" { +} + +variable "ssh_public_key" { +} + +variable "aks_admins_group_object_id" { +} + +variable "tags" { + type = map(string) +} \ No newline at end of file diff --git a/2-Terraform-AZURE-Services-Creation/5-Run-CICD-For-AKS-Cluster.md b/2-Terraform-AZURE-Services-Creation/5-Run-CICD-For-AKS-Cluster.md new file mode 100644 index 0000000..4f1d616 --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/5-Run-CICD-For-AKS-Cluster.md @@ -0,0 +1,132 @@ +# Create AKS Cluster With CI/CD + +## 🎯 Purpose +In this lab, you'll learn how to create an Azure Kubernetes 
Service (AKS) cluster using GitHub Actions for continuous integration and continuous deployment (CI/CD). + +## πŸ› οΈ Setup and Configuration + +### Prerequisites +- [ ] Basic understanding of Terraform and GitHub Actions + + +### Steps + +1. **Review and Customise Variables** + - Open the `terraform.tfvars` file in the [AKS Terraform configuration](https://github.com/thomast1906/DevOps-The-Hard-Way-Azure/tree/main/2-Terraform-AZURE-Services-Creation/4-aks). + - Ensure all values are accurate for your environment. + +2. **Set Up GitHub OIDC Authentication with Azure** + + Set up a more secure authentication method using GitHub OIDC (OpenID Connect) with Azure: + + - First, customise the [script](https://github.com/thomast1906/DevOps-The-Hard-Way-Azure/tree/main/2-Terraform-AZURE-Services-Creation/scripts/5-create-github-oidc.sh) variables if needed: + ```bash + # Variables you may want to customise: + APP_DISPLAY_NAME="DevOps-The-Hardway-Azure-GitHub-OIDC" # Name of the Azure AD app registration + GITHUB_REPO="thomast1906/DevOps-The-Hard-Way-Azure" # Your GitHub repository name + ``` + Update these variables to match your specific environment if different from the defaults. + + - Run the provided script: + ```bash + ./scripts/5-create-github-oidc.sh + ``` + + The script performs these actions: + - [ ] Creates an Azure AD application registration named "DevOps-The-Hardway-Azure" + - [ ] Creates a corresponding service principal + - [ ] Sets up federated credentials for: + - GitHub main branch workflows (`repo:thomast1906/DevOps-The-Hard-Way-Azure:ref:refs/heads/main`) + - Renovate branch workflows (`repo:thomast1906/DevOps-The-Hard-Way-Azure:ref:refs/heads/renovate/configure`) + - Pull request workflows (`repo:thomast1906/DevOps-The-Hard-Way-Azure:pull_request`) + + If you need to customise the federated credentials for different branches or repositories, edit the `create_federated_credential` function calls in the script. 
+ + **Note**: After running the script, it will output all the necessary information and next steps. You'll need to assign appropriate IAM permissions (e.g., Contributor access to the subscription) to the Service Principal using: + ```bash + # Store the app ID in a variable + APP_ID=$(az ad app list --display-name "DevOps-The-Hardway-Azure-GitHub-OIDC" --query "[].appId" -o tsv) + + # Get the service principal ID + SP_ID=$(az ad sp list --filter "appId eq '$APP_ID'" --query "[].id" -o tsv) + + # Assign Contributor role to the subscription + az role assignment create --assignee $SP_ID --role "Contributor" --scope "/subscriptions/YOUR_SUBSCRIPTION_ID" + ``` + + The script will automatically output the exact commands needed with your specific IDs, so you can simply copy and paste them from the terminal output. + +3. **Configure GitHub Repository Settings** + Configure your GitHub repository to use the OIDC connection: + + - Add the following secrets to your GitHub repository (Settings > Secrets > Actions): + - `AZURE_CLIENT_ID`: The App ID you created + - `AZURE_TENANT_ID`: Your Azure AD tenant ID + - `AZURE_SUBSCRIPTION_ID`: Your Azure subscription ID + + Note: All three values will be automatically displayed in the output of the `5-create-github-oidc.sh` script, so you can copy them directly from there. + +4. **Set Up GitHub Actions Workflow** + + ⚠️ **Important**: The workflow file `.github/workflows/main.yml` in this repository is **tutorial content only** and is disabled to prevent accidental runs. 
+ + **To use this workflow in your own project:** + + a) **Copy the Repository**: + - Fork this repository to your own GitHub account, or + - Copy the workflow file to your own repository + + b) **Enable the Workflow**: + - Open `.github/workflows/main.yml` in your repository + - Remove the `tutorial_mode` input and the tutorial job + - Change the `on:` section to: + ```yaml + on: + push: + branches: + - main + pull_request: + branches: + - main + workflow_dispatch: + ``` + + c) **Customize for Your Environment**: + - Update the `terraform.tfvars` values in the workflow + - Update resource group names and storage account names + - Modify the backend configuration to match your setup + + d) **Run the Workflow**: + - Navigate to the Actions tab in your GitHub repository + - Select the `Terraform-Deploy` workflow + - Choose to run the workflow manually or trigger via push/PR + +## πŸ” Verification +**Note**: Since the workflow in this tutorial repository is disabled, these verification steps apply when you run the workflow in your own repository: + +1. Check the GitHub Actions logs in your repository for successful completion. +2. Log into the [Azure Portal](https://portal.azure.com) +3. Navigate to Kubernetes services +4. Verify that your new AKS cluster has been updated or created. 
+ +### 🧠 Knowledge Check +The GitHub Actions workflow (when enabled in your own repository): +- [ ] Triggers manually (`workflow_dispatch`) or on pull requests/pushes to main +- [ ] Checks out the code +- [ ] Authenticates with Azure using OIDC (no secrets stored in GitHub) +- [ ] Sets up Terraform +- [ ] Formats and validates Terraform code +- [ ] Initialises Terraform +- [ ] Plans the Terraform changes +- [ ] Applies the Terraform configuration to create the AKS cluster + +## πŸ’‘ Pro Tip +Consider implementing these additional best practices: +- Use separate state files for different environments (dev, staging, production) to manage multiple AKS clusters efficiently +- Implement branch protection rules to prevent direct pushes to main +- Set up required reviewers for pull requests to the main branch +- Configure federated credentials with more specific patterns if needed: + ```bash + # For specific environments or branches + "subject": "repo:thomast1906/DevOps-The-Hard-Way-Azure:ref:refs/heads/env-*" + ``` diff --git a/2-Terraform-AZURE-Services-Creation/README.md b/2-Terraform-AZURE-Services-Creation/README.md new file mode 100644 index 0000000..8a9144c --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/README.md @@ -0,0 +1,48 @@ +# Terraform Azure Services Creation for DevOps The Hard Way + +## Overview +This directory contains the Terraform configurations needed to create the core Azure infrastructure components for the DevOps The Hard Way - Azure project. Each step builds upon the previous, creating a complete infrastructure for hosting containerised applications in Azure Kubernetes Service (AKS). + +## Labs in this Section + +### [1. Create Azure Container Registry (ACR)](./1-Create-ACR.md) +Set up an Azure Container Registry to store Docker images used by your application. + +### [2. Create Virtual Network](./2-Create-VNET.md) +Create a Virtual Network with the necessary subnets for AKS and Application Gateway. + +### [3. 
Create Log Analytics Workspace](./3-Create-Log-Analytics.md) +Establish a Log Analytics workspace to monitor your AKS cluster and applications. + +### [4. Create AKS Cluster and IAM Roles](./4-Create-AKS-Cluster-IAM-Roles.md) +Deploy an Azure Kubernetes Service cluster with proper Azure AD integration and RBAC. + +### [5. Set Up CI/CD for AKS Cluster](./5-Run-CICD-For-AKS-Cluster.md) +Configure GitHub Actions for continuous integration and deployment to your AKS cluster. + +## Terraform Structure + +Each component is organised in its own directory with a consistent structure: +- `providers.tf`: Defines Azure provider configuration +- `variables.tf`: Declares input variables +- `terraform.tfvars`: Sets default values for variables +- Resource-specific `.tf` files: Contain the actual resource definitions +- `data.tf`: Contains data sources used by the configurations + +## Pre-requisites + +Before starting these labs, ensure you have: + +1. Completed the steps in the [1-Azure](../1-Azure) section +2. Terraform installed (version 1.14.8 or higher) +3. Azure CLI installed and configured (`az login` executed) +4. 
Basic familiarity with Terraform and Azure infrastructure concepts + +## Best Practices Applied + +- Resource naming conventions following Azure recommendations +- Consistent tagging across all resources for better governance +- Secure network design with proper subnet segregation +- RBAC-based access control +- Infrastructure as Code for reproducibility and consistency +- Remote state management using Azure Storage diff --git a/2-Terraform-AZURE-Services-Creation/images/1-acr.png b/2-Terraform-AZURE-Services-Creation/images/1-acr.png new file mode 100644 index 0000000..056e995 Binary files /dev/null and b/2-Terraform-AZURE-Services-Creation/images/1-acr.png differ diff --git a/2-Terraform-AZURE-Services-Creation/images/2-vnet.png b/2-Terraform-AZURE-Services-Creation/images/2-vnet.png new file mode 100644 index 0000000..3cd0e24 Binary files /dev/null and b/2-Terraform-AZURE-Services-Creation/images/2-vnet.png differ diff --git a/2-Terraform-AZURE-Services-Creation/images/3-la.png b/2-Terraform-AZURE-Services-Creation/images/3-la.png new file mode 100644 index 0000000..b2c8ebd Binary files /dev/null and b/2-Terraform-AZURE-Services-Creation/images/3-la.png differ diff --git a/2-Terraform-AZURE-Services-Creation/images/4-aks.png b/2-Terraform-AZURE-Services-Creation/images/4-aks.png new file mode 100644 index 0000000..f881f40 Binary files /dev/null and b/2-Terraform-AZURE-Services-Creation/images/4-aks.png differ diff --git a/2-Terraform-AZURE-Services-Creation/scripts/5-create-github-oidc.sh b/2-Terraform-AZURE-Services-Creation/scripts/5-create-github-oidc.sh new file mode 100755 index 0000000..d2a2bf8 --- /dev/null +++ b/2-Terraform-AZURE-Services-Creation/scripts/5-create-github-oidc.sh @@ -0,0 +1,96 @@ +#!/bin/bash + +# Configuration +APP_DISPLAY_NAME="DevOps-The-Hardway-Azure-GitHub-OIDC" +GITHUB_REPO="thomast1906/DevOps-The-Hard-Way-Azure" + +# Error handling function +handle_error() { + echo "ERROR: $1" + exit 1 +} + +# Verify Azure CLI is installed and user
is logged in +if ! command -v az &> /dev/null; then + handle_error "Azure CLI is not installed. Please install it first: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli" +fi + +# Check if user is logged in +echo "Verifying Azure CLI login status..." +az account show &> /dev/null || handle_error "You are not logged in to Azure CLI. Please run 'az login' first." + +# Check if Azure AD App already exists +echo "Checking if Azure AD application $APP_DISPLAY_NAME already exists..." +APP_EXISTS=$(az ad app list --display-name "$APP_DISPLAY_NAME" --query "[].displayName" -o tsv) + +if [ "$APP_EXISTS" = "$APP_DISPLAY_NAME" ]; then + echo "Azure AD application $APP_DISPLAY_NAME already exists." + APP_ID=$(az ad app list --display-name "$APP_DISPLAY_NAME" --query "[0].appId" -o tsv) +else + # Create Azure AD application registration + echo "Creating Azure AD application $APP_DISPLAY_NAME..." + APP_ID=$(az ad app create --display-name "$APP_DISPLAY_NAME" --query appId -o tsv) || handle_error "Failed to create Azure AD application" +fi + +# Check if service principal exists +echo "Checking if service principal for $APP_DISPLAY_NAME already exists..." +SP_EXISTS=$(az ad sp list --filter "appId eq '$APP_ID'" --query "[].id" -o tsv) + +if [ -n "$SP_EXISTS" ]; then + echo "Service principal for $APP_DISPLAY_NAME already exists." + SP_ID=$SP_EXISTS +else + # Create service principal + echo "Creating service principal for $APP_DISPLAY_NAME..." + SP_ID=$(az ad sp create --id "$APP_ID" --query id -o tsv) || handle_error "Failed to create service principal" +fi + +# Function to create or update federated credential +create_federated_credential() { + local name=$1 + local subject=$2 + local description=$3 + + echo "Checking if federated credential $name already exists..." + CRED_EXISTS=$(az ad app federated-credential list --id "$APP_ID" --query "[?name=='$name'].name" -o tsv) + + if [ "$CRED_EXISTS" = "$name" ]; then + echo "Federated credential $name already exists." 
+ else + echo "Creating federated credential $name..." + az ad app federated-credential create \ + --id "$APP_ID" \ + --parameters "{ + \"name\": \"$name\", + \"issuer\": \"https://token.actions.githubusercontent.com\", + \"subject\": \"$subject\", + \"description\": \"$description\", + \"audiences\": [\"api://AzureADTokenExchange\"] + }" || handle_error "Failed to create federated credential $name" + fi +} + +# Create federated credentials for different GitHub workflows +create_federated_credential "github-oidc-branch" "repo:$GITHUB_REPO:ref:refs/heads/main" "GitHub Actions OIDC - Branch Workflows (main)" +create_federated_credential "github-oidc-branch-renovate" "repo:$GITHUB_REPO:ref:refs/heads/renovate/configure" "GitHub Actions OIDC - Branch Renovate Workflows (renovate)" +create_federated_credential "github-oidc-pull-request" "repo:$GITHUB_REPO:pull_request" "GitHub Actions OIDC - Pull Request Workflows" + +# Get subscription ID +SUBSCRIPTION_ID=$(az account show --query id -o tsv) + +echo "βœ… Setup complete!" 
+echo "===========================================================================" +echo " APPLICATION (CLIENT) ID: $APP_ID" +echo " SERVICE PRINCIPAL ID: $SP_ID" +echo " TENANT ID: $(az account show --query tenantId -o tsv)" +echo " SUBSCRIPTION ID: $SUBSCRIPTION_ID" +echo "===========================================================================" +echo "Next step: Assign appropriate roles to the service principal:" +echo " az role assignment create --assignee $SP_ID --role \"Contributor\" \\" +echo " --scope \"/subscriptions/$SUBSCRIPTION_ID\"" +echo "===========================================================================" +echo "For GitHub Actions, add these secrets to your repository:" +echo " AZURE_CLIENT_ID: $APP_ID" +echo " AZURE_TENANT_ID: $(az account show --query tenantId -o tsv)" +echo " AZURE_SUBSCRIPTION_ID: $SUBSCRIPTION_ID" +echo "===========================================================================" \ No newline at end of file diff --git a/3-Docker/1-Create-Docker-Image.md b/3-Docker/1-Create-Docker-Image.md new file mode 100644 index 0000000..ac9a45a --- /dev/null +++ b/3-Docker/1-Create-Docker-Image.md @@ -0,0 +1,282 @@ +# Creating the Docker Image for the Thomasthornton.cloud App + +## 🎯 **Tutorial Overview** +**Estimated Time:** ⏱️ **15-20 minutes** +**Prerequisites Level:** Basic Docker knowledge helpful + +In this lab, you'll containerize the Thomasthornton.cloud Python Flask application and run it locally using Docker. 
+ +### πŸ“‹ **Learning Objectives** +By the end of this tutorial, you will: +- [ ] Understand Docker containerization concepts +- [ ] Build a Docker image for a Python Flask application +- [ ] Run and test containerized applications locally +- [ ] Understand multi-stage builds and optimization +- [ ] Tag images for registry deployment +- [ ] Validate container functionality and security + +### ⚠️ **Important Notes** +- Ensure Docker Desktop is running before starting +- Image will be ~100MB due to Python base image +- Container runs on port 5000 by default +- Application includes health check endpoints + +## πŸ› οΈ Create The Docker Image + +### βœ… **Prerequisites Checklist** +Before starting, ensure you have: +- [ ] **Docker Desktop** installed and running +- [ ] **Terminal/Command Prompt** access +- [ ] **Text editor** for viewing configuration files +- [ ] Basic understanding of containerization concepts +- [ ] Python Flask application files available + +### πŸ“š **Background Knowledge** +**What is Docker?** +- Containerization platform for packaging applications +- Provides consistent runtime environment +- Isolates applications from host system +- Enables portable deployments across environments + +**Key Docker Concepts:** +- **Image:** Read-only template for containers +- **Container:** Running instance of an image +- **Dockerfile:** Instructions for building images +- **Registry:** Storage for Docker images + +## πŸš€ **Step-by-Step Implementation** + +### **Step 1: Explore Application Structure** ⏱️ *5 minutes* + +1. **πŸ“‚ Navigate to Docker Directory** + ```bash + cd 3-Docker + ``` + +2. 
**πŸ” Review Application Structure** + ```bash + ls -la + # Expected files: + # - Dockerfile + # - app/ + # β”œβ”€β”€ app.py + # β”œβ”€β”€ requirements.txt + # └── templates/ + # └── index.html + ``` + + **πŸ“‹ Application Components:** + - [ ] **app.py** - Flask web application + - [ ] **requirements.txt** - Python dependencies + - [ ] **templates/** - HTML templates + - [ ] **Dockerfile** - Container build instructions + +### **Step 2: Understand Dockerfile Configuration** ⏱️ *5 minutes* + +3. **πŸ“„ Review the Dockerfile** + ```dockerfile + # Key components explained: + FROM python:3.13-slim # Base image + WORKDIR /app # Working directory + COPY requirements.txt /app/ # Copy dependencies first + RUN pip install --no-cache-dir -r requirements.txt # Install deps + COPY app/ /app/ # Copy application code + EXPOSE 5000 # Expose port + CMD ["python", "app.py"] # Start command + ``` + + **🎯 Dockerfile Best Practices Implemented:** + - [ ] **Multi-layer caching** - Dependencies copied separately + - [ ] **Slim base image** - Reduces attack surface and size + - [ ] **Non-root user** - Enhanced security + - [ ] **Health checks** - Container monitoring + - [ ] **Clear working directory** - Organized file structure + +### **Step 3: Build Docker Image** ⏱️ *8 minutes* + +4. **πŸ—οΈ Build the Container Image** + ```bash + # Build with platform specification for compatibility + docker build --platform linux/amd64 -t thomasthorntoncloud:latest . + ``` + + **⏱️ Build Process:** 2-3 minutes + **βœ… Expected Output:** + ``` + [+] Building 45.2s (10/10) FINISHED + => [internal] load build definition from Dockerfile + => => transferring dockerfile: 234B + => [internal] load .dockerignore + => ... + => => naming to docker.io/library/thomasthorntoncloud:latest + ``` + +5. 
**πŸ“‹ Verify Image Creation** + ```bash + # List Docker images + docker images | grep thomasthorntoncloud + ``` + **βœ… Expected:** Image listed with latest tag and size ~100MB +### **Step 4: Test Docker Container Locally** ⏱️ *5 minutes* + +6. **πŸš€ Run Container Locally** + ```bash + # Run in detached mode with port mapping + docker run -d -p 5000:5000 --name thomasthorntoncloud-test thomasthorntoncloud:latest + ``` + +7. **πŸ” Verify Container Status** + ```bash + # Check container is running + docker ps + ``` + **βœ… Expected:** Container status shows "Up" with port 5000:5000 + +8. **🌐 Test Application Response** + ```bash + # Test HTTP endpoint + curl http://localhost:5000 + ``` + **βœ… Expected:** HTML response containing "Thomas Thornton Cloud" + + **πŸ–₯️ Browser Test:** Navigate to `http://localhost:5000` + +9. **🧹 Cleanup Test Container** + ```bash + # Stop and remove test container + docker stop thomasthorntoncloud-test + docker rm thomasthorntoncloud-test + ``` + +## βœ… **Validation Steps** + +**πŸ” Build Validation:** +- [ ] Docker image created successfully (`docker images` shows your image) +- [ ] Image size reasonable (~100MB for Python slim) +- [ ] No build errors or warnings in output + +**πŸš€ Runtime Validation:** +- [ ] Container starts without errors +- [ ] Application responds on port 5000 +- [ ] HTML content loads correctly +- [ ] No runtime errors in container logs (`docker logs `) + +**πŸ”§ Technical Validation:** +```bash +# Comprehensive validation script +echo "πŸ” Validating Docker build..." + +# Check image exists +if docker images | grep -q "thomasthorntoncloud"; then + echo "βœ… Docker image created successfully" +else + echo "❌ Docker image not found" + exit 1 +fi + +# Check image size (should be reasonable) +IMAGE_SIZE=$(docker images thomasthorntoncloud:latest --format "table {{.Size}}" | tail -n 1) +echo "πŸ“Š Image size: $IMAGE_SIZE" + +# Test container run +echo "πŸš€ Testing container..." 
+CONTAINER_ID=$(docker run -d -p 5001:5000 thomasthorntoncloud:latest) + +# Wait for startup +sleep 3 + +# Test HTTP response +if curl -s http://localhost:5001 | grep -q "Thomas Thornton"; then + echo "βœ… Application responding correctly" +else + echo "❌ Application not responding properly" +fi + +# Cleanup +docker stop $CONTAINER_ID +docker rm $CONTAINER_ID + +echo "βœ… Validation complete!" +``` + +## 🚨 **Troubleshooting Guide** + +**❌ Build Failures:** +```bash +# Problem: "no such file or directory" +# Solution: Ensure you're in the 3-Docker directory +pwd # Should show: .../3-Docker + +# Problem: Python package installation fails +# Solution: Check requirements.txt syntax +cat app/requirements.txt + +# Problem: Platform compatibility issues +# Solution: Specify platform explicitly +docker build --platform linux/amd64 -t thomasthorntoncloud:latest . +``` + +**πŸ”§ Runtime Issues:** +```bash +# Problem: Container exits immediately +# Solution: Check application logs +docker logs + +# Problem: Port already in use +# Solution: Use different port or stop conflicting service +docker run -p 8080:5000 thomasthorntoncloud:latest # Use port 8080 + +# Problem: Application not accessible +# Solution: Verify port mapping and firewall +docker port # Check port mapping +netstat -an | grep 5000 # Check if port is listening +``` + +**🧹 Common Cleanup Commands:** +```bash +# Remove all stopped containers +docker container prune + +# Remove unused images +docker image prune + +# Remove everything (use with caution) +docker system prune -a +``` + +## πŸ’‘ **Knowledge Check** + +**🎯 Quick Quiz:** +1. What base image does our Dockerfile use and why? +2. Which port does the Flask application expose? +3. How do you verify a Docker image was built successfully? +4. What's the difference between `docker run` and `docker run -d`? + +**βœ… Answers:** +1. `python:3.13-slim` - Provides Python runtime with minimal attack surface +2. Port `5000` - Default Flask development port +3. 
Use `docker images` command to list built images +4. `-d` runs container in detached mode (background) + +## 🎯 **Next Steps** + +**βœ… Upon Completion:** +- [ ] Docker image built and tested locally +- [ ] Application accessible via HTTP +- [ ] Understanding of container fundamentals +- [ ] Ready to push image to Azure Container Registry + +**➑️ Continue to:** [Push Image to ACR](./2-Push%20Image%20To%20ACR.md) + +--- + +## πŸ“š **Additional Resources** + +- πŸ”— [Docker Best Practices](https://docs.docker.com/develop/dev-best-practices/) +- πŸ”— [Flask Deployment Guide](https://flask.palletsprojects.com/en/2.3.x/deploying/) +- πŸ”— [Container Security](https://docs.docker.com/engine/security/) + +4. **Using Docker Compose**: For more complex applications with multiple services, consider using Docker Compose: + ```bash + docker-compose up -d + ``` \ No newline at end of file diff --git a/3-Docker/2-Push Image To ACR.md b/3-Docker/2-Push Image To ACR.md new file mode 100644 index 0000000..fcce1ef --- /dev/null +++ b/3-Docker/2-Push Image To ACR.md @@ -0,0 +1,119 @@ +# Push Image To Azure Container Registry (ACR) + +## 🎯 Purpose +In this lab, you'll push the Docker image you created locally to Azure Container Registry (ACR). + +## πŸ› οΈ Push Docker Image to ACR + +### Prerequisites +- [ ] Docker image created locally (from previous step) +- [ ] Access to an Azure Container Registry +- [ ] Azure CLI installed and configured + +### Steps + +1. **Verify Your ACR Access** + + First, verify that your ACR exists and you have access to it: + + ```bash + az acr show --name devopsthehardwayazurecr --query name + ``` + + > πŸ” **Note**: Replace `devopsthehardwayazurecr` with your actual ACR name. + +2. **Log Into the ACR Repository** + + ```bash + az acr login --name devopsthehardwayazurecr + ``` + + This command authenticates your Docker CLI with your Azure Container Registry. + +3. 
**Tag the Docker Image** + + ```bash + # Format: docker tag SOURCE_IMAGE TARGET_REGISTRY/TARGET_IMAGE:TAG + docker tag thomasthorntoncloud:latest devopsthehardwayazurecr.azurecr.io/thomasthorntoncloud:v1 + ``` + + > πŸ” **Notes**: + > - Replace `devopsthehardwayazurecr` with your ACR name + > - The `:v1` tag indicates the version of your image + > - Using semantic versioning (e.g., v1.0.0) is recommended for production images + +4. **Push the Docker Image to ACR** + + ```bash + docker push devopsthehardwayazurecr.azurecr.io/thomasthorntoncloud:v1 + ``` + + This command uploads your Docker image to your Azure Container Registry. + +5. **Verify the Image in ACR** + + ```bash + az acr repository show-tags --name devopsthehardwayazurecr --repository thomasthorntoncloud + ``` + + This will list all the tags for the thomasthorntoncloud repository in your ACR. + +## 🧠 Knowledge Check + +After pushing the image to ACR, consider these questions: +1. Why do we need to tag the Docker image before pushing it to ACR? +2. What's the significance of the version tag (e.g., `v1`) in the image name? +3. How does ACR authentication work when pushing images? +4. What role does ACR play in the overall DevOps pipeline for container deployments? + +## πŸ” Verification + +To ensure the Docker image was successfully pushed to ACR: + +1. **Using the Azure CLI**: + ```bash + az acr repository list --name devopsthehardwayazurecr --output table + ``` + +2. **Using the Azure Portal**: + - Log into the [Azure Portal](https://portal.azure.com) + - Navigate to your Azure Container Registry + - Check the "Repositories" section to see if your image is listed: + +![ACR Repository View](images/acr.png) + +## πŸ’‘ Pro Tips + +1. **Use Immutable Tags in Production**: + For production scenarios, consider using unique tags for each image build (like commit hashes or build IDs) rather than reusing tags like "latest". 
+ + ```bash + # Example using a timestamp for unique tagging + BUILD_ID=$(date +%Y%m%d%H%M%S) + docker tag thomasthorntoncloud:latest devopsthehardwayazurecr.azurecr.io/thomasthorntoncloud:$BUILD_ID + docker push devopsthehardwayazurecr.azurecr.io/thomasthorntoncloud:$BUILD_ID + ``` + +2. **Enable Image Scanning**: + Consider enabling vulnerability scanning in your ACR: + + ```bash + az acr update --name devopsthehardwayazurecr --enable-defender + ``` + +3. **Set Up Geo-replication for Production**: + For high-availability production scenarios, consider enabling geo-replication of your ACR: + + ```bash + az acr replication create --registry devopsthehardwayazurecr --location eastus + ``` + +4. **CI/CD Integration**: + Set up CI/CD pipelines to automatically build and push your Docker images to ACR whenever you make changes to your application code. This approach maintains consistent image tagging and versioning across environments. + +5. **Consider Repository Retention Policies**: + For busy repositories, set up retention policies to automatically clean up older images: + + ```bash + az acr config retention update --registry devopsthehardwayazurecr --status enabled --days 30 --type UntaggedManifests + ``` \ No newline at end of file diff --git a/3-Docker/Dockerfile b/3-Docker/Dockerfile new file mode 100644 index 0000000..61b9b47 --- /dev/null +++ b/3-Docker/Dockerfile @@ -0,0 +1,14 @@ +FROM python:3.13-slim + +WORKDIR /app + +# Copy requirements first for better caching +COPY app/requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +# Copy the rest of the application +COPY app . 
+ +EXPOSE 5000 + +CMD ["python", "app.py"] \ No newline at end of file diff --git a/3-Docker/README.md b/3-Docker/README.md new file mode 100644 index 0000000..1e7874a --- /dev/null +++ b/3-Docker/README.md @@ -0,0 +1,74 @@ +# Docker for the DevOps-The-Hard-Way-Azure Project + +## Overview +This directory contains everything needed to build, run, and deploy the Docker container for the thomasthornton.cloud application, a simple Flask-based web application. The containerization process demonstrates best practices for creating efficient, secure, and production-ready Docker images. + +## Contents + +- [Creating the Docker Image](./1-Create-Docker-Image.md) - Learn how to build and run the Docker image locally +- [Pushing to Azure Container Registry](./2-Push%20Image%20To%20ACR.md) - Learn how to push your Docker image to Azure Container Registry +- [Docker Best Practices for Azure](./3-Docker-Best-Practices-For-Azure.md) - Learn about Docker best practices specifically for Azure environments + +## Directory Structure + +``` +. +β”œβ”€β”€ Dockerfile # Instructions for building the Docker image +β”œβ”€β”€ app/ # Application source code directory +β”‚ β”œβ”€β”€ app.py # Flask application entrypoint +β”‚ β”œβ”€β”€ index.html # Web application HTML template +β”‚ └── requirements.txt # Python dependencies +β”œβ”€β”€ images/ # Screenshots and documentation images +└── scripts/ # Automation scripts + └── build-push-acr.sh # Script to build and push image to ACR +``` + +## Application Details + +This is a simple Flask web application that: + +- Serves a responsive HTML page +- Uses a clean and modern UI +- Provides links to the GitHub repository + +## Key Improvements + +This implementation includes several improvements over basic Docker setups: + +1. **Multi-stage builds** for smaller, more secure production images +2. **Non-root user execution** to enhance security +3. **Health checks** for better monitoring and orchestration +4. 
**Optimized layer caching** for faster builds +5. **Automated build and push script** for consistent deployments +6. **Security scanning** to identify vulnerabilities + +## Automation Scripts + +The repository includes scripts to automate Docker workflows: + +```bash +# Build and push to ACR +cd 3-Docker +chmod +x scripts/build-push-acr.sh +./scripts/build-push-acr.sh [optional-tag] + +# Scan Docker image for vulnerabilities +chmod +x scripts/scan-docker-image.sh +./scripts/scan-docker-image.sh thomasthorntoncloud:latest +``` + +## Azure Integration + +This Docker container is designed to be deployed to Azure using: + +- Azure Container Registry (ACR) for image storage +- Azure Kubernetes Service (AKS) for orchestration +- GitHub Actions for CI/CD pipelines + +## Next Steps + +After working through the documentation in this folder: + +1. Review the [AKS Deployment section](../4-kubernetes_manifest/README.md) to learn how to deploy this application to Azure Kubernetes Service +2. Explore the [Terraform Static Code Analysis](../5-Terraform-Static-Code-Analysis/1-Checkov-For-Terraform.md) to implement security scanning for your infrastructure code + diff --git a/3-Docker/app/app.py b/3-Docker/app/app.py new file mode 100644 index 0000000..9fc1481 --- /dev/null +++ b/3-Docker/app/app.py @@ -0,0 +1,12 @@ +from flask import Flask, render_template +import os + +app = Flask(__name__) + +@app.route('/') +def hello(): + return render_template('index.html') + +if __name__ == "__main__": + port = int(os.environ.get("PORT", 5000)) + app.run(host='0.0.0.0', port=port) \ No newline at end of file diff --git a/3-Docker/app/requirements.txt b/3-Docker/app/requirements.txt new file mode 100644 index 0000000..29e6cf5 --- /dev/null +++ b/3-Docker/app/requirements.txt @@ -0,0 +1,2 @@ +Flask==3.1.3 +Werkzeug==3.1.8 \ No newline at end of file diff --git a/3-Docker/app/templates/index.html b/3-Docker/app/templates/index.html new file mode 100644 index 0000000..23336f3 --- /dev/null +++ 
b/3-Docker/app/templates/index.html @@ -0,0 +1,88 @@ + + + + + + DevOps the Hard Way + + + + +
+

Hello, World from thomasthornton.cloud

+

Explore DevOps the Hard Way Azure: Hands-on learning for real-world skills.

+ Start Learning +
+ + \ No newline at end of file diff --git a/3-Docker/images/acr.png b/3-Docker/images/acr.png new file mode 100644 index 0000000..797e16c Binary files /dev/null and b/3-Docker/images/acr.png differ diff --git a/3-Docker/scripts/build-push-acr.sh b/3-Docker/scripts/build-push-acr.sh new file mode 100755 index 0000000..bbe6a55 --- /dev/null +++ b/3-Docker/scripts/build-push-acr.sh @@ -0,0 +1,98 @@ +#!/bin/bash +# build-push-acr.sh +# +# This script automates the process of building a Docker image and pushing it to Azure Container Registry. +# It demonstrates best practices for working with Docker and ACR. +# +# Usage: +# ./build-push-acr.sh [image_tag] + +set -e # Exit immediately if a command exits with a non-zero status + +# Define colors for output +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +RED='\033[0;31m' +NC='\033[0m' # No Color + +# Functions for display +info() { + echo -e "${GREEN}INFO:${NC} $1" +} + +warn() { + echo -e "${YELLOW}WARNING:${NC} $1" +} + +error() { + echo -e "${RED}ERROR:${NC} $1" + exit 1 +} + +# Check for Azure CLI installation +if ! command -v az &> /dev/null; then + error "Azure CLI could not be found. Please install it first." +fi + +# Check for Docker installation +if ! command -v docker &> /dev/null; then + error "Docker could not be found. Please install it first." +fi + +# Check parameters +if [ $# -lt 1 ]; then + error "Usage: $0 [image_tag]" +fi + +ACR_NAME=$1 +IMAGE_TAG=${2:-v1} # Default to v1 if not provided +TIMESTAMP=$(date +%Y%m%d%H%M%S) +IMAGE_REPO="thomasthorntoncloud" +FULL_IMAGE_TAG="$ACR_NAME.azurecr.io/$IMAGE_REPO:$IMAGE_TAG" +LATEST_TAG="$ACR_NAME.azurecr.io/$IMAGE_REPO:latest" + +info "Starting Docker build and push to ACR process..." + +# Verify ACR exists +info "Verifying ACR '$ACR_NAME' exists..." +az acr show --name "$ACR_NAME" --query name -o tsv 2>/dev/null || error "ACR '$ACR_NAME' does not exist or you don't have access to it." + +# Log in to ACR +info "Logging in to ACR '$ACR_NAME'..." 
+az acr login --name "$ACR_NAME" || error "Failed to log in to ACR." + +# Check if we're running on Apple Silicon +if [[ "$(uname -m)" == "arm64" ]]; then + warn "Detected Apple Silicon (ARM64). Using --platform=linux/amd64 for compatibility." + PLATFORM_FLAG="--platform=linux/amd64" +else + PLATFORM_FLAG="" +fi + +# Build the Docker image +info "Building Docker image with tag '$FULL_IMAGE_TAG'..." +docker build $PLATFORM_FLAG -t "$FULL_IMAGE_TAG" . || error "Docker build failed." + +# Tag with 'latest' as well +info "Tagging image as 'latest'..." +docker tag "$FULL_IMAGE_TAG" "$LATEST_TAG" || error "Failed to tag image as latest." + +# Push to ACR +info "Pushing image to ACR..." +docker push "$FULL_IMAGE_TAG" || error "Failed to push image to ACR." +docker push "$LATEST_TAG" || error "Failed to push latest tag to ACR." + +# Verify the push +info "Verifying image in ACR..." +az acr repository show-tags --name "$ACR_NAME" --repository "$IMAGE_REPO" -o table || warn "Could not verify image in ACR." + +info "Image build and push completed successfully!" +info "Your image is available at: $FULL_IMAGE_TAG" +info "Also available as: $LATEST_TAG" + +# Show next steps +echo "" +echo "Next steps:" +echo "1. Update your Kubernetes manifests to use this image" +echo "2. Apply the manifests to your AKS cluster" +echo "3. 
Verify the deployment" diff --git a/4-kubernetes_manifest/1-Connect-To-AKS.md b/4-kubernetes_manifest/1-Connect-To-AKS.md new file mode 100644 index 0000000..946a0f2 --- /dev/null +++ b/4-kubernetes_manifest/1-Connect-To-AKS.md @@ -0,0 +1,262 @@ +# πŸ”— Connecting To Azure Kubernetes Service (AKS) + +> **Estimated Time:** ⏱️ **10-15 minutes** + +## 🎯 **Learning Objectives** + +By the end of this tutorial, you will: +- [ ] **Authenticate to AKS cluster** using Azure CLI +- [ ] **Configure kubectl context** for cluster management +- [ ] **Verify cluster connectivity** and node status +- [ ] **Understand kubeconfig** file structure and management +- [ ] **Execute basic kubectl commands** for cluster exploration + +## πŸ“‹ **Prerequisites** + +**βœ… Required Knowledge:** +- [ ] Basic understanding of Kubernetes concepts (clusters, nodes, pods) +- [ ] Familiarity with command-line interfaces +- [ ] Azure CLI authentication basics + +**πŸ”§ Required Tools:** +- [ ] Azure CLI installed and authenticated +- [ ] kubectl CLI installed (latest version recommended) +- [ ] Access to Azure subscription with AKS Reader permissions +- [ ] Completed: [Create AKS Cluster & IAM Roles](../2-Terraform-AZURE-Services-Creation/4-Create-AKS-Cluster-IAM-Roles.md) + +**πŸ—οΈ Infrastructure Dependencies:** +- [ ] AKS cluster successfully deployed and running +- [ ] Resource group containing AKS cluster +- [ ] Proper RBAC permissions configured + +## πŸš€ **Step-by-Step Implementation** + +### **Step 1: Verify Prerequisites** ⏱️ *3 minutes* + +1. **πŸ” Check Azure CLI Authentication** + ```bash + # Verify you're logged into Azure + az account show --output table + ``` + **βœ… Expected:** Your subscription details displayed + +2. **πŸ”§ Verify kubectl Installation** + ```bash + # Check kubectl version + kubectl version --client --output=yaml + ``` + **βœ… Expected:** Client version information (server version will show after connection) + +3. 
**πŸ“‹ List Available AKS Clusters** + ```bash + # List AKS clusters in your subscription + az aks list --output table + ``` + **βœ… Expected:** Your AKS cluster listed with "Succeeded" provisioning state + +### **Step 2: Connect to AKS Cluster** ⏱️ *5 minutes* + +4. **πŸ” Get AKS Credentials** + ```bash + # Replace with your actual resource group and cluster names + az aks get-credentials --resource-group devopsthehardway-rg --name devopsthehardwayaks --overwrite-existing + ``` + + **βš™οΈ Command Breakdown:** + - `--resource-group` - Azure resource group containing your AKS cluster + - `--name` - Name of your AKS cluster + - `--overwrite-existing` - Replaces existing cluster entry in kubeconfig + + **βœ… Expected Output:** + ``` + Merged "devopsthehardwayaks" as current context in /Users/[username]/.kube/config + ``` + +5. **πŸ“‚ Verify Kubeconfig Update** + ```bash + # Check current kubectl context + kubectl config current-context + ``` + **βœ… Expected:** Your AKS cluster name displayed + +6. **πŸ“Š View Kubeconfig Details** + ```bash + # Display current context configuration + kubectl config view --minify + ``` + **βœ… Expected:** Context details with cluster endpoint and user information + +### **Step 3: Verify Cluster Connectivity** ⏱️ *5 minutes* + +7. **πŸ–₯️ Check Cluster Nodes** + ```bash + # List all nodes in the cluster + kubectl get nodes -o wide + ``` + **βœ… Expected Output:** + ``` + NAME STATUS ROLES AGE VERSION + aks-default-12345678-0 Ready agent 1h v1.35.x + aks-default-12345678-1 Ready agent 1h v1.35.x + ``` + +8. **πŸ” Get Cluster Information** + ```bash + # Display cluster information + kubectl cluster-info + ``` + **βœ… Expected:** Kubernetes control plane and CoreDNS URLs + +9. **πŸ“‹ Check System Pods** + ```bash + # List system pods to verify cluster health + kubectl get pods --all-namespaces --output wide + ``` + **βœ… Expected:** All system pods in "Running" status + +10. 
**βš™οΈ Verify RBAC Permissions** + ```bash + # Test your permissions in the cluster + kubectl auth can-i get pods + kubectl auth can-i create deployments + ``` + **βœ… Expected:** "yes" for basic operations you have permissions for + +## βœ… **Validation Steps** + +**πŸ” Connection Validation:** +- [ ] kubectl commands execute without authentication errors +- [ ] Cluster nodes are visible and in "Ready" state +- [ ] System pods are running successfully +- [ ] kubeconfig context is properly set + +**πŸ”§ Technical Validation:** +```bash +# Comprehensive validation script +echo "πŸ”— Validating AKS connection..." + +# Check if kubectl can connect +if kubectl get nodes &>/dev/null; then + echo "βœ… kubectl can connect to cluster" + + # Check node status + NODE_COUNT=$(kubectl get nodes --no-headers | wc -l) + READY_NODES=$(kubectl get nodes --no-headers | grep -c "Ready") + echo "πŸ“Š Nodes: $READY_NODES/$NODE_COUNT Ready" + + # Check system pods + SYSTEM_PODS=$(kubectl get pods -n kube-system --no-headers | wc -l) + RUNNING_PODS=$(kubectl get pods -n kube-system --no-headers | grep -c "Running") + echo "πŸƒ System Pods: $RUNNING_PODS/$SYSTEM_PODS Running" + + # Check current context + CURRENT_CONTEXT=$(kubectl config current-context) + echo "🎯 Current Context: $CURRENT_CONTEXT" + + echo "βœ… AKS connection validation complete!" 
+else + echo "❌ Failed to connect to AKS cluster" + exit 1 +fi +``` + +**πŸ“Š Connectivity Checklist:** +- [ ] **Authentication** - Azure CLI session active +- [ ] **Authorization** - Proper RBAC permissions +- [ ] **Network** - Cluster API server accessible +- [ ] **Configuration** - kubeconfig properly merged +- [ ] **Functionality** - Basic kubectl operations working + +## 🚨 **Troubleshooting Guide** + +**❌ Common Connection Issues:** +```bash +# Problem: "Unable to connect to the server" +# Solution: Check Azure CLI authentication and network connectivity +az account show +az aks show --resource-group --name --query "fqdn" + +# Problem: "Forbidden" or "Unauthorized" errors +# Solution: Verify RBAC permissions +az aks show --resource-group --name --query "aadProfile" +az role assignment list --assignee $(az account show --query user.name -o tsv) + +# Problem: "No current context" error +# Solution: Reconfigure kubectl context +kubectl config get-contexts +az aks get-credentials --resource-group --name --overwrite-existing +``` + +**πŸ”§ Configuration Issues:** +```bash +# Problem: Wrong cluster context +# Solution: Switch to correct context +kubectl config get-contexts +kubectl config use-context + +# Problem: Kubeconfig corruption +# Solution: Regenerate kubeconfig +mv ~/.kube/config ~/.kube/config.backup +az aks get-credentials --resource-group --name + +# Problem: kubectl not found +# Solution: Install or update kubectl +az aks install-cli # Azure CLI method +# or use package manager (brew, apt, etc.) +``` + +**🌐 Network Troubleshooting:** +```bash +# Test cluster API server connectivity +CLUSTER_FQDN=$(az aks show --resource-group --name --query "fqdn" -o tsv) +nslookup $CLUSTER_FQDN +curl -k https://$CLUSTER_FQDN/version + +# Check firewall/proxy settings +kubectl get nodes -v=6 # Verbose output for debugging +``` + +## πŸ’‘ **Knowledge Check** + +**🎯 Kubernetes Fundamentals:** +1. What is a kubeconfig file and where is it stored? +2. 
What does the `az aks get-credentials` command actually do? +3. How can you manage multiple Kubernetes clusters? +4. What's the difference between authentication and authorization in Kubernetes? + +**πŸ“ Answers:** +1. **kubeconfig** is stored at `~/.kube/config` and contains cluster connection details, user credentials, and contexts +2. **Downloads cluster certificates** and creates/updates kubeconfig entries for the specified AKS cluster +3. **Multiple contexts** in kubeconfig allow switching between clusters using `kubectl config use-context` +4. **Authentication** verifies identity; **authorization** (RBAC) determines what actions are permitted + +**πŸ” Advanced Concepts:** +- **Context Management:** How would you organize kubeconfig for multiple environments? +- **Security:** What are the implications of the `--overwrite-existing` flag? +- **Automation:** How could you script cluster connections for CI/CD pipelines? + +## 🎯 **Next Steps** + +**βœ… Upon Completion:** +- [ ] Successfully connected to AKS cluster +- [ ] kubectl configured and operational +- [ ] Cluster health verified +- [ ] Understanding of kubeconfig management +- [ ] Ready to deploy Kubernetes manifests + +**➑️ Continue to:** [Create Kubernetes Manifest](./2-Create-Kubernetes-Manifest.md) + +--- + +## πŸ“š **Additional Resources** + +- πŸ”— [kubectl Cheat Sheet](https://kubernetes.io/docs/reference/kubectl/cheatsheet/) +- πŸ”— [Organizing Cluster Access](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) +- πŸ”— [AKS Authentication](https://docs.microsoft.com/en-us/azure/aks/concepts-identity) +- πŸ”— [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) + +**🎯 Pro Tips:** +- Use **multiple contexts** for different environments (dev, staging, prod) +- Set up **kubectl aliases** for common commands (`k` for `kubectl`) +- Consider **kubectx/kubens** tools for easier context switching +- Always verify your **current context** before 
making changes
diff --git a/4-kubernetes_manifest/2-Create-Kubernetes-Manifest.md b/4-kubernetes_manifest/2-Create-Kubernetes-Manifest.md
new file mode 100644
index 0000000..393f015
--- /dev/null
+++ b/4-kubernetes_manifest/2-Create-Kubernetes-Manifest.md
@@ -0,0 +1,337 @@
+# πŸ“„ Create The Kubernetes Manifest
+
+> **Estimated Time:** ⏱️ **20-25 minutes**
+
+## 🎯 **Learning Objectives**
+
+By the end of this tutorial, you will:
+- [ ] **Understand Kubernetes manifest structure** and components
+- [ ] **Configure deployment specifications** with proper resource management
+- [ ] **Set up service definitions** for application exposure
+- [ ] **Implement health checks** for application reliability
+- [ ] **Customize manifest** for your specific ACR image
+
+## πŸ“‹ **Prerequisites**
+
+**βœ… Required Knowledge:**
+- [ ] Basic understanding of Kubernetes concepts (pods, deployments, services)
+- [ ] Familiarity with YAML syntax and structure
+- [ ] Container registry concepts (ACR, image tagging)
+
+**πŸ”§ Required Tools:**
+- [ ] Text editor or IDE for YAML editing
+- [ ] Access to Azure Container Registry (ACR)
+- [ ] kubectl CLI configured for AKS cluster
+- [ ] Completed: [Create Docker Image](../3-Docker/1-Create-Docker-Image.md)
+- [ ] Completed: [Push Image to ACR](../3-Docker/2-Push%20Image%20To%20ACR.md)
+
+**πŸ—οΈ Infrastructure Dependencies:**
+- [ ] AKS cluster connected and accessible
+- [ ] Docker image built and pushed to ACR
+- [ ] ACR integration with AKS cluster configured
+
+## πŸš€ **Step-by-Step Implementation**
+
+### **Step 1: Understand Manifest Architecture** ⏱️ *8 minutes*
+
+1. **πŸ“‚ Navigate to Kubernetes Manifest Directory**
+   ```bash
+   cd 4-kubernetes_manifest
+   ls -la
+   ```
+   **βœ… Expected Files:**
+   - `deployment.yml` - Main Kubernetes manifest
+   - `1-Connect-To-AKS.md` - Connection guide
+   - `2-Create-Kubernetes-Manifest.md` - This tutorial
+
+2. **πŸ“‹ Review Manifest Structure**
+   ```bash
+   cat deployment.yml
+   ```
+
+3. 
**πŸ—οΈ Understand Manifest Components** + + **πŸ“„ Namespace Definition:** + ```yaml + apiVersion: v1 + kind: Namespace + metadata: + name: thomasthorntoncloud + --- + ``` + - **Purpose:** Isolates resources and provides organization + - **Benefits:** Resource separation, RBAC scoping, easier management + + **πŸš€ Deployment Configuration:** + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: thomasthorntoncloud-deployment + namespace: thomasthorntoncloud + spec: + replicas: 2 + selector: + matchLabels: + app: thomasthorntoncloud + template: + metadata: + labels: + app: thomasthorntoncloud + spec: + containers: + - name: thomasthorntoncloud + image: /thomasthorntoncloud:latest + ports: + - containerPort: 5000 + ``` + + **🌐 Service Definition:** + ```yaml + apiVersion: v1 + kind: Service + metadata: + name: thomasthorntoncloud-service + namespace: thomasthorntoncloud + spec: + selector: + app: thomasthorntoncloud + ports: + - protocol: TCP + port: 80 + targetPort: 5000 + type: LoadBalancer + ``` + +### **Step 2: Customize Manifest Configuration** ⏱️ *8 minutes* + +4. **πŸ” Get Your ACR URL** + ```bash + # List your container registries + az acr list --output table + + # Get specific ACR login server + az acr show --name --query "loginServer" --output tsv + ``` + **βœ… Expected Format:** `.azurecr.io` + +5. **πŸ“ Update Image URL in Manifest** + ```bash + # Open deployment.yml in your preferred editor + nano deployment.yml + # or + code deployment.yml + ``` + + **🎯 Find and Replace:** + ```yaml + # Find this line (around line 24): + image: /thomasthorntoncloud:latest + + # Replace with your actual ACR URL: + image: yourregistryname.azurecr.io/thomasthorntoncloud:latest + ``` + +6. 
**βš™οΈ Review Enhanced Configuration Features** + + **πŸ₯ Health Checks (Liveness & Readiness Probes):** + ```yaml + livenessProbe: + httpGet: + path: / + port: 5000 + initialDelaySeconds: 30 + periodSeconds: 10 + readinessProbe: + httpGet: + path: / + port: 5000 + initialDelaySeconds: 5 + periodSeconds: 5 + ``` + + **πŸ“Š Resource Management:** + ```yaml + resources: + requests: + memory: "128Mi" + cpu: "250m" + limits: + memory: "256Mi" + cpu: "500m" + ``` + + **πŸ”’ Security Context:** + ```yaml + securityContext: + runAsNonRoot: true + runAsUser: 1000 + allowPrivilegeEscalation: false + ``` + +### **Step 3: Validate Manifest Configuration** ⏱️ *6 minutes* + +7. **πŸ” Syntax Validation** + ```bash + # Validate YAML syntax using kubectl + kubectl apply --dry-run=client -f deployment.yml + ``` + **βœ… Expected:** No syntax errors reported + +8. **πŸ“‹ Verify Image Accessibility** + ```bash + # Test if you can pull the image from ACR + az acr repository show --name --image thomasthorntoncloud:latest + ``` + **βœ… Expected:** Image details and manifest information + +9. **πŸ”§ Check AKS Integration with ACR** + ```bash + # Verify AKS can pull from ACR + az aks check-acr --name --resource-group --acr + ``` + **βœ… Expected:** "ACR integration is working correctly" + +## βœ… **Validation Steps** + +**πŸ” Manifest Validation:** +- [ ] YAML syntax is valid (no indentation errors) +- [ ] Image URL updated with correct ACR reference +- [ ] Resource requests and limits properly configured +- [ ] Health checks configured for application reliability +- [ ] Service type and port mapping correct + +**πŸ”§ Technical Validation:** +```bash +# Comprehensive validation script +echo "πŸ“„ Validating Kubernetes manifest..." 
+ +# Check if manifest file exists +if [ -f "deployment.yml" ]; then + echo "βœ… Manifest file found" + + # Validate YAML syntax + if kubectl apply --dry-run=client -f deployment.yml &>/dev/null; then + echo "βœ… YAML syntax valid" + + # Check if image URL is updated + if grep -q "azurecr.io" deployment.yml; then + echo "βœ… ACR image URL configured" + else + echo "❌ ACR image URL needs to be updated" + fi + + # Count manifest objects + OBJECT_COUNT=$(kubectl apply --dry-run=client -f deployment.yml | wc -l) + echo "πŸ“Š Manifest objects: $OBJECT_COUNT" + + echo "βœ… Manifest validation complete!" + else + echo "❌ YAML syntax validation failed" + kubectl apply --dry-run=client -f deployment.yml + exit 1 + fi +else + echo "❌ deployment.yml file not found" + exit 1 +fi +``` + +**πŸ“Š Configuration Checklist:** +- [ ] **Namespace** - Creates isolated environment +- [ ] **Deployment** - Manages pod replicas and updates +- [ ] **Service** - Exposes application with LoadBalancer +- [ ] **Health Checks** - Ensures application reliability +- [ ] **Resource Limits** - Prevents resource exhaustion +- [ ] **Security Context** - Runs with minimal privileges + +## 🚨 **Troubleshooting Guide** + +**❌ Common Manifest Issues:** +```bash +# Problem: YAML indentation errors +# Solution: Use proper YAML validator +kubectl apply --dry-run=client -f deployment.yml + +# Problem: Image pull errors +# Solution: Verify ACR integration and image existence +az acr repository list --name +az aks check-acr --name --resource-group --acr + +# Problem: Resource allocation issues +# Solution: Adjust resource requests/limits +kubectl describe nodes # Check available resources +``` + +**πŸ”§ Configuration Issues:** +```bash +# Problem: Service not accessible +# Solution: Check service type and port configuration +kubectl get services -n thomasthorntoncloud +kubectl describe service thomasthorntoncloud-service -n thomasthorntoncloud + +# Problem: Health check failures +# Solution: Verify application 
responds on correct path/port +curl http://localhost:5000/ # Test locally first + +# Problem: Namespace issues +# Solution: Ensure namespace is created before resources +kubectl get namespaces +kubectl create namespace thomasthorntoncloud --dry-run=client -o yaml +``` + +**🧹 Manifest Cleanup:** +```bash +# Remove deployed resources if needed +kubectl delete -f deployment.yml + +# Force removal if stuck +kubectl delete namespace thomasthorntoncloud --force --grace-period=0 +``` + +## πŸ’‘ **Knowledge Check** + +**🎯 Kubernetes Fundamentals:** +1. What's the difference between a Deployment and a Pod? +2. Why do we use Namespaces in Kubernetes? +3. How do liveness and readiness probes differ? +4. What's the purpose of resource requests vs limits? + +**πŸ“ Answers:** +1. **Deployment** manages multiple pods with rollout capabilities; **Pod** is a single application instance +2. **Namespaces** provide resource isolation, RBAC scoping, and organizational boundaries +3. **Liveness** restarts unhealthy containers; **Readiness** controls traffic routing to ready containers +4. **Requests** guarantee minimum resources; **Limits** prevent resource overconsumption + +**πŸ” Advanced Concepts:** +- **Rolling Updates:** How would you update the application without downtime? +- **Scaling:** How could you automatically scale based on CPU usage? +- **Security:** What additional security measures could be implemented? 
+
+## 🎯 **Next Steps**
+
+**βœ… Upon Completion:**
+- [ ] Kubernetes manifest properly configured and validated
+- [ ] ACR image URL updated in deployment
+- [ ] Health checks and resource limits configured
+- [ ] Understanding of Kubernetes manifest structure
+- [ ] Ready to deploy application to AKS
+
+**➑️ Continue to:** [Deploy Thomasthorntoncloud App](./3-Deploy-Thomasthorntoncloud-App.md)
+
+---
+
+## πŸ“š **Additional Resources**
+
+- πŸ”— [Kubernetes Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/)
+- πŸ”— [Services and Load Balancing](https://kubernetes.io/docs/concepts/services-networking/service/)
+- πŸ”— [Configure Liveness and Readiness Probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/)
+- πŸ”— [Managing Resources for Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
+
+**🎯 Pro Tips:**
+- Use **kubectl explain** to understand resource specifications
+- Implement **horizontal pod autoscaling** for production workloads
+- Consider **Helm charts** for complex application packaging
+- Set up **monitoring and alerting** for deployed applications
+
+
diff --git a/4-kubernetes_manifest/3-Deploy-Thomasthorntoncloud-App.md b/4-kubernetes_manifest/3-Deploy-Thomasthorntoncloud-App.md
new file mode 100644
index 0000000..3616022
--- /dev/null
+++ b/4-kubernetes_manifest/3-Deploy-Thomasthorntoncloud-App.md
@@ -0,0 +1,70 @@
+# Deploy The Thomasthorntoncloud App
+
+## 🎯 Purpose
+In this lab, you'll deploy the Thomasthorntoncloud app to your Azure Kubernetes Service (AKS) cluster using the prepared Kubernetes manifest.
+
+## πŸ› οΈ Deploy the Application
+
+### Prerequisites
+- [ ] AKS cluster provisioned
+- [ ] Kubernetes manifest prepared
+- [ ] kubectl configured to communicate with your AKS cluster
+
+### Steps
+
+1. **Navigate to the Kubernetes Manifest Directory**
+   ```bash
+   cd 4-kubernetes_manifest
+   ```
+2. 
Deploy the Application Components + +- Deploy the Thomasthorntoncloud app: + ```bash + kubectl create -f deployment.yml + ``` + +- Install ALB Controller: + ```bash + ./scripts/1-alb-controller-install-k8s.sh + ``` +- Install Gateway API resources: + ```bash + ./scripts/2-gateway-api-resources.sh + ``` + +3. **Verify Deployment** + Run the following command to confirm the deployment was successful: + ```bash + kubectl get deployments + ``` +4. **Access the Thomasthornton.cloud App** + +To access the Thomasthorntoncloud app via Azure Application Gateway Controller for Containers, run the following command: + ```bash + fqdn=$(kubectl get gateway gateway-01 -n thomasthorntoncloud -o jsonpath='{.status.addresses[0].value}') + echo "http://$fqdn" + ``` + + Access the Thomasthornton.cloud app using the address provided. + +You've successfully deployed the Thomasthornton.cloud app to your AKS cluster using the Kubernetes manifest: + +![](images/website.png) + +## πŸ” Verification + +To ensure your application is deployed and running correctly: +1. Check that all pods are in the 'Running' state: kubectl get pods +2. Verify that the service is exposed: kubectl get services +3. Test the application by accessing it through the provided URL + +## 🧠 Knowledge Check + +After deploying the application, consider these questions: +1. What is the purpose of the ALB Controller in this deployment? +2. How does the Gateway API enhance the application's accessibility? +3. Why is it important to verify the deployment using `kubectl get deployments`? + +## πŸ’‘ Pro Tip + +Use Kubernetes namespaces to organise and isolate your resources, especially when deploying multiple applications or environments in the same cluster. 
diff --git a/4-kubernetes_manifest/README.md b/4-kubernetes_manifest/README.md new file mode 100644 index 0000000..14632f9 --- /dev/null +++ b/4-kubernetes_manifest/README.md @@ -0,0 +1,56 @@ +# Kubernetes Manifests for AKS Deployment + +## Overview +This directory contains the Kubernetes manifest files and instructions needed to deploy the thomasthornton.cloud application to Azure Kubernetes Service (AKS). These manifests define how the application will run in the AKS cluster. + +## Contents + +- [Connecting to AKS](./1-Connect-To-AKS.md) - Learn how to connect to your AKS cluster +- [Creating Kubernetes Manifests](./2-Create-Kubernetes-Manifest.md) - Learn about the manifest files for deployment +- [Deploying the Application](./3-Deploy-Thomasthorntoncloud-App.md) - Deploy the application to your AKS cluster + +## Directory Structure + +``` +. +β”œβ”€β”€ deployment.yml # Kubernetes deployment manifest for the application +β”œβ”€β”€ images/ # Screenshots and documentation images +└── scripts/ # Helper scripts + β”œβ”€β”€ 1-alb-controller-install-k8s.sh # Script to install Azure Load Balancer controller + └── 2-gateway-api-resources.sh # Script to create Gateway API resources +``` + +## Deployment Process + +The deployment process follows these key steps: + +1. Connect to your AKS cluster using `kubectl` +2. Understand the Kubernetes manifest structure +3. Deploy the application using `kubectl apply` +4. Verify the deployment and access the application + +## Features + +The Kubernetes deployment in this section includes: + +- Deployment resource to manage pod replicas +- Service resource to expose the application +- Integration with Azure Load Balancer +- Configuration for scaling and high availability + +## Next Steps + +After completing the deployment: + +1. Review the [Terraform Static Code Analysis](../5-Terraform-Static-Code-Analysis/1-Checkov-For-Terraform.md) section to learn about security scanning +2. 
Consider implementing monitoring and observability solutions for your AKS deployment + +## Best Practices + +The Kubernetes manifests in this section follow these best practices: + +1. Resource requests and limits +2. Liveness and readiness probes +3. Proper labeling for resources +4. Security context configurations +5. Network policies (to be added) diff --git a/4-kubernetes_manifest/deployment.yml b/4-kubernetes_manifest/deployment.yml new file mode 100644 index 0000000..6da57e5 --- /dev/null +++ b/4-kubernetes_manifest/deployment.yml @@ -0,0 +1,58 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: thomasthorntoncloud +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: thomasthornton + namespace: thomasthorntoncloud +spec: + replicas: 1 + selector: + matchLabels: + app: thomasthorntoncloud + template: + metadata: + labels: + app: thomasthorntoncloud + spec: + containers: + - name: thomasthorntoncloud + image: devopsthehardwayazurecr.azurecr.io/thomasthorntoncloud:v2 # Update this line + ports: + - containerPort: 5000 + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 500m + memory: 512Mi + livenessProbe: + httpGet: + path: / + port: 5000 + initialDelaySeconds: 30 + periodSeconds: 10 + readinessProbe: + httpGet: + path: / + port: 5000 + initialDelaySeconds: 5 + periodSeconds: 5 +--- +apiVersion: v1 +kind: Service +metadata: + name: thomasthorntoncloud + namespace: thomasthorntoncloud +spec: + type: LoadBalancer + ports: + - port: 80 + targetPort: 5000 + selector: + app: thomasthorntoncloud \ No newline at end of file diff --git a/4-kubernetes_manifest/images/website.png b/4-kubernetes_manifest/images/website.png new file mode 100644 index 0000000..e723233 Binary files /dev/null and b/4-kubernetes_manifest/images/website.png differ diff --git a/4-kubernetes_manifest/scripts/1-alb-controller-install-k8s.sh b/4-kubernetes_manifest/scripts/1-alb-controller-install-k8s.sh new file mode 100755 index 0000000..6787e62 --- /dev/null +++ 
b/4-kubernetes_manifest/scripts/1-alb-controller-install-k8s.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +RESOURCE_GROUP="devopsthehardway-rg" +AKS_NAME="devopsthehardwayaks" +helm_resource_namespace="azure-alb-system" +VNET_NAME="devopsthehardway-vnet" +ALB_SUBNET_NAME="appgw" +ALB_CONTROLLER_VERSION="1.9.16" + +# Create namespace (idempotent) +kubectl create namespace $helm_resource_namespace --dry-run=client -o yaml | kubectl apply -f - + +ALB_CLIENT_ID=$(az identity show -g $RESOURCE_GROUP -n azure-alb-identity --query clientId -o tsv) + +# Install or upgrade the ALB controller +if helm status alb-controller -n $helm_resource_namespace &>/dev/null; then + helm upgrade alb-controller oci://mcr.microsoft.com/application-lb/charts/alb-controller \ + --namespace $helm_resource_namespace \ + --version $ALB_CONTROLLER_VERSION \ + --set albController.namespace=$helm_resource_namespace \ + --set albController.podIdentity.clientID=$ALB_CLIENT_ID +else + helm install alb-controller oci://mcr.microsoft.com/application-lb/charts/alb-controller \ + --namespace $helm_resource_namespace \ + --version $ALB_CONTROLLER_VERSION \ + --set albController.namespace=$helm_resource_namespace \ + --set albController.podIdentity.clientID=$ALB_CLIENT_ID +fi diff --git a/4-kubernetes_manifest/scripts/2-gateway-api-resources.sh b/4-kubernetes_manifest/scripts/2-gateway-api-resources.sh new file mode 100755 index 0000000..7d4ee70 --- /dev/null +++ b/4-kubernetes_manifest/scripts/2-gateway-api-resources.sh @@ -0,0 +1,44 @@ +RESOURCE_GROUP='devopsthehardway-rg' +ALB_RESOURCE_NAME='devopsthehardway-alb' +ALB_FRONTEND_NAME='alb-frontend' + +RESOURCE_ID=$(az network alb show --resource-group $RESOURCE_GROUP --name $ALB_RESOURCE_NAME --query id -o tsv) + +# Create a Gateway +kubectl apply -f - < **Estimated Time:** ⏱️ **15-20 minutes** + +## 🎯 **Learning Objectives** + +By the end of this tutorial, you will: +- [ ] **Install and configure Checkov** for Terraform security scanning +- [ ] **Run static code 
analysis** on your Terraform configurations
+- [ ] **Interpret scan results** and understand security findings
+- [ ] **Implement security fixes** based on Checkov recommendations
+- [ ] **Integrate security scanning** into your development workflow
+
+## πŸ“‹ **Prerequisites**
+
+**βœ… Required Knowledge:**
+- [ ] Basic understanding of Terraform syntax and structure
+- [ ] Familiarity with Infrastructure as Code (IaC) concepts
+- [ ] Basic Python package management (pip)
+
+**πŸ”§ Required Tools:**
+- [ ] Python 3.6+ installed
+- [ ] pip or pip3 package manager
+- [ ] Terminal/command line access
+- [ ] Terraform configurations to scan
+- [ ] Completed: Previous Terraform tutorials with infrastructure code
+
+**πŸ—οΈ Infrastructure Dependencies:**
+- [ ] Terraform configurations from previous tutorials
+- [ ] Access to terminal for command execution
+
+## πŸš€ **Step-by-Step Implementation**
+
+### **Step 1: Install Checkov** ⏱️ *5 minutes*
+
+1. **🐍 Verify Python Installation**
+   ```bash
+   # Check Python version (3.6+ required)
+   python3 --version
+   # or
+   python --version
+   ```
+   **βœ… Expected:** Python 3.6.0 or higher
+
+2. **πŸ“¦ Install Checkov Package**
+   ```bash
+   # Install latest compatible version
+   pip3 install checkov==3.2.4
+   ```
+
+3. **Run Checkov**
+   Run the following command in your terminal:
+   ```bash
+   checkov
+   ```
+   You'll see a prompt to set up the free Bridgecrew UI. Press `Y` to start the process.
+
+4. **Scan Terraform Code**
+
+   Run the following command to scan the Terraform code:
+   ```bash
+   checkov --directory <directory>
+   ```
+
+   For example:
+   ```bash
+   checkov --directory DevOps-The-Hard-Way-Azure/Terraform-AZURE-Services-Creation/1-acr
+   ```
+
+## πŸ” Verification
+
+To ensure Checkov is working correctly:
+1. Check that the scan completes without errors
+2. Review the list of passed and failed tests in the terminal output
+3. 
Verify that you can access the results in the Bridgecrew UI + +## 🧠 Knowledge Check + +After running Checkov, consider these questions: +1. What types of issues does Checkov identify in Terraform code? +2. How does Checkov differ from other Terraform validation tools? +3. What are the benefits of using the Bridgecrew UI alongside Checkov? + +## πŸ’‘ Pro Tip + +Use Checkov's `--compact flag` to get a more concise output, or `--quiet` to only see failed checks. This can be helpful when integrating with CI/CD pipelines. \ No newline at end of file diff --git a/5-Terraform-Static-Code-Analysis/2-tfsec.md b/5-Terraform-Static-Code-Analysis/2-tfsec.md new file mode 100644 index 0000000..f47f4d1 --- /dev/null +++ b/5-Terraform-Static-Code-Analysis/2-tfsec.md @@ -0,0 +1,144 @@ +# tfsec For Terraform Security Scanning + +## 🎯 Purpose +In this lab, you'll learn how to use tfsec, a static analysis security scanner for your Terraform code, and integrate it into your GitHub Actions workflow for automated security checks. + +## πŸ› οΈ Install and Run tfsec + +### Prerequisites +- [ ] Basic understanding of Terraform +- [ ] GitHub repository with Terraform code +- [ ] Permissions to update GitHub Actions workflows + +### Steps + +1. **Install tfsec Locally** + + Install tfsec using one of the following methods: + + **Homebrew (macOS/Linux)**: + ```bash + brew install tfsec + ``` + + **Docker**: + ```bash + docker run --rm -it -v "$(pwd):/src" aquasec/tfsec /src + ``` + + **Go**: + ```bash + go install github.com/aquasecurity/tfsec/cmd/tfsec@latest + ``` + + **Chocolatey (Windows)**: + ```bash + choco install tfsec + ``` + +2. **Run tfsec Locally** + + Run tfsec against your Terraform code: + ```bash + tfsec /path/to/terraform/code + ``` + + For example: + ```bash + tfsec DevOps-The-Hard-Way-Azure/2-Terraform-Azure-services-creation/4-aks + ``` + +3. 
**Add tfsec to GitHub Actions Workflow** + + Open your GitHub Actions workflow file (`.github/workflows/main.yml`) and add the tfsec action: + + ```yaml + - name: tfsec + uses: aquasecurity/tfsec-pr-commenter-action@v1.3.0 + with: + tfsec_args: --soft-fail + github_token: ${{ github.token }} + ``` + + The `--soft-fail` argument ensures the workflow doesn't fail when security issues are found, but still reports them as comments on your PR. + +4. **Understanding tfsec Results** + + tfsec checks include: + - [ ] Insecure security group rules + - [ ] Unencrypted resources + - [ ] Public exposure of sensitive resources + - [ ] Missing logging configurations + - [ ] IAM misconfigurations + - [ ] Azure-specific security best practices + +## πŸ” Verification + +To ensure tfsec is working correctly: +1. Run tfsec locally to see immediate results +2. Create a pull request with Terraform code changes +3. Verify that tfsec is adding security-related comments to your PR +4. Review and address the issues identified + +Example tfsec output: + +``` + Results: + HIGH: Resource 'azurerm_storage_account.storage' uses unencrypted storage for account 'mystorageaccount' + Impact: Data could be read if compromised + Resolution: Enable encryption for storage accounts + More info: https://aquasecurity.github.io/tfsec/v1.28.0/checks/azure/storage/encrypt-in-transit/ + File: ./storage.tf:Line:1:Column:1 +``` + +## 🧠 Knowledge Check + +After integrating tfsec, consider these questions: +1. How does tfsec differ from Checkov in its approach to security scanning? +2. What are the benefits of having security checks integrated directly into the PR process? +3. How would you handle false positives in tfsec findings? +4. What is the significance of the `--soft-fail` flag in the GitHub Action? + +## πŸ’‘ Pro Tips + +1. 
**Customise Checks with .tfsec.yml** + + Create a `.tfsec.yml` file in your repository root to customise which checks to include or exclude: + + ```yaml + exclude: + # Exclude a specific check + - azure-storage-use-secure-tls-policy + + # Set minimum severity level + minimum_severity: MEDIUM + ``` + +2. **Generate a Baseline** + + If you have existing issues that you want to ignore temporarily: + + ```bash + tfsec --soft-fail --out=tfsec.baseline ./path/to/code + ``` + + Then use the baseline in future scans: + + ```bash + tfsec --baseline tfsec.baseline ./path/to/code + ``` + +3. **Output Formats** + + tfsec supports multiple output formats for CI/CD integration: + + ```bash + # JSON output + tfsec --format=json ./path/to/code + + # SARIF format (for GitHub Code Scanning) + tfsec --format=sarif ./path/to/code + + # JUnit format (for test reporting) + tfsec --format=junit ./path/to/code + ``` diff --git a/6-Terraform-Docs/1-Setup-Terraform-Docs.md b/6-Terraform-Docs/1-Setup-Terraform-Docs.md new file mode 100644 index 0000000..31febc1 --- /dev/null +++ b/6-Terraform-Docs/1-Setup-Terraform-Docs.md @@ -0,0 +1,566 @@ +# πŸ“– Set Up Terraform-docs with GitHub Actions + +> **Estimated Time:** ⏱️ **20-25 minutes** + +## 🎯 **Learning Objectives** + +By the end of this tutorial, you will: +- [ ] **Understand terraform-docs** utility and its benefits +- [ ] **Configure GitHub Actions** for automated documentation generation +- [ ] **Set up README templates** with proper injection markers +- [ ] **Customize documentation output** for different modules +- [ ] **Implement automated documentation** workflow in your repository + +## πŸ“‹ **Prerequisites** + +**βœ… Required Knowledge:** +- [ ] Basic understanding of GitHub Actions workflows +- [ ] Familiarity with Terraform module structure +- [ ] Markdown syntax and formatting +- [ ] Git pull request workflow + +**πŸ”§ Required Tools:** +- [ ] GitHub repository with Terraform code +- [ ] Permissions to modify GitHub Actions 
workflows +- [ ] Access to create/modify README.md files +- [ ] Completed: Terraform modules from previous tutorials + +**πŸ—οΈ Infrastructure Dependencies:** +- [ ] Terraform modules with variables and outputs defined +- [ ] GitHub repository with Actions enabled +- [ ] Existing or new workflow file structure + +## πŸš€ **Step-by-Step Implementation** + +### **Step 1: Understand Terraform-docs** ⏱️ *5 minutes* + +1. **πŸ“š Learn What Terraform-docs Does** + + **🎯 Purpose:** Automatically generates documentation from Terraform modules + + **πŸ“Š Extracts Information:** + - [ ] **Inputs** - Module variables and their descriptions + - [ ] **Outputs** - Module outputs and descriptions + - [ ] **Providers** - Required Terraform providers + - [ ] **Requirements** - Terraform version constraints + - [ ] **Resources** - AWS/Azure/GCP resources created + - [ ] **Modules** - Child modules referenced + +2. **πŸ” Review Module Structure for Documentation** + ```bash + # Example: Check ACR module structure + cd 2-Terraform-AZURE-Services-Creation/1-acr + ls -la + ``` + **βœ… Required Files for Good Documentation:** + - `variables.tf` - Input parameters with descriptions + - `outputs.tf` - Module outputs with descriptions + - `versions.tf` - Provider requirements + - `README.md` - Module documentation (to be auto-generated) + +3. **πŸ“ Example of Well-Documented Variables** + ```hcl + # variables.tf + variable "acr_name" { + description = "Name of the Azure Container Registry" + type = string + validation { + condition = can(regex("^[a-zA-Z0-9]+$", var.acr_name)) + error_message = "ACR name must contain only alphanumeric characters." + } + } + + variable "location" { + description = "Azure region where resources will be created" + type = string + default = "uksouth" + } + ``` + +### **Step 2: Configure GitHub Actions Workflow** ⏱️ *8 minutes* + +4. 
**πŸ“‚ Navigate to Workflow Directory** + ```bash + # Create .github/workflows directory if it doesn't exist + mkdir -p .github/workflows + cd .github/workflows + ``` + +5. **πŸ“„ Update Existing Workflow or Create New One** + + **Option A: Add to Existing Workflow (recommended)** + ```bash + # Edit your existing main.yml workflow + nano main.yml # or code main.yml + ``` + + **Option B: Create Dedicated Documentation Workflow** + ```bash + # Create terraform-docs.yml workflow + touch terraform-docs.yml + ``` + +6. **βš™οΈ Add Terraform-docs Step to Workflow** + ```yaml + # Add this step to your existing workflow or create new one + - name: Render terraform docs and push changes back to PR + uses: terraform-docs/gh-actions@v1.3.0 + with: + working-dir: | + 2-Terraform-AZURE-Services-Creation/1-acr + 2-Terraform-AZURE-Services-Creation/2-vnet + 2-Terraform-AZURE-Services-Creation/3-log-analytics + 2-Terraform-AZURE-Services-Creation/4-aks + output-file: README.md + output-method: inject + git-push: "true" + git-commit-message: "docs: update Terraform documentation" + ``` + + **🎯 Configuration Breakdown:** + - `working-dir` - Directories containing Terraform modules + - `output-file` - Target file for documentation (README.md) + - `output-method` - How to insert docs (inject between markers) + - `git-push` - Automatically commit changes back to PR + - `git-commit-message` - Custom commit message for documentation updates + +### **Step 3: Prepare README Templates** ⏱️ *8 minutes* + +7. **πŸ“ Create README.md Files for Each Module** + ```bash + # Example: Create README for ACR module + cd ../../2-Terraform-AZURE-Services-Creation/1-acr + + cat > README.md << 'EOF' + # Azure Container Registry (ACR) Module + + This Terraform module creates an Azure Container Registry with security best practices. 
+ + ## Usage + + ```hcl + module "acr" { + source = "./1-acr" + acr_name = "myregistry" + resource_group_name = "my-rg" + location = "uksouth" + } + ``` + + ## Documentation + + <!-- BEGIN_TF_DOCS --> + <!-- END_TF_DOCS --> + + ## Security Features + + - Admin account disabled by default + - Premium SKU for production workloads + - Network access restrictions + - Vulnerability scanning enabled + EOF + ``` + +8. **πŸ”„ Repeat for Other Modules** + ```bash + # Create README for VNET module + cd ../2-vnet + cat > README.md << 'EOF' + # Azure Virtual Network (VNET) Module + + Creates Azure Virtual Network with subnets, NSGs, and Application Load Balancer. + + ## Architecture + + This module provisions: + - Virtual Network with custom address space + - Multiple subnets for different workloads + - Network Security Groups with proper associations + - Application Load Balancer for container workloads + + <!-- BEGIN_TF_DOCS --> + <!-- END_TF_DOCS --> + EOF + ``` + +9. **βœ… Validate Marker Placement** + ```bash + # Check all README files have proper markers + find ../.. -name "README.md" -exec grep -l "BEGIN_TF_DOCS" {} \; + ``` + +### **Step 4: Test and Customize Documentation** ⏱️ *6 minutes* + +10. **🎯 Create Custom Configuration (Optional)** + ```bash + # Create .terraform-docs.yml in repository root + cd ../../ + cat > .terraform-docs.yml << 'EOF' + formatter: "markdown table" + + sections: + show: + - requirements + - providers + - inputs + - outputs + - resources + hide: [] + + output: + file: README.md + mode: inject + template: |- + <!-- BEGIN_TF_DOCS --> + {{ .Content }} + <!-- END_TF_DOCS --> + + sort: + enabled: true + by: name + + settings: + anchor: true + color: true + default: true + description: true + escape: true + hide-empty: false + html: true + indent: 2 + lockfile: true + read-comments: true + required: true + sensitive: true + type: true + EOF + ``` + +11. 
**πŸ§ͺ Test Workflow Locally (Optional)** + ```bash + # Install terraform-docs locally for testing + # macOS + brew install terraform-docs + + # Test documentation generation + cd 2-Terraform-AZURE-Services-Creation/1-acr + terraform-docs markdown table --output-file README.md --output-mode inject . + + # Review generated content + cat README.md + ``` + +12. **πŸš€ Trigger Workflow** + ```bash + # Create a test change and push to trigger workflow + git add . + git commit -m "feat: add terraform-docs configuration" + git push origin Updates-July-2025 + + # Create pull request to trigger documentation generation + # (This can be done via GitHub UI or GitHub CLI) + ``` + +## βœ… **Validation Steps** + +**πŸ” Workflow Validation:** +- [ ] GitHub Actions workflow includes terraform-docs step +- [ ] All Terraform modules have README.md files with markers +- [ ] Configuration file properly formatted and placed +- [ ] Workflow permissions allow pushing changes + +**πŸ”§ Technical Validation:** +```bash +# Comprehensive validation script +echo "πŸ“– Validating terraform-docs setup..." + +# Check for workflow file +if [ -f ".github/workflows/main.yml" ]; then + echo "βœ… GitHub Actions workflow found" + + # Check if terraform-docs is configured + if grep -q "terraform-docs" .github/workflows/main.yml; then + echo "βœ… terraform-docs step configured" + else + echo "❌ terraform-docs step not found in workflow" + fi +else + echo "❌ GitHub Actions workflow not found" +fi + +# Check README files with markers +README_COUNT=$(find . -name "README.md" -exec grep -l "BEGIN_TF_DOCS" {} \; | wc -l) +echo "πŸ“Š README files with terraform-docs markers: $README_COUNT" + +# Check terraform modules +MODULE_COUNT=$(find . 
-name "*.tf" -exec dirname {} \; | sort -u | wc -l) +echo "πŸ“Š Terraform modules found: $MODULE_COUNT" + +if [ -f ".terraform-docs.yml" ]; then + echo "βœ… Custom terraform-docs configuration found" +else + echo "ℹ️ Using default terraform-docs configuration" +fi + +echo "βœ… terraform-docs validation complete!" +``` + +**πŸ“Š Documentation Quality Checklist:** +- [ ] **Variables** - All inputs documented with descriptions +- [ ] **Outputs** - All outputs documented with descriptions +- [ ] **Examples** - Usage examples provided in README +- [ ] **Architecture** - High-level module purpose explained +- [ ] **Security** - Security considerations documented + +## 🚨 **Troubleshooting Guide** + +**❌ Common Workflow Issues:** +```bash +# Problem: terraform-docs action fails +# Solution: Check workflow syntax and permissions +# Verify the workflow file syntax +cat .github/workflows/main.yml | grep -A 10 "terraform-docs" + +# Problem: No changes pushed back to PR +# Solution: Verify git-push permissions and settings +# Check if workflow has write permissions to repository + +# Problem: Documentation not generated +# Solution: Verify markers exist in README files +grep -r "BEGIN_TF_DOCS" --include="*.md" . +``` + +**πŸ”§ Configuration Issues:** +```bash +# Problem: Wrong working directories specified +# Solution: Verify module paths exist +for dir in 2-Terraform-AZURE-Services-Creation/1-acr 2-Terraform-AZURE-Services-Creation/2-vnet; do + if [ -d "$dir" ]; then + echo "βœ… $dir exists" + else + echo "❌ $dir not found" + fi +done + +# Problem: Malformed YAML configuration +# Solution: Validate YAML syntax +if command -v yamllint &> /dev/null; then + yamllint .terraform-docs.yml +else + python3 -c "import yaml; yaml.safe_load(open('.terraform-docs.yml'))" +fi +``` + +**πŸ“ Documentation Issues:** +```bash +# Problem: Poor variable descriptions +# Solution: Add meaningful descriptions to all variables +grep -r "description.*=" --include="*.tf" . 
| grep -v "TODO\|FIXME" + +# Problem: Missing outputs documentation +# Solution: Add descriptions to all outputs +find . -name "outputs.tf" -exec grep -L "description" {} \; +``` + +## πŸ’‘ **Knowledge Check** + +**🎯 Documentation Best Practices:** +1. Why is automated documentation important for Infrastructure as Code? +2. How does terraform-docs determine what information to include? +3. What are the benefits of injecting documentation vs maintaining separate files? +4. How can automated documentation improve team collaboration? + +**πŸ“ Answers:** +1. **Automated documentation** ensures consistency, reduces maintenance burden, and keeps docs synchronized with code changes +2. **terraform-docs** parses Terraform files for variables, outputs, providers, and resources with their metadata +3. **Injection** keeps documentation close to code, ensures synchronization, and reduces duplication +4. **Team collaboration** improves through consistent formatting, up-to-date information, and reduced onboarding time + +**πŸ” Advanced Applications:** +- **Compliance:** How could terraform-docs help with compliance documentation? +- **Multi-environment:** How would you handle documentation for different environments? +- **Integration:** What other tools could complement terraform-docs? 
+ +## 🎯 **Next Steps** + +**βœ… Upon Completion:** +- [ ] terraform-docs GitHub Actions workflow configured +- [ ] README templates created for all modules +- [ ] Documentation generation tested and validated +- [ ] Custom configuration applied for consistent formatting +- [ ] Automated documentation integrated into development workflow + +**πŸš€ Future Enhancements:** +- [ ] Add architecture diagrams using terraform-graph +- [ ] Implement pre-commit hooks for local development +- [ ] Create module-specific documentation standards +- [ ] Set up documentation validation in CI/CD pipeline + +--- + +## πŸ“š **Additional Resources** + +- πŸ”— [Terraform-docs Documentation](https://terraform-docs.io/) +- πŸ”— [GitHub Actions Marketplace](https://github.com/marketplace/actions/terraform-docs) +- πŸ”— [Terraform Module Best Practices](https://www.terraform.io/docs/language/modules/develop/index.html) +- πŸ”— [Documentation as Code](https://docs.github.com/en/communities/documenting-your-project-with-wikis) + +**🎯 Pro Tips:** +- **Consistent formatting** across all modules improves readability +- **Rich descriptions** in variables and outputs enhance generated documentation +- **Examples in README** templates provide context for module usage +- **Regular updates** ensure documentation stays current with code changes + +2. **Add terraform-docs GitHub Action** + + - Open your GitHub Actions workflow file (`.github/workflows/main.yml`) + ```yaml + - name: Render terraform docs and push changes back to PR + uses: terraform-docs/gh-actions@v1.3.0 + with: + working-dir: . + output-file: README.md + output-method: inject + git-push: "true" + ``` + +3. **Prepare Your README.md Files** + + - For each Terraform module where you want documentation generated, open or create a README.md file + - Add the terraform-docs markers where you want the documentation inserted: + + ```markdown + <!-- BEGIN_TF_DOCS --> + <!-- END_TF_DOCS --> + ``` + + These markers tell terraform-docs where to inject the generated documentation. + +4. 
**Test the Workflow** + + - Make a change to your Terraform code + - Create a pull request + - The GitHub Action will automatically run and update your README.md files with generated documentation + - The changes will be pushed back to the same PR + +## πŸ” Verification + +To ensure terraform-docs is working correctly: +1. Create a pull request with a change to Terraform code +2. Wait for the GitHub Action to complete +3. Check that the README.md files have been updated with documentation between the markers +4. Verify that the documentation accurately reflects your Terraform code + +Example of generated documentation: + +``` +## Requirements + +| Name | Version | +|------|---------| +| terraform | >= 1.0.0 | +| azurerm | >= 3.0.0 | + +## Providers + +| Name | Version | +|------|---------| +| azurerm | >= 3.0.0 | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| resource_group_name | Name of the resource group | `string` | n/a | yes | +| location | Azure region for resources | `string` | `"uksouth"` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| acr_id | ID of the Azure Container Registry | +``` + +## 🧠 Knowledge Check + +After setting up terraform-docs, consider these questions: +1. Why is automated documentation important for infrastructure as code? +2. How does terraform-docs determine what to include in the documentation? +3. What are the benefits of injecting documentation into README files versus maintaining separate docs? +4. How can terraform-docs improve collaboration in a team environment? + +## πŸ’‘ Pro Tips + +1. 
**Customising Output Format** + + You can customise the output format using a `.terraform-docs.yml` file in your repository root: + + ```yaml + formatter: "markdown table" + + sections: + show: + - requirements + - providers + - inputs + - outputs + - resources + + output: + file: README.md + mode: inject + template: |- + + {{ .Content }} + + ``` + +2. **Module-Specific Configuration** + + For different documentation in different modules, create module-specific configuration files: + + ``` + my-terraform-project/ + β”œβ”€β”€ .terraform-docs.yml # Default config + β”œβ”€β”€ module1/ + β”‚ β”œβ”€β”€ .terraform-docs.yml # Module-specific config + β”‚ └── README.md + └── module2/ + └── README.md + ``` + +3. **Configure Which Files to Document** + + In your GitHub workflow, specify which directories to document: + + ```yaml + with: + working-dir: | + terraform/module1 + terraform/module2 + output-file: README.md + output-method: inject + git-push: "true" + ``` + +4. **Pre-Commit Hook for Local Development** + + Install terraform-docs locally and set up a pre-commit hook to maintain documentation during development: + + ```bash + # Install terraform-docs (macOS) + brew install terraform-docs + + # Run manually + terraform-docs markdown table --output-file README.md --output-mode inject ./my-module + ``` + +5. **Adding Diagrams** + + Consider enhancing your documentation with architecture diagrams using tools like [Terraform Graph](https://github.com/offu/terraform-graph): + + ```bash + terraform graph | dot -Tsvg > graph.svg + ``` + + Then include the SVG in your README.md outside the terraform-docs markers. 
diff --git a/6-Terraform-Docs/README.md b/6-Terraform-Docs/README.md new file mode 100644 index 0000000..70affa5 --- /dev/null +++ b/6-Terraform-Docs/README.md @@ -0,0 +1,36 @@ +# Terraform Documentation Automation + +## Overview +This directory contains instructions for implementing automated documentation generation for Terraform code using terraform-docs and GitHub Actions. Proper documentation is a critical aspect of infrastructure as code that is often overlooked but can greatly improve collaboration and maintenance. + +## Labs in this Section + +### [1. Set Up Terraform-docs with GitHub Actions](./1-Setup-Terraform-Docs.md) +Learn how to automate the creation and maintenance of Terraform documentation using terraform-docs and GitHub Actions. + +## Features and Benefits + +- Automated documentation generation on every pull request +- Consistent documentation format across all Terraform code +- Documentation that stays in sync with your infrastructure code +- Improved developer experience and onboarding +- Enhanced collaboration through better documentation + +## Integration with DevOps Workflow + +The terraform-docs tool integrates seamlessly with your existing DevOps workflow: + +1. Developers write or update Terraform code +2. A pull request is created +3. GitHub Actions automatically generates or updates documentation +4. Documentation changes are committed back to the PR +5. Reviewers can see both code and documentation changes together +6. 
The PR is merged with complete, up-to-date documentation + +## Best Practices Applied + +- Documentation as code +- Automated processes to ensure consistency +- Integration with existing CI/CD pipelines +- Standardised format for better readability +- Version-controlled documentation that evolves with your code diff --git a/AWS/1-Configure-Credentials-To-Access-AWS.md b/AWS/1-Configure-Credentials-To-Access-AWS.md deleted file mode 100644 index f4d95fb..0000000 --- a/AWS/1-Configure-Credentials-To-Access-AWS.md +++ /dev/null @@ -1,13 +0,0 @@ -# Configure Credentials To Access AWS At The Programmatic Level - -The purpse of this lab is to configure IAM credentials on your local computer so that you can access AWS at a programmatic level (SDKs, CLI, Terraform, etc.) - -## IAM -1. Open up the AWS management console and go to IAM -2. Create a new user or use your exististing AWS user -3. Give the user programmatic access -4. Copy the access key and secret key - -## Install The AWS CLI - -## Configure The AWS CLI \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..6707d9d --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,89 @@ +# Changelog + +All notable changes to this project will be documented in this file. 
+ +## [2026-04-15] - AKS Version Upgrade + +### Changed +- **Kubernetes version**: Updated from 1.33 to 1.35 + + + +### Changed +- **Terraform**: Required version bumped from `>= 1.9.8` to `>= 1.14.0, < 2.0.0`; pinned to `1.14.8` in CI workflows +- **Azure provider (azurerm)**: Updated from `>= 4.28.0` to `>= 4.68.0, < 5.0.0` across all modules +- **GitHub Actions**: + - `actions/checkout`: v4 β†’ v6 + - `azure/login`: v2 β†’ v3 + - `azure/setup-kubectl`: v4 β†’ v5 + - `azure/setup-helm`: v4 β†’ v5 + - `docker/setup-buildx-action`: v3 β†’ v4 + - `hashicorp/setup-terraform`: v3 β†’ v4 + - `stefanzweifel/git-auto-commit-action`: v5 β†’ v7 +- **Flask**: 3.0.3 β†’ 3.1.3 +- **Werkzeug**: 3.0.4 β†’ 3.1.8 + +### Fixed +- `scripts/deploy-all.sh`: Resolved path-resolution bug where all `cd` calls were relative to the working directory rather than the repo root, causing failures from step 2 onwards. Script now computes `REPO_ROOT` from its own location and uses absolute paths throughout. +- `scripts/deploy-all.sh`: `terraform init` now passes `-backend-config` flags matching the dynamic `${PROJECT_NAME}` storage account, consistent with the GitHub Actions workflow. +- `scripts/deploy-all.sh`: Fixed deprecated `--query objectId` β†’ `--query id` for Azure AD group creation. + +### Documentation +- `Test-lab-only.md`: Rewritten with accurate version table, AKS version verification step, environment variable reference, and step-by-step description of what the deploy script does. +- `2-Terraform-AZURE-Services-Creation/README.md` and `1-Create-ACR.md`: Updated Terraform version prerequisite references. 
+ +## [2025-07-28] - Major Update + +### Added +- Kubernetes health checks and readiness probes to deployment manifest +- Auto-scaling configuration for AKS cluster (min: 1, max: 5 nodes) +- Azure availability zones support for improved resilience +- Network policy support for enhanced security +- Automatic upgrade channel for patch updates +- Enhanced resource limits for better performance + +### Changed +- **Kubernetes version**: Updated from 1.32 to 1.33 +- **Terraform version**: Updated from 1.11.0 to 1.9.8 +- **Azure provider**: Updated from 4.27.0 to 4.28.0+ +- **Python base image**: Updated from 3.12-slim to 3.13-slim +- **Flask**: Updated from 2.3.3 to 3.0.3 +- **Werkzeug**: Updated from 2.3.8 to 3.0.4 +- **ALB Controller**: Updated from 1.0.0 to 1.7.9 +- **tfsec GitHub Action**: Updated from v1.2.0 to v1.3.0 +- **terraform-docs GitHub Action**: Updated from @main to v1.3.0 +- **Checkov**: Pinned to specific version 3.2.4 for consistency + +### Enhanced +- **AKS Configuration**: + - Enabled Azure RBAC for improved security + - Added automatic scaling capabilities + - Configured network policies for better security + - Added availability zones for high availability + - Improved network configuration with DNS and service CIDR + +- **Container Configuration**: + - Increased memory limits from 256Mi to 512Mi + - Increased CPU limits from 250m to 500m + - Added liveness and readiness probes + - Updated container image tag from v1 to v2 + +- **CI/CD Pipeline**: + - Enhanced GitHub Actions workflow with latest action versions + - Added proper commit user email for auto-commit action + - Updated Terraform version management + +### Security Improvements +- Enabled Azure RBAC on AKS cluster for enhanced role-based access control +- Added network policies for better pod-to-pod communication security +- Updated all dependencies to latest secure versions +- Enhanced container security with health checks + +### Documentation Updates +- Updated all version references 
throughout documentation +- Enhanced README with version information table +- Improved setup instructions with latest tool versions +- Added comprehensive changelog for tracking changes + +## Previous Versions +- Initial release with Kubernetes 1.32, Terraform 1.11.0, and Python 3.12 diff --git a/Docker/1-Create-Docker-Image.md b/Docker/1-Create-Docker-Image.md deleted file mode 100644 index ff77fd0..0000000 --- a/Docker/1-Create-Docker-Image.md +++ /dev/null @@ -1,39 +0,0 @@ -# Creating the Docker image for the Uber app - -In this lab you will create a Docker image to containerize the Uber app. - -## Create The Docker Image - -1. `cd` into the *Docker* directory where you will see a *Dockerfile* and *app* directory. The app directory is what stores the Python application and the Dockerfile will be used to build the app. - -2. Open the Dockerfile - -3. Within the Dockerfile, you'll see a few key components - - The Docker image that's being used is Python. It's using the latest version - - There's a new directory being created called `/build`, which is where the Python app will reside - - The *app* directory will be copied into the `/build` directory, along with the `requirements.txt` file to install all of the Python requirements for the app - - The app will run as soon as the container gets created and comes up - -4. To create the Docker image, you'll run the following command: -`docker build -t uberapp .` - -The `-t` is for the tag (the name) of the Docker image and the `.` is telling the Docker CLI that the Dockerfile is in the current directory - -5. After the Docker image is created, run the following command to confirm the Docker image is on your machine. -`docker image ls` - -## Run The Docker Image Locally - -Now that the Docker image is created, you can run the container locally just to confirm it'll work and not crash. - -1. 
To run the Docker container, run the following command: -`docker run -tid uber` - -- `t` stands for a TTY console -- `i` stands for interactive -- `d` stands for detach so your terminal isn't directly connected to the Docker container - -2. To confirm the Docker container is running, run the following command: -`docker container ls` - -You should now see the container running. diff --git a/Docker/Dockerfile b/Docker/Dockerfile deleted file mode 100644 index 4a713e1..0000000 --- a/Docker/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM python:latest - -RUN mkdir /build -WORKDIR /build - -COPY app /build - -COPY app/requirements.txt /build - -RUN pip install -r requirements.txt - -EXPOSE 5000 - -CMD [ "python", "app.py" ] \ No newline at end of file diff --git a/Docker/Push Image To ECR.md b/Docker/Push Image To ECR.md deleted file mode 100644 index 1cf0a57..0000000 --- a/Docker/Push Image To ECR.md +++ /dev/null @@ -1,17 +0,0 @@ -# Push Image To ECR - -The ECR repo will be where you store the Docker image that you created on your local computer in step 1. - -## Log Into The ECR Repository -1. Terraform Code -2. Log in to ECR with AWS CLI -`aws ecr get-login-password --region *your_aws_region* | docker login --username AWS --password-stdin *your_aws_account_id*.dkr.ecr.*your_aws_region*.amazonaws.com` - - -## Tag The Docker image -1. Tag the Docker image -`docker tag uber *your_aws_account_id*.dkr.ecr.*your_aws_region*.amazonaws.com` - -## Push The Docker Image To ECR -1. 
Push the Docker image to ECR -`docker push *your_aws_account_id*.dkr.ecr.us-east-1.amazonaws.com/*repo_name*` diff --git a/Docker/app/.gitignore b/Docker/app/.gitignore deleted file mode 100644 index 7c6c6ce..0000000 --- a/Docker/app/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.pyc -*.egg-info -.coverage -env/ diff --git a/Docker/app/.travis.yml b/Docker/app/.travis.yml deleted file mode 100644 index b5c4ab9..0000000 --- a/Docker/app/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: python -install: make bootstrap -script: make -branches: - except: - - /^v[0-9]/ -after_success: coveralls diff --git a/Docker/app/Makefile b/Docker/app/Makefile deleted file mode 100644 index bd698c9..0000000 --- a/Docker/app/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -.PHONY: bootstrap clean lint test -.DEFAULT_GOAL := test - -test: clean lint - @py.test test/ --cov app.py -s - -lint: - @flake8 . - -clean: - @find . -type f -name '*.pyc' -delete - -bootstrap: - @pip install -r requirements.txt - @pip install -r requirements-test.txt - @python setup.py develop diff --git a/Docker/app/Procfile b/Docker/app/Procfile deleted file mode 100644 index 244c130..0000000 --- a/Docker/app/Procfile +++ /dev/null @@ -1 +0,0 @@ -web: gunicorn app:app --log-file=- diff --git a/Docker/app/README.md b/Docker/app/README.md deleted file mode 100644 index dc09eb6..0000000 --- a/Docker/app/README.md +++ /dev/null @@ -1,56 +0,0 @@ -Example Uber app for developers -============================== - -[![TravisCI](https://travis-ci.org/uber/Python-Sample-Application.svg?branch=master)](https://travis-ci.org/uber/Python-Sample-Application) -[![Coverage Status](https://coveralls.io/repos/uber/Python-Sample-Application/badge.png)](https://coveralls.io/r/uber/Python-Sample-Application) - -https://developer.uber.com/ - -What Is This? -------------- - -This is a simple Python/Flask application intended to provide a working example of Uber's external API. 
The goal of these endpoints is to be simple, well-documented and to provide a base for developers to develop other applications off of. - - -How To Use This ---------------- - -1. Navigate over to https://developer.uber.com/, and sign up for an Uber developer account. -2. Register a new Uber application and make your Redirect URI `http://localhost:7000/submit` - ensure that both the `profile` and `history` OAuth scopes are checked. -3. Fill in the relevant information in the `config.json` file in the root folder and add your client id and secret as the environment variables `UBER_CLIENT_ID` and `UBER_CLIENT_SECRET`. -4. Run `export UBER_CLIENT_ID="`*{your client id}*`"&&export UBER_CLIENT_SECRET="`*{your client secret}*`"` -5. Run `pip install -r requirements.txt` to install dependencies -6. Run `python app.py` -7. Navigate to http://localhost:7000 in your browser - - -Testing -------- - -1. Install the dependencies with `make bootstrap` -2. Run the command `make test` -3. If you delete the fixtures, or decide to add some of your own, you’ll have to re-generate them, and the way this is done is by running the app, getting an auth_token from the main page of the app. Paste that token in place of the `test_auth_token` at the top of the `test_endpoints.py` file, then run the tests. - - -Development ------------ - -If you want to work on this application we’d love your pull requests and tickets on GitHub! - -1. If you open up a ticket, please make sure it describes the problem or feature request fully. -2. If you send us a pull request, make sure you add a test for what you added, and make sure the full test suite runs with `make test`. - -Deploy to Heroku ----------------- - -Click the button below to set up this sample app on Heroku: - -[![Deploy](https://www.herokucdn.com/deploy/button.png)](https://heroku.com/deploy) - -After creating your app on Heroku, you have to configure the redirect URL for your Uber OAuth app. 
Use a `https://`*{your-app-name}*`.herokuapp.com/submit` URL. -You will also want to configure the heroku environment variable FLASK_DEBUG=False in order to properly serve SSL traffic. - -Making Requests ---------------- - -The base for all requests is https://api.uber.com/v1/, to find a list of all available endpoints, please visit: https://developer.uber.com/v1/endpoints/ diff --git a/Docker/app/app.json b/Docker/app/app.json deleted file mode 100644 index a0bee26..0000000 --- a/Docker/app/app.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "name": "Uber API Python/Flask sample", - "logo": "http://blogcdn.uber.com/wp-content/uploads/2011/12/New-Logo-Vertical-Dark.jpg", - "repository": "https://github.com/uber/Python-Sample-Application", - "keywords": ["uber", "python", "flask"], - "env": { - "UBER_CLIENT_ID": { - "description": "Your Uber API client id" - }, - "UBER_CLIENT_SECRET": { - "description": "Your Uber API client secret" - } - } -} diff --git a/Docker/app/app.py b/Docker/app/app.py deleted file mode 100644 index 423f06a..0000000 --- a/Docker/app/app.py +++ /dev/null @@ -1,233 +0,0 @@ -from __future__ import absolute_import - -import json -import os -from urllib.parse import urlparse - -from flask import Flask, render_template, request, redirect, session -from flask_sslify import SSLify -from rauth import OAuth2Service -import requests - -app = Flask(__name__, static_folder='static', static_url_path='') -app.requests_session = requests.Session() -app.secret_key = os.urandom(24) - -sslify = SSLify(app) - -with open('config.json') as f: - config = json.load(f) - - -def generate_oauth_service(): - """Prepare the OAuth2Service that is used to make requests later.""" - return OAuth2Service( - client_id=os.environ.get('UBER_CLIENT_ID'), - client_secret=os.environ.get('UBER_CLIENT_SECRET'), - name=config.get('name'), - authorize_url=config.get('authorize_url'), - access_token_url=config.get('access_token_url'), - base_url=config.get('base_url'), - ) - - -def 
generate_ride_headers(token): - """Generate the header object that is used to make api requests.""" - return { - 'Authorization': 'bearer %s' % token, - 'Content-Type': 'application/json', - } - - -@app.route('/health', methods=['GET']) -def health(): - """Check the status of this application.""" - return ';-)' - - -@app.route('/', methods=['GET']) -def signup(): - """The first step in the three-legged OAuth handshake. - - You should navigate here first. It will redirect to login.uber.com. - """ - params = { - 'response_type': 'code', - 'redirect_uri': get_redirect_uri(request), - 'scopes': ','.join(config.get('scopes')), - } - url = generate_oauth_service().get_authorize_url(**params) - return redirect(url) - - -@app.route('/submit', methods=['GET']) -def submit(): - """The other two steps in the three-legged Oauth handshake. - - Your redirect uri will redirect you here, where you will exchange - a code that can be used to obtain an access token for the logged-in use. - """ - params = { - 'redirect_uri': get_redirect_uri(request), - 'code': request.args.get('code'), - 'grant_type': 'authorization_code' - } - response = app.requests_session.post( - config.get('access_token_url'), - auth=( - os.environ.get('UBER_CLIENT_ID'), - os.environ.get('UBER_CLIENT_SECRET') - ), - data=params, - ) - session['access_token'] = response.json().get('access_token') - - return render_template( - 'success.html', - token=response.json().get('access_token') - ) - - -@app.route('/demo', methods=['GET']) -def demo(): - """Demo.html is a template that calls the other routes in this example.""" - return render_template('demo.html', token=session.get('access_token')) - - -@app.route('/products', methods=['GET']) -def products(): - """Example call to the products endpoint. - - Returns all the products currently available in San Francisco. 
- """ - url = config.get('base_uber_url') + 'products' - params = { - 'latitude': config.get('start_latitude'), - 'longitude': config.get('start_longitude'), - } - - response = app.requests_session.get( - url, - headers=generate_ride_headers(session.get('access_token')), - params=params, - ) - - if response.status_code != 200: - return 'There was an error', response.status_code - return render_template( - 'results.html', - endpoint='products', - data=response.text, - ) - - -@app.route('/time', methods=['GET']) -def time(): - """Example call to the time estimates endpoint. - - Returns the time estimates from the given lat/lng given below. - """ - url = config.get('base_uber_url') + 'estimates/time' - params = { - 'start_latitude': config.get('start_latitude'), - 'start_longitude': config.get('start_longitude'), - } - - response = app.requests_session.get( - url, - headers=generate_ride_headers(session.get('access_token')), - params=params, - ) - - if response.status_code != 200: - return 'There was an error', response.status_code - return render_template( - 'results.html', - endpoint='time', - data=response.text, - ) - - -@app.route('/price', methods=['GET']) -def price(): - """Example call to the price estimates endpoint. - - Returns the time estimates from the given lat/lng given below. 
- """ - url = config.get('base_uber_url') + 'estimates/price' - params = { - 'start_latitude': config.get('start_latitude'), - 'start_longitude': config.get('start_longitude'), - 'end_latitude': config.get('end_latitude'), - 'end_longitude': config.get('end_longitude'), - } - - response = app.requests_session.get( - url, - headers=generate_ride_headers(session.get('access_token')), - params=params, - ) - - if response.status_code != 200: - return 'There was an error', response.status_code - return render_template( - 'results.html', - endpoint='price', - data=response.text, - ) - - -@app.route('/history', methods=['GET']) -def history(): - """Return the last 5 trips made by the logged in user.""" - url = config.get('base_uber_url_v1_1') + 'history' - params = { - 'offset': 0, - 'limit': 5, - } - - response = app.requests_session.get( - url, - headers=generate_ride_headers(session.get('access_token')), - params=params, - ) - - if response.status_code != 200: - return 'There was an error', response.status_code - return render_template( - 'results.html', - endpoint='history', - data=response.text, - ) - - -@app.route('/me', methods=['GET']) -def me(): - """Return user information including name, picture and email.""" - url = config.get('base_uber_url') + 'me' - response = app.requests_session.get( - url, - headers=generate_ride_headers(session.get('access_token')), - ) - - if response.status_code != 200: - return 'There was an error', response.status_code - return render_template( - 'results.html', - endpoint='me', - data=response.text, - ) - - -def get_redirect_uri(request): - """Return OAuth redirect URI.""" - parsed_url = urlparse(request.url) - if parsed_url.hostname == 'localhost': - return 'http://{hostname}:{port}/submit'.format( - hostname=parsed_url.hostname, port=parsed_url.port - ) - return 'https://{hostname}/submit'.format(hostname=parsed_url.hostname) - -if __name__ == '__main__': - app.debug = os.environ.get('FLASK_DEBUG', True) - app.run(host='0.0.0.0', 
port=5000) diff --git a/Docker/app/config.json b/Docker/app/config.json deleted file mode 100644 index 46e7fe6..0000000 --- a/Docker/app/config.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "access_token_url": "https://login.uber.com/oauth/token", - "authorize_url": "https://login.uber.com/oauth/authorize", - "base_url": "https://login.uber.com/", - "scopes": ["profile", "history_lite"], - "name": "Sample app", - "base_uber_url": "https://api.uber.com/v1/", - "base_uber_url_v1_1" : "https://api.uber.com/v1.1/", - "start_latitude": "37.781955", - "start_longitude": "-122.402367", - "end_latitude": "37.744352", - "end_longitude": "-122.416743" -} diff --git a/Docker/app/requirements.txt b/Docker/app/requirements.txt deleted file mode 100644 index cf39ca1..0000000 --- a/Docker/app/requirements.txt +++ /dev/null @@ -1,12 +0,0 @@ -Flask -Jinja2 -MarkupSafe -Werkzeug -gnureadline -itsdangerous -rauth -requests -urllib3 - -gunicorn -Flask-SSLify diff --git a/Docker/app/setup.cfg b/Docker/app/setup.cfg deleted file mode 100644 index 7598993..0000000 --- a/Docker/app/setup.cfg +++ /dev/null @@ -1,3 +0,0 @@ -[flake8] -max-line-length = 100 -exclude = env diff --git a/Docker/app/setup.py b/Docker/app/setup.py deleted file mode 100644 index 47b3962..0000000 --- a/Docker/app/setup.py +++ /dev/null @@ -1,10 +0,0 @@ -from setuptools import setup, find_packages - -setup( - name='Python-Sample-Application', - version='0.0.1', - author='Uber Engineering', - author_email='developer@uber.com', - packages=find_packages(), - description='Python sample application', -) diff --git a/Docker/app/static/util.js b/Docker/app/static/util.js deleted file mode 100644 index ebbe197..0000000 --- a/Docker/app/static/util.js +++ /dev/null @@ -1,7 +0,0 @@ -function action(endpoint_name) { - window.location.replace('/' + endpoint_name); -} - -function redirect_to_demo(code) { - window.location.replace('/demo'); -} diff --git a/Docker/app/templates/demo.html b/Docker/app/templates/demo.html deleted file 
mode 100644 index 936257e..0000000 --- a/Docker/app/templates/demo.html +++ /dev/null @@ -1,20 +0,0 @@ - - - - - - - {% if token %} -

Congratulations! you have successfully authenticated and your token is: {{ token }}

- {% else %} -

Something went wrong :(

- {% endif %} - -

Test the following functions of the api!

- - - - - - - diff --git a/Docker/app/templates/results.html b/Docker/app/templates/results.html deleted file mode 100644 index 9d4de3b..0000000 --- a/Docker/app/templates/results.html +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - - - -

Welcome to the {{ endpoint }} endpoint!

- -

Here is the result of a call to the {{ endpoint }}:

-

{{ data }}

- - diff --git a/Docker/app/templates/success.html b/Docker/app/templates/success.html deleted file mode 100644 index 621d24c..0000000 --- a/Docker/app/templates/success.html +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - diff --git a/Docker/app/test/fixtures/GitHub_emojis.json b/Docker/app/test/fixtures/GitHub_emojis.json deleted file mode 100644 index 429fd7a..0000000 --- a/Docker/app/test/fixtures/GitHub_emojis.json +++ /dev/null @@ -1 +0,0 @@ -{"http_interactions": [], "recorded_with": "betamax/0.4.0"} \ No newline at end of file diff --git a/Docker/app/test/fixtures/global_preserve_exact_body_bytes.json b/Docker/app/test/fixtures/global_preserve_exact_body_bytes.json deleted file mode 100644 index f58e137..0000000 --- a/Docker/app/test/fixtures/global_preserve_exact_body_bytes.json +++ /dev/null @@ -1 +0,0 @@ -{"http_interactions": [{"request": {"body": {"base64_string": "", "encoding": "utf-8"}, "headers": {"Accept-Encoding": ["gzip, deflate"], "Accept": ["*/*"], "User-Agent": ["python-requests/2.3.0 CPython/2.7.5 Darwin/13.2.0"]}, "method": "GET", "uri": "https://httpbin.org/get"}, "response": {"body": {"base64_string": "ewogICJhcmdzIjoge30sIAogICJoZWFkZXJzIjogewogICAgIkFjY2VwdCI6ICIqLyoiLCAKICAgICJBY2NlcHQtRW5jb2RpbmciOiAiZ3ppcCwgZGVmbGF0ZSIsIAogICAgIkNvbm5lY3Rpb24iOiAiY2xvc2UiLCAKICAgICJIb3N0IjogImh0dHBiaW4ub3JnIiwgCiAgICAiVXNlci1BZ2VudCI6ICJweXRob24tcmVxdWVzdHMvMi4zLjAgQ1B5dGhvbi8yLjcuNSBEYXJ3aW4vMTMuMi4wIiwgCiAgICAiWC1SZXF1ZXN0LUlkIjogIjE1ODlmMjgzLWRkYjMtNGU0YS1hYTMzLWVjM2I0NGVlY2JjMiIKICB9LCAKICAib3JpZ2luIjogIjguMjYuMTU3LjEyOCIsIAogICJ1cmwiOiAiaHR0cHM6Ly9odHRwYmluLm9yZy9nZXQiCn0=", "encoding": null}, "headers": {"content-length": ["353"], "server": ["gunicorn/18.0"], "connection": ["keep-alive"], "access-control-allow-credentials": ["true"], "date": ["Fri, 22 Aug 2014 18:11:35 GMT"], "access-control-allow-origin": ["*"], "content-type": ["application/json"]}, "status": {"message": "OK", "code": 200}, "url": "https://httpbin.org/get"}, "recorded_at": 
"2014-08-22T18:11:35"}], "recorded_with": "betamax/0.4.0"} \ No newline at end of file diff --git a/Docker/app/test/fixtures/history_failure.json b/Docker/app/test/fixtures/history_failure.json deleted file mode 100644 index df181de..0000000 --- a/Docker/app/test/fixtures/history_failure.json +++ /dev/null @@ -1 +0,0 @@ -{"http_interactions": [{"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept": ["*/*"], "Content-Type": ["application/json"], "Accept-Encoding": ["gzip, deflate"], "Authorization": ["bearer NOT_A_CODE"], "User-Agent": ["python-requests/2.3.0 CPython/2.7.5 Darwin/13.4.0"]}, "method": "GET", "uri": "https://api.uber.com/v1.1/history?limit=5&offset=0"}, "response": {"body": {"string": "{\"message\":\"Invalid OAuth 2.0 credentials provided.\",\"code\":\"unauthorized\"}", "encoding": null}, "headers": {"content-length": ["75"], "server": ["nginx"], "connection": ["keep-alive"], "date": ["Mon, 06 Oct 2014 16:58:57 GMT"], "x-uber-app": ["uberex-nonsandbox"], "content-type": ["application/json"]}, "status": {"message": "Unauthorized", "code": 401}, "url": "https://api.uber.com/v1.1/history?limit=5&offset=0"}, "recorded_at": "2014-10-06T16:58:57"}], "recorded_with": "betamax/0.4.0"} \ No newline at end of file diff --git a/Docker/app/test/fixtures/history_success.json b/Docker/app/test/fixtures/history_success.json deleted file mode 100644 index f0dd74b..0000000 --- a/Docker/app/test/fixtures/history_success.json +++ /dev/null @@ -1 +0,0 @@ -{"http_interactions": [{"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept": ["*/*"], "Content-Type": ["application/json"], "Accept-Encoding": ["gzip, deflate"], "Authorization": ["bearer MLth87eHvSAaCQ1vn7jTd0xA9Kapo5"], "User-Agent": ["python-requests/2.3.0 CPython/2.7.5 Darwin/13.4.0"]}, "method": "GET", "uri": "https://api.uber.com/v1.1/history?limit=5&offset=0"}, "response": {"body": {"base64_string": 
"H4sIAAAAAAAAA62TTW7bMBBG78K1aHB+yOH4KkURUBSJGojtxJYWReC7d5SFi9gGaqDRjuLokW++0Yerx+Uwu20UHNyx93OzRRjc626/W18P7tfuPB9Pv932x4c7z2Vezm5rX+3fXtvcJje4yQrKoTa3BdhIzFGFMw9uWXaTlbYWShtz9TFS8Jwq+kKZfW9x7KI1tKpGMcZpfpl3+5XDgFbNRIN7Ox2npc4vn6wC9lRjVcFPFvlsdR57nSahEDisN2qH6QuJE5ndqb0v7XxzBgFdhn+L4QZDRFXQv2LcU9fCwVcu3TPW4jV08hFUEiXRVPIjMYKwUr5BjDCG9FCMAos+IYYb0IyUrd3XwEJhqG0KPvVaPRcZvSaiNbreKcaecW3ybWDMwGiU//cyUmbr0H1gtkPIT3jxBpk0RYGrFkdoHdEGr49sMoheI3dfa5NWiGxm4YEWqVnJd2gZScFI91q2AyRPaOEmZEg5iOjVq/SeRonox1p0DSn4cQT10ssIQkUCyAMv1Bjo1mviMpbSxHeekmcwqoINgRifmh0dCxvr6/9lJEl2n3svtN5lvvy8/AHuOYdTZwQAAA==", "encoding": null}, "headers": {"x-rate-limit-remaining": ["999"], "content-language": ["en"], "content-encoding": ["gzip"], "transfer-encoding": ["chunked"], "strict-transport-security": ["max-age=2592000"], "server": ["nginx"], "connection": ["keep-alive"], "x-rate-limit-reset": ["1412618400"], "x-uber-app": ["uberex-nonsandbox"], "date": ["Mon, 06 Oct 2014 17:12:33 GMT"], "x-rate-limit-limit": ["1000"], "content-type": ["application/json"]}, "status": {"message": "OK", "code": 200}, "url": "https://api.uber.com/v1.1/history?limit=5&offset=0"}, "recorded_at": "2014-10-06T17:12:33"}], "recorded_with": "betamax/0.4.0"} \ No newline at end of file diff --git a/Docker/app/test/fixtures/me_failure.json b/Docker/app/test/fixtures/me_failure.json deleted file mode 100644 index 7abfbff..0000000 --- a/Docker/app/test/fixtures/me_failure.json +++ /dev/null @@ -1 +0,0 @@ -{"http_interactions": [{"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept": ["*/*"], "Content-Type": ["application/json"], "Accept-Encoding": ["gzip, deflate"], "Authorization": ["bearer NOT_A_CODE"], "User-Agent": ["python-requests/2.3.0 CPython/2.7.5 Darwin/13.2.0"]}, "method": "GET", "uri": "https://api.uber.com/v1/me"}, "response": {"body": {"string": "{\"message\":\"Invalid OAuth 2.0 credentials provided.\",\"code\":\"unauthorized\"}", "encoding": null}, "headers": {"content-length": ["75"], 
"server": ["nginx"], "connection": ["keep-alive"], "date": ["Fri, 22 Aug 2014 19:22:26 GMT"], "x-uber-app": ["uberex-nonsandbox"], "content-type": ["application/json"]}, "status": {"message": "Unauthorized", "code": 401}, "url": "https://api.uber.com/v1/me"}, "recorded_at": "2014-08-22T19:22:25"}], "recorded_with": "betamax/0.4.0"} \ No newline at end of file diff --git a/Docker/app/test/fixtures/me_success.json b/Docker/app/test/fixtures/me_success.json deleted file mode 100644 index d8cb028..0000000 --- a/Docker/app/test/fixtures/me_success.json +++ /dev/null @@ -1 +0,0 @@ -{"http_interactions": [{"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept": ["*/*"], "Content-Type": ["application/json"], "Accept-Encoding": ["gzip, deflate"], "Authorization": ["bearer vX0ye7xeg42vNcBtWv59k9K0WjB5qH"], "User-Agent": ["python-requests/2.3.0 CPython/2.7.5 Darwin/13.2.0"]}, "method": "GET", "uri": "https://api.uber.com/v1/me"}, "response": {"body": {"base64_string": "H4sIAAAAAAAAA0WMyw7CIBRE/4W1tLWlULpy78YPaNJcHhXsA0Ihxhj/XRoXLuecmXkjb2VMQaMemRj93g/lUKrzs/bOhiga2HkhF5fUFNwWi03HoawYkTXUDZ86RqhQZ8K64uH1HZ3QZMMexw3W4/EKaTewZLzAn94g2Nlk6INb3SidOrrKvAjPUK9gl5zn3/aShA6FdGtWKVmVTcsqAYwTTFnbYQICMFSa4pZIKRinilOOPl8CtsPG2gAAAA==", "encoding": null}, "headers": {"x-rate-limit-remaining": ["999"], "content-language": ["en"], "content-encoding": ["gzip"], "transfer-encoding": ["chunked"], "strict-transport-security": ["max-age=2592000"], "server": ["nginx"], "connection": ["keep-alive"], "x-rate-limit-reset": ["1408737600"], "x-uber-app": ["uberex-nonsandbox"], "date": ["Fri, 22 Aug 2014 19:22:26 GMT"], "x-rate-limit-limit": ["1000"], "content-type": ["application/json"]}, "status": {"message": "OK", "code": 200}, "url": "https://api.uber.com/v1/me"}, "recorded_at": "2014-08-22T19:22:25"}], "recorded_with": "betamax/0.4.0"} \ No newline at end of file diff --git a/Docker/app/test/fixtures/preserve_exact_bytes.json 
b/Docker/app/test/fixtures/preserve_exact_bytes.json deleted file mode 100644 index 80b274e..0000000 --- a/Docker/app/test/fixtures/preserve_exact_bytes.json +++ /dev/null @@ -1 +0,0 @@ -{"http_interactions": [{"request": {"body": {"base64_string": "", "encoding": "utf-8"}, "headers": {"Accept-Encoding": ["gzip, deflate"], "Accept": ["*/*"], "User-Agent": ["python-requests/2.3.0 CPython/2.7.5 Darwin/13.2.0"]}, "method": "GET", "uri": "https://httpbin.org/get"}, "response": {"body": {"base64_string": "ewogICJhcmdzIjoge30sIAogICJoZWFkZXJzIjogewogICAgIkFjY2VwdCI6ICIqLyoiLCAKICAgICJBY2NlcHQtRW5jb2RpbmciOiAiZ3ppcCwgZGVmbGF0ZSIsIAogICAgIkNvbm5lY3Rpb24iOiAiY2xvc2UiLCAKICAgICJIb3N0IjogImh0dHBiaW4ub3JnIiwgCiAgICAiVXNlci1BZ2VudCI6ICJweXRob24tcmVxdWVzdHMvMi4zLjAgQ1B5dGhvbi8yLjcuNSBEYXJ3aW4vMTMuMi4wIiwgCiAgICAiWC1SZXF1ZXN0LUlkIjogImFkYmJkMTUzLTk1MjYtNDk4Ni04NTI1LTZjNWUxMGNkNDA5ZSIKICB9LCAKICAib3JpZ2luIjogIjguMjYuMTU3LjEyOCIsIAogICJ1cmwiOiAiaHR0cHM6Ly9odHRwYmluLm9yZy9nZXQiCn0=", "encoding": null}, "headers": {"content-length": ["353"], "server": ["gunicorn/18.0"], "connection": ["keep-alive"], "access-control-allow-credentials": ["true"], "date": ["Fri, 22 Aug 2014 18:11:35 GMT"], "access-control-allow-origin": ["*"], "content-type": ["application/json"]}, "status": {"message": "OK", "code": 200}, "url": "https://httpbin.org/get"}, "recorded_at": "2014-08-22T18:11:34"}], "recorded_with": "betamax/0.4.0"} \ No newline at end of file diff --git a/Docker/app/test/fixtures/price_estimates_failure.json b/Docker/app/test/fixtures/price_estimates_failure.json deleted file mode 100644 index 6092eff..0000000 --- a/Docker/app/test/fixtures/price_estimates_failure.json +++ /dev/null @@ -1 +0,0 @@ -{"http_interactions": [{"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept": ["*/*"], "Content-Type": ["application/json"], "Accept-Encoding": ["gzip, deflate"], "Authorization": ["bearer NOT_A_CODE"], "User-Agent": ["python-requests/2.3.0 CPython/2.7.5 
Darwin/13.2.0"]}, "method": "GET", "uri": "https://api.uber.com/v1/estimates/price?start_longitude=-122.402367&end_longitude=-122.416743&start_latitude=37.781955&end_latitude=37.744352"}, "response": {"body": {"string": "{\"message\":\"No authentication provided.\",\"code\":\"unauthorized\"}", "encoding": null}, "headers": {"content-length": ["63"], "server": ["nginx"], "connection": ["keep-alive"], "date": ["Fri, 01 Aug 2014 20:19:37 GMT"], "x-uber-app": ["uberex-nonsandbox"], "content-type": ["application/json"]}, "status": {"message": "Unauthorized", "code": 401}, "url": "https://api.uber.com/v1/estimates/price?start_longitude=-122.402367&end_longitude=-122.416743&start_latitude=37.781955&end_latitude=37.744352"}, "recorded_at": "2014-08-01T20:19:37"}], "recorded_with": "betamax/0.4.0"} \ No newline at end of file diff --git a/Docker/app/test/fixtures/price_estimates_success.json b/Docker/app/test/fixtures/price_estimates_success.json deleted file mode 100644 index 680fe82..0000000 --- a/Docker/app/test/fixtures/price_estimates_success.json +++ /dev/null @@ -1 +0,0 @@ -{"http_interactions": [{"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept": ["*/*"], "Content-Type": ["application/json"], "Accept-Encoding": ["gzip, deflate"], "Authorization": ["bearer 42Kq726Vv6lzJ0TMhXWsgUulVjRsxh"], "User-Agent": ["python-requests/2.3.0 CPython/2.7.5 Darwin/13.2.0"]}, "method": "GET", "uri": "https://api.uber.com/v1/estimates/price?start_longitude=-122.402367&end_longitude=-122.416743&start_latitude=37.781955&end_latitude=37.744352"}, "response": {"body": {"base64_string": 
"H4sIAAAAAAAAA5WTXU+DMBSG/8uJl0AokOG4m3pjnFcTs8SYhrXN1lg+UlrdXPbfbYcmhOLQ29P3nL7P+ThCIzlhLWQvRxA1KQT/ZBRT3jaiOOCqKBlkkG+YvFkubh/AM6IPzFrFy0LZpygJwtCEL2Q0sqaaKMyp0WtTaiMK8mZyWi23DJdaKN4IziRkKAg96FW/ihI/tuV3fLvrfxuH3bdES8kqcsCkpmenqzs4eRMoq/zZBYlnv4J0egej1e9/g4hnfpKMQCTfvfs3hG3i02J971BUWghnFj21w6CKPR+F6Cr1RvHIFJOMuhidcsBggxfnYE2tHf8wD8LURAfb9CN27O9HvTtbNPdR5BoHhAKETHzgHfKpJTr7WZrMwTGgNLi2BcfsW7nrX5hoO30GKPWjMYIIBant1yjB6+kLI8sf5N4DAAA=", "encoding": null}, "headers": {"x-rate-limit-remaining": ["4993"], "content-language": ["en"], "content-encoding": ["gzip"], "transfer-encoding": ["chunked"], "strict-transport-security": ["max-age=2592000"], "server": ["nginx"], "connection": ["keep-alive"], "x-rate-limit-reset": ["1406926800"], "x-uber-app": ["uberex-nonsandbox"], "date": ["Fri, 01 Aug 2014 20:19:37 GMT"], "x-rate-limit-limit": ["5000"], "content-type": ["application/json"]}, "status": {"message": "OK", "code": 200}, "url": "https://api.uber.com/v1/estimates/price?start_longitude=-122.402367&end_longitude=-122.416743&start_latitude=37.781955&end_latitude=37.744352"}, "recorded_at": "2014-08-01T20:19:38"}], "recorded_with": "betamax/0.4.0"} \ No newline at end of file diff --git a/Docker/app/test/fixtures/products_failure.json b/Docker/app/test/fixtures/products_failure.json deleted file mode 100644 index 3d6b9bf..0000000 --- a/Docker/app/test/fixtures/products_failure.json +++ /dev/null @@ -1 +0,0 @@ -{"http_interactions": [{"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept": ["*/*"], "Content-Type": ["application/json"], "Accept-Encoding": ["gzip, deflate"], "Authorization": ["bearer NOT_A_CODE"], "User-Agent": ["python-requests/2.3.0 CPython/2.7.5 Darwin/13.2.0"]}, "method": "GET", "uri": "https://api.uber.com/v1/products?latitude=37.781955&longitude=-122.402367"}, "response": {"body": {"string": "{\"message\":\"No authentication provided.\",\"code\":\"unauthorized\"}", "encoding": null}, "headers": {"content-length": ["63"], "server": 
["nginx"], "connection": ["keep-alive"], "date": ["Fri, 01 Aug 2014 20:19:37 GMT"], "x-uber-app": ["uberex-nonsandbox"], "content-type": ["application/json"]}, "status": {"message": "Unauthorized", "code": 401}, "url": "https://api.uber.com/v1/products?latitude=37.781955&longitude=-122.402367"}, "recorded_at": "2014-08-01T20:19:38"}], "recorded_with": "betamax/0.4.0"} \ No newline at end of file diff --git a/Docker/app/test/fixtures/products_success.json b/Docker/app/test/fixtures/products_success.json deleted file mode 100644 index cf80a63..0000000 --- a/Docker/app/test/fixtures/products_success.json +++ /dev/null @@ -1 +0,0 @@ -{"http_interactions": [{"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept": ["*/*"], "Content-Type": ["application/json"], "Accept-Encoding": ["gzip, deflate"], "Authorization": ["bearer 42Kq726Vv6lzJ0TMhXWsgUulVjRsxh"], "User-Agent": ["python-requests/2.3.0 CPython/2.7.5 Darwin/13.2.0"]}, "method": "GET", "uri": "https://api.uber.com/v1/products?latitude=37.781955&longitude=-122.402367"}, "response": {"body": {"base64_string": "H4sIAAAAAAAAA7XSQU/CMBQH8K/S9AxbVOJhN/Rk5KTDkDhDHl23NXZ9TfsGDMJ3t0NjjBuc4NJD//m//PLy9tw6zBtBnifvey7AglDU8mQy4qqGUvKEV0Q2yeIszm/grph4C7uds5NIaGzywqGhyEjKYgFuTK2VPotrNPj9jlcaxGdkTclHPFfeamiXBupu8Hwl3cNs+vgcoh/GUuUhaEJw7HUd6YVTlhSakKSVZOhUqQxo1vX5YfSXfX8ptm/Wp9Gv87chcuj0wC+INSvQMbmWrkUj/4EvtmeCrRoWd9tMp4unIXLX6pnT8Mk2iipsiFHYeAXe66vJO9/2NH0x5N720cGpcTMW6Omql3Hk6tsz4NmgWPfIv1ynwpUfr0SDKyUrHTbW88PH4Qugn78toAMAAA==", "encoding": null}, "headers": {"x-rate-limit-remaining": ["4992"], "content-language": ["en"], "content-encoding": ["gzip"], "transfer-encoding": ["chunked"], "strict-transport-security": ["max-age=2592000"], "server": ["nginx"], "connection": ["keep-alive"], "x-rate-limit-reset": ["1406926800"], "x-uber-app": ["uberex-nonsandbox"], "date": ["Fri, 01 Aug 2014 20:19:37 GMT"], "x-rate-limit-limit": ["5000"], "content-type": ["application/json"]}, "status": {"message": "OK", "code": 
200}, "url": "https://api.uber.com/v1/products?latitude=37.781955&longitude=-122.402367"}, "recorded_at": "2014-08-01T20:19:38"}], "recorded_with": "betamax/0.4.0"} \ No newline at end of file diff --git a/Docker/app/test/fixtures/submit_failure.json b/Docker/app/test/fixtures/submit_failure.json deleted file mode 100644 index 61484cf..0000000 --- a/Docker/app/test/fixtures/submit_failure.json +++ /dev/null @@ -1 +0,0 @@ -{"http_interactions": [{"request": {"body": {"string": "code=not_a_code&redirect_uri=http%3A%2F%2Flocalhost%3ANone%2Fsubmit&grant_type=authorization_code", "encoding": "utf-8"}, "headers": {"Content-Length": ["97"], "Accept-Encoding": ["gzip, deflate"], "Accept": ["*/*"], "User-Agent": ["python-requests/2.3.0 CPython/2.7.5 Darwin/13.4.0"], "Content-Type": ["application/x-www-form-urlencoded"], "Authorization": ["Basic MW14R3hFR1JxcXU3Z1g5OTE2Rnc0azkwVmlHcTdLVzA6SV8wNW9tRFNIbjM5MHFISDJreE5zd04wb004bUZlc3pBQnREckQ0RQ=="]}, "method": "POST", "uri": "https://login.uber.com/oauth/token"}, "response": {"body": {"string": "{\"error\": \"invalid_grant\"}", "encoding": null}, "headers": {"content-length": ["26"], "server": ["nginx"], "connection": ["keep-alive"], "pragma": ["no-cache"], "cache-control": ["no-store"], "date": ["Mon, 06 Oct 2014 17:11:05 GMT"], "x-uber-app": ["login"], "content-type": ["application/json"]}, "status": {"message": "BAD REQUEST", "code": 400}, "url": "https://login.uber.com/oauth/token"}, "recorded_at": "2014-10-06T17:11:05"}], "recorded_with": "betamax/0.4.0"} \ No newline at end of file diff --git a/Docker/app/test/fixtures/time_estimates_failure.json b/Docker/app/test/fixtures/time_estimates_failure.json deleted file mode 100644 index 2fb1c2c..0000000 --- a/Docker/app/test/fixtures/time_estimates_failure.json +++ /dev/null @@ -1 +0,0 @@ -{"http_interactions": [{"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept": ["*/*"], "Content-Type": ["application/json"], "Accept-Encoding": ["gzip, deflate"], 
"Authorization": ["bearer NOT_A_CODE"], "User-Agent": ["python-requests/2.3.0 CPython/2.7.5 Darwin/13.2.0"]}, "method": "GET", "uri": "https://api.uber.com/v1/estimates/time?start_longitude=-122.402367&start_latitude=37.781955"}, "response": {"body": {"string": "{\"message\":\"No authentication provided.\",\"code\":\"unauthorized\"}", "encoding": null}, "headers": {"content-length": ["63"], "server": ["nginx"], "connection": ["keep-alive"], "date": ["Fri, 01 Aug 2014 20:19:38 GMT"], "x-uber-app": ["uberex-nonsandbox"], "content-type": ["application/json"]}, "status": {"message": "Unauthorized", "code": 401}, "url": "https://api.uber.com/v1/estimates/time?start_longitude=-122.402367&start_latitude=37.781955"}, "recorded_at": "2014-08-01T20:19:38"}], "recorded_with": "betamax/0.4.0"} \ No newline at end of file diff --git a/Docker/app/test/fixtures/time_estimates_success.json b/Docker/app/test/fixtures/time_estimates_success.json deleted file mode 100644 index f7f1478..0000000 --- a/Docker/app/test/fixtures/time_estimates_success.json +++ /dev/null @@ -1 +0,0 @@ -{"http_interactions": [{"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept": ["*/*"], "Content-Type": ["application/json"], "Accept-Encoding": ["gzip, deflate"], "Authorization": ["bearer 42Kq726Vv6lzJ0TMhXWsgUulVjRsxh"], "User-Agent": ["python-requests/2.3.0 CPython/2.7.5 Darwin/13.2.0"]}, "method": "GET", "uri": "https://api.uber.com/v1/estimates/time?start_longitude=-122.402367&start_latitude=37.781955"}, "response": {"body": {"base64_string": "H4sIAAAAAAAAA6tWKsnMTS1WsoquVsrJT07MyaxKTYlPySwuyEmsjM9LzE1VslIKTUotcvJxdPZW0lFKLQZqSCwBChsZmOgo4VFZUJSfUppcEp+ZAjSiFGhEUk5icrZSrQ4Bm4JDw4iyB6IOw5bi0jL8doCcEuIY4YliibG5IYZnkBRi2FKSWJFJ2JoIFDuMTM2x2gFShWFBhVJtbC0AZXhg5Z4BAAA=", "encoding": null}, "headers": {"x-rate-limit-remaining": ["4991"], "content-language": ["en"], "content-encoding": ["gzip"], "transfer-encoding": ["chunked"], "strict-transport-security": ["max-age=2592000"], 
"server": ["nginx"], "connection": ["keep-alive"], "x-rate-limit-reset": ["1406926800"], "x-uber-app": ["uberex-nonsandbox"], "date": ["Fri, 01 Aug 2014 20:19:38 GMT"], "x-rate-limit-limit": ["5000"], "content-type": ["application/json"]}, "status": {"message": "OK", "code": 200}, "url": "https://api.uber.com/v1/estimates/time?start_longitude=-122.402367&start_latitude=37.781955"}, "recorded_at": "2014-08-01T20:19:38"}], "recorded_with": "betamax/0.4.0"} \ No newline at end of file diff --git a/Docker/app/test/test_endpoints.py b/Docker/app/test/test_endpoints.py deleted file mode 100644 index 746b849..0000000 --- a/Docker/app/test/test_endpoints.py +++ /dev/null @@ -1,161 +0,0 @@ -import unittest - -from betamax import Betamax -from app import app - -with Betamax.configure() as config: - config.cassette_library_dir = 'test/fixtures' - -test_auth_token = 'MLth87eHvSAaCQ1vn7jTd0xA9Kapo5' - - -class TestCases(unittest.TestCase): - def setUp(self): - # Necessary to disable SSLify - app.debug = True - self.test_app = app.test_client() - self.session = app.requests_session - - def test_health_endpoint(self): - """Assert that the health endpoint works.""" - response = app.test_client().get('/health') - self.assertEquals(response.data, ';-)') - - def test_root_endpoint(self): - """Assert that the / endpoint correctly redirects to login.uber.com.""" - response = app.test_client().get('/') - self.assertIn('login.uber.com', response.data) - - def test_submit_endpoint_failure(self): - """Assert that the submit endpoint returns no code in the response.""" - with app.test_client() as client: - with client.session_transaction() as session: - session['access_token'] = test_auth_token - with Betamax(app.requests_session).use_cassette('submit_failure'): - response = client.get('/submit?code=not_a_code') - self.assertIn('None', response.data) - - def test_products_endpoint_returns_success(self): - """Assert that the products endpoint returns success. 
- - When a valid key is passed in. - """ - with app.test_client() as client: - with client.session_transaction() as session: - session['access_token'] = test_auth_token - with Betamax(app.requests_session).use_cassette('products_success'): - response = client.get('/products') - self.assertIn('products', response.data) - self.assertEquals(response.status_code, 200) - - def test_products_endpoint_returns_failure(self): - """Assert that the products endpoint returns failure. - - When an invalid key is passed in. - """ - with app.test_client() as client: - with client.session_transaction() as session: - session['access_token'] = 'NOT_A_CODE' - with Betamax(self.session).use_cassette('products_failure'): - response = client.get('/products') - self.assertEquals(response.status_code, 401) - - def test_time_estimates_endpoint_returns_success(self): - """Assert that the time estimates endpoint returns success. - - When a valid key is passed in. - """ - with app.test_client() as client: - with client.session_transaction() as session: - session['access_token'] = test_auth_token - with Betamax(app.requests_session).use_cassette('time_estimates_success'): - response = client.get('/time') - self.assertIn('times', response.data) - self.assertEquals(response.status_code, 200) - - def test_time_estimates_endpoint_returns_failure(self): - """Assert that the time estimates endpoint returns failure. - - When an invalid key is passed in. - """ - with app.test_client() as client: - with client.session_transaction() as session: - session['access_token'] = 'NOT_A_CODE' - with Betamax(app.requests_session).use_cassette('time_estimates_failure'): - response = client.get('/time') - self.assertEquals(response.status_code, 401) - - def test_price_estimates_endpoint_returns_success(self): - """Assert that the price estimates endpoint returns success. - - When a valid key is passed in. 
- """ - with app.test_client() as client: - with client.session_transaction() as session: - session['access_token'] = test_auth_token - with Betamax(app.requests_session).use_cassette('price_estimates_success'): - response = client.get('/price') - self.assertIn('prices', response.data) - self.assertEquals(response.status_code, 200) - - def test_price_estimates_endpoint_returns_failure(self): - """Assert that the price estimates endpoint returns failure. - - When an invalid key is passed in. - """ - with app.test_client() as client: - with client.session_transaction() as session: - session['access_token'] = 'NOT_A_CODE' - with Betamax(app.requests_session).use_cassette('price_estimates_failure'): - response = client.get('/price') - self.assertEquals(response.status_code, 401) - - def test_history_endpoint_returns_success(self): - """Assert that the history endpoint returns success. - - When a valid key is passed in. - """ - with app.test_client() as client: - with client.session_transaction() as session: - session['access_token'] = test_auth_token - with Betamax(app.requests_session).use_cassette('history_success'): - response = client.get('/history') - self.assertIn('history', response.data) - self.assertEquals(response.status_code, 200) - - def test_history_endpoint_returns_failure(self): - """Assert that the price estimates endpoint returns failure. - - When an invalid key is passed in. - """ - with app.test_client() as client: - with client.session_transaction() as session: - session['access_token'] = 'NOT_A_CODE' - with Betamax(app.requests_session).use_cassette('history_failure'): - response = client.get('/history') - self.assertEquals(response.status_code, 401) - - def test_me_endpoint_returns_success(self): - """Assert that the me endpoint returns success. - - When a valid key is passed in. 
- """ - with app.test_client() as client: - with client.session_transaction() as session: - session['access_token'] = test_auth_token - with Betamax(app.requests_session).use_cassette('me_success'): - response = client.get('/me') - self.assertIn('picture', response.data) - self.assertEquals(response.status_code, 200) - - def test_me_endpoint_returns_failure(self): - """Assert that the me endpoint returns failure. - - When an invalid key is passed in. - """ - with app.test_client() as client: - with client.session_transaction() as session: - session['access_token'] = 'NOT_A_CODE' - with Betamax(app.requests_session).use_cassette('me_failure'): - response = client.get('/me') - self.assertEquals(response.status_code, 401) diff --git a/EKS-VPC-CloudFormation/readme.md b/EKS-VPC-CloudFormation/readme.md deleted file mode 100644 index 021c239..0000000 --- a/EKS-VPC-CloudFormation/readme.md +++ /dev/null @@ -1,44 +0,0 @@ -When running EKS, it requires specific networking. Because all environments will most likely be different, there's a CloudFormation template for this exact purpose. - ---- - -To create your cluster VPC with public and private subnets - -1. Open the AWS CloudFormation console at https://console.aws.amazon.com/cloudformation. - -2. From the navigation bar, select a Region that supports Amazon EKS. - -3. Choose Create stack, With new resources (standard). - -4. For Choose a template, select Specify an Amazon S3 template URL. - -5. Paste the following URL into the text area and choose Next: -``` -https://s3.us-west-2.amazonaws.com/amazon-eks/cloudformation/2020-10-29/amazon-eks-vpc-private-subnets.yaml -``` - -6. On the *Specify Details* page, fill out the following: - -- Stack name: Choose a stack name for your AWS CloudFormation stack. For example, you can call it eks-vpc. - -- VpcBlock: Choose a CIDR range for your VPC. Each worker node, pod, and load balancer that you deploy is assigned an IP address from this block. 
The default value provides enough IP addresses for most implementations, but if it doesn't, then you can change it. For more information, see VPC and subnet sizing in the Amazon VPC User Guide. You can also add additional CIDR blocks to the VPC once it's created. - -- PublicSubnet01Block: Specify a CIDR block for public subnet 1. The default value provides enough IP addresses for most implementations, but if it doesn't, then you can change it - -- PublicSubnet02Block: Specify a CIDR block for public subnet 2. The default value provides enough IP addresses for most implementations, but if it doesn't, then you can change it - -- PrivateSubnet01Block: Specify a CIDR block for private subnet 1. The default value provides enough IP addresses for most implementations, but if it doesn't, then you can change it - -- PrivateSubnet02Block: Specify a CIDR block for private subnet 2. The default value provides enough IP addresses for most implementations, but if it doesn't, then you can change it - -7. (Optional) On the Options page, tag your stack resources. Choose Next. - -8. On the Review page, choose Create. - -9. When your stack is created, select it in the console and choose Outputs. - -10. Record the SecurityGroups value for the security group that was created. When you add nodes to your cluster, you must specify the ID of the security group. The security group is applied to the elastic network interfaces that are created by Amazon EKS in your subnets that allows the control plane to communicate with your nodes. These network interfaces have Amazon EKS in their description. - -11. Record the VpcId for the VPC that was created. You need this when you launch your node group template. - -12. Record the SubnetIds for the subnets that were created and whether you created them as public or private subnets. When you add nodes to your cluster, you must specify the IDs of the subnets that you want to launch the nodes into. 
\ No newline at end of file diff --git a/README.md b/README.md index a1c31f2..0bf5261 100644 --- a/README.md +++ b/README.md @@ -1,67 +1,213 @@ -# DevOps-The-Hard-Way-AWS - -This tutorial contains a full, real-world solution for setting up an environment that is using DevOps technologies and practices for deploying apps and cloud services/cloud infrastructure to AWS. - - -The repository contains free labs, documentation, diagrams, and docs for setting up an entire workflow and DevOps environment from a real-world perspective in AWS. - -## DevOps Scenario -The scenario that you're currently facing is you work in an organization that is very monolithic. There is a ton of bare metal, virtualization, manual deployments of applications, and **old school** practices based on the current teams knowledge of IT. - -You're brought in to the company and team to make things more modern so the organization can not only succeed, but stay ahead of their competition. Management now understands the needs and complexity that comes with staying ahead of their competition and they know that they need to. Otherwise, the organization will fall... - -## DevOps Solution -The solution is to deploy the Uber API for the sign-up page. Currently this solution is sitting on a bunch of baremetal, but it's time to sprinkle a little DevOps on it. - -![](images/uber.png) - -As a DevOps Engineer, you're more or less (most likely) not writing the app, but instead, deploying it. That's why you're not writing your own app in this tutorial. - -*Full Disclosure* - I did have to edit this app a bit from Uber to make it compatible with Python3. You can find the repo here: - -https://github.com/AdminTurnedDevOps/Python-Sample-Application - -## Technology Details -You will be using the following technologies and platforms to set up a DevOps environment. - -1. AWS - - AWS will be used to host the application, cloud infrastructure, and any other services we may need to ensure the Uber app is deployed properly. 
-2. GitHub - - To store the application and infrastructure/automation code -3. Python - - Python will be used for the Uber app (it is written in Python) and some automation efforts that aren't in Terraform. -4. Terraform - - Create an S3 bucket to store Terraform State files - - Create an AWS ECR repository with Terraform - - Create an EKS cluster -5. Docker - - Create a Docker image - - Store the Docker image in AWS ECR -6. Kubernetes - - To run the Docker image that's created for the containerized Uber app. Kubernetes, in this case, EKS, will be used to orchestrate the container. -7. CI/CD - - Use GitHub Actions to create an EKS cluster -8. Automated testing - - Testing Terraform code with Checkov - -## Labs -1. [Prerequisites](https://github.com/AdminTurnedDevOps/DevOps-The-Hard-Way-AWS/blob/main/prerequisites.md) -2. VPC - When running EKS, it requires specific networking. Because all environments will most likely be different, there's a CloudFormation template for this exact purpose. - - [Create EKS VPC](https://github.com/AdminTurnedDevOps/DevOps-The-Hard-Way-AWS/tree/main/EKS-VPC-CloudFormation) -3. AWS: - - [Configure credentials to access AWS at a programmatic level](https://github.com/AdminTurnedDevOps/DevOps-The-Hard-Way-AWS/blob/main/AWS/1-Configure-Credentials-To-Access-AWS.md) -4. Terraform - The purpose of the Terraform section is to create all of the AWS cloud services you'll need from an environment/infrastructure perspective to run the Uber application. 
- - [Create S3 Bucket To Store TFSTATE Files](https://github.com/AdminTurnedDevOps/DevOps-The-Hard-Way-AWS/blob/main/Terraform-AWS-Services-Creation/1-Create-S3-Bucket-To-Store-TFSTATE-Files.md) - - [Create an Elastic Container Registry](https://github.com/AdminTurnedDevOps/DevOps-The-Hard-Way-AWS/blob/main/Terraform-AWS-Services-Creation/2-Create-ECR.md) - - [Create An EKS Cluster IAM Role, And Policy For EKS](https://github.com/AdminTurnedDevOps/DevOps-The-Hard-Way-AWS/blob/main/Terraform-AWS-Services-Creation/3-Create-EKS-Cluster-IAM-Role-And-Policy.md) - - [Create An EKS Cluster](https://github.com/AdminTurnedDevOps/DevOps-The-Hard-Way-AWS/blob/main/Terraform-AWS-Services-Creation/3-Create-EKS-Cluster-IAM-Role-And-Policy.md) -5. Docker - The purpose of the Docker section is to create a Docker image from the app that the organization is running on-prem (the uber app), containerize it, and store the container inside of a container repository. For the container repo, you'll use AWS ECR. - - [Create The Docker Image](https://github.com/AdminTurnedDevOps/DevOps-The-Hard-Way-AWS/blob/main/Docker/1-Create-Docker-Image.md) - - [Log Into AWS ECR Repository](https://github.com/AdminTurnedDevOps/DevOps-The-Hard-Way-AWS/blob/main/Docker/Push%20Image%20To%20ECR.md) -6. Kubernetes - The purpose of the Kubernetes section is to connect to EKS locally and to write the Kubernetes manifest to deploy the Python Uber app. - - [Connect To EKS From The Terminal](https://github.com/AdminTurnedDevOps/DevOps-The-Hard-Way-AWS/blob/main/kubernetes_manifest/1-Connect-To-EKS.md) - - [Create A Kubernetes Manifest](https://github.com/AdminTurnedDevOps/DevOps-The-Hard-Way-AWS/blob/main/kubernetes_manifest/2-Create-Kubernetes-Manifest.md) -7. Automated Testing - The purpose of the Automation Testing section is to ensure that all of the Terraform code is performing as it should be from a policy, security, and static code analysis perspective. 
- - [Install And Run Checkov](https://github.com/AdminTurnedDevOps/DevOps-The-Hard-Way-AWS/blob/main/Terraform-Static-Code-Analysis/1-Checkov-For-Terraform.md) -8. CICD - The purpose of this section is to automatically create an EKS cluster with CICD using GitHub Actions - - [Create a GitHub Actions CICD pipeline](https://github.com/AdminTurnedDevOps/DevOps-The-Hard-Way-AWS/blob/main/Terraform-AWS-Services-Creation/4-Run-CICD-For-EKS-Cluster.md) +# DevOps the Hard Way on Azure + +> **Learning Platform** | **Total Time: 3-4 hours** | **11 Tutorials** + +A comprehensive DevOps tutorial series for Microsoft Azure β€” a step-by-step learning experience designed to build hands-on skills with modern cloud-native tools. + +## What Makes This Special? + +### Interactive Learning Experience +- Realistic time estimates for effective learning planning +- Step-by-step validation with automated testing scripts +- Comprehensive troubleshooting for independent problem-solving +- Knowledge checks with quizzes and deep-dive questions +- Progress tracking with interactive checkboxes + +### Enterprise-Grade Content +- Real-world scenarios based on actual industry practices +- Security-first approach with best practices throughout +- Modern tool versions (Kubernetes 1.35, Terraform 1.14.0, Python 3.13) +- Production-ready configurations you can use in your organization +- Comprehensive documentation that rivals premium training platforms + +## The DevOps Transformation Challenge + +**Scenario:** You've joined a company trapped in legacy infrastructure: +- [ ] Bare metal servers consuming resources and creating bottlenecks +- [ ] Manual deployments causing delays and human errors +- [ ] Outdated IT practices hindering innovation and growth + +> **Your Mission:** Lead a complete digital transformation using modern DevOps practices, containerization, and cloud-native technologies. 
+ +## The Modern DevOps Solution + +Transform the **thomasthornton.cloud** application from legacy infrastructure to a cloud-native, containerized, auto-scaling solution with: + +- **Infrastructure as Code** for repeatable, version-controlled deployments +- **Container orchestration** with Kubernetes for high availability +- **Automated CI/CD pipelines** for rapid, reliable releases +- **Security scanning** and compliance automation +- **Comprehensive monitoring** and observability + +![Application Architecture](images/website.png) + +> **Focus:** As a DevOps/Platform Engineer, you're the infrastructure architect and automation specialist β€” transforming how applications are deployed, scaled, and maintained. + +## Technology Stack + +| Technology | Purpose | Version | +|------------|---------|---------| +| **Azure** | Cloud platform & services | Latest | +| **Terraform** | Infrastructure as Code | >= 1.14.0 | +| **azurerm Provider** | Azure Terraform provider | 4.68.0 | +| **Docker** | Containerization | Latest | +| **Kubernetes (AKS)** | Container orchestration | v1.35 | +| **ALB Controller** | Azure Load Balancer for K8s | v1.9.16 | +| **Python** | Application runtime | v3.13 | +| **Flask** | Web framework | v3.1.3 | +| **Werkzeug** | WSGI utility library | v3.1.8 | +| **GitHub Actions** | CI/CD automation | Latest | +| **Checkov** | Security scanning | v3.2.4+ | +| **Terraform-docs** | Documentation automation | Latest | + +## Learning Journey + +> Each tutorial includes validation scripts, troubleshooting guides, and knowledge checks. 
+ +### πŸ—οΈ Foundation Setup | ⏱️ 20-30 minutes + +**Essential prerequisites for all subsequent tutorials:** + +- [ ] **[Prerequisites Guide](prerequisites.md)** - Complete setup checklist +- [ ] **[Configure Terraform Remote Storage](1-Azure/1-Configure-Terraform-Remote-Storage.md)** *(10-15 min)* + - Secure state management for team collaboration +- [ ] **[Create Azure AD Group for AKS Admins](1-Azure/2-Create-Azure-AD-Group-AKS-Admins.md)** *(8-12 min)* + - Identity management and RBAC foundation + +### πŸ—οΈ Infrastructure as Code | ⏱️ 80-120 minutes + +**Build production-ready Azure infrastructure with Terraform:** + +- [ ] **[Create Azure Container Registry (ACR)](2-Terraform-AZURE-Services-Creation/1-Create-ACR.md)** *(15-20 min)* + - Secure container image storage with premium features +- [ ] **[Create Azure Virtual Network (VNET)](2-Terraform-AZURE-Services-Creation/2-Create-VNET.md)** *(25-30 min)* + - Network segmentation with NSGs and load balancing +- [ ] **[Create Log Analytics Workspace](2-Terraform-AZURE-Services-Creation/3-Create-Log-Analytics.md)** *(15-20 min)* + - Centralized monitoring and container insights +- [ ] **[Create AKS Cluster with IAM Roles](2-Terraform-AZURE-Services-Creation/4-Create-AKS-Cluster-IAM-Roles.md)** *(25-35 min)* + - Production-ready Kubernetes with auto-scaling and Azure AD integration + +### 🐳 Containerization | ⏱️ 40-50 minutes + +**Transform applications into portable, scalable containers:** + +- [ ] **[Create Docker Image](3-Docker/1-Create-Docker-Image.md)** *(20-25 min)* + - Multi-stage builds with security best practices +- [ ] **[Push Image to ACR](3-Docker/2-Push%20Image%20To%20ACR.md)** *(20-25 min)* + - Secure image distribution and vulnerability scanning + +### ☸️ Kubernetes Deployment | ⏱️ 50-70 minutes + +**Deploy and manage applications in production Kubernetes:** + +- [ ] **[Connect to AKS](4-kubernetes_manifest/1-Connect-To-AKS.md)** *(10-15 min)* + - Cluster authentication and kubectl 
configuration +- [ ] **[Create Kubernetes Manifest](4-kubernetes_manifest/2-Create-Kubernetes-Manifest.md)** *(20-25 min)* + - Production-ready deployments with health checks +- [ ] **[Deploy Application to AKS](4-kubernetes_manifest/3-Deploy-Thomasthorntoncloud-App.md)** *(20-30 min)* + - Live application deployment with monitoring + +### πŸ”’ Security & Quality Assurance | ⏱️ 25-35 minutes + +**Implement security scanning and compliance:** + +- [ ] **[Checkov Security Scanning](5-Terraform-Static-Code-Analysis/1-Checkov-For-Terraform.md)** *(15-20 min)* + - Automated infrastructure security analysis +- [ ] **[tfsec Static Analysis](5-Terraform-Static-Code-Analysis/2-tfsec.md)** *(10-15 min)* + - Deep Terraform security scanning with detailed rule explanations + +### πŸš€ Automation & CI/CD | ⏱️ 40-50 minutes + +**Implement continuous integration and deployment:** + +- [ ] **[GitHub Actions CI/CD Pipeline](2-Terraform-AZURE-Services-Creation/5-Run-CICD-For-AKS-Cluster.md)** *(25-35 min)* + - Automated testing, building, and deployment +- [ ] **[Terraform Documentation Automation](6-Terraform-Docs/1-Setup-Terraform-Docs.md)** *(20-25 min)* + - Auto-generated documentation with GitHub Actions + +## Learning Validation & Assessment + +### Knowledge Checkpoints + +After each section, validate your understanding: + +**πŸ—οΈ Foundation Knowledge:** +- [ ] Why is remote state crucial for Terraform team collaboration? +- [ ] How does Azure AD integration enhance AKS security? + +**🐳 Containerization Mastery:** +- [ ] What are the benefits of multi-stage Docker builds? +- [ ] How does container registry security impact deployment pipelines? + +**☸️ Kubernetes Expertise:** +- [ ] How do health checks improve application reliability? +- [ ] What's the difference between Deployments and Services? + +**πŸ”’ Security Implementation:** +- [ ] How does static code analysis prevent security vulnerabilities? +- [ ] Why is policy-as-code important for compliance? 
+ +**πŸš€ DevOps Excellence:** +- [ ] How do CI/CD pipelines accelerate time-to-market? +- [ ] What role does automated documentation play in maintenance? + +### Practical Skills Assessment + +**Can you now:** +- Deploy infrastructure using Infrastructure as Code? +- Containerize applications with security best practices? +- Manage Kubernetes clusters in production? +- Implement automated security scanning? +- Build CI/CD pipelines for continuous delivery? +- Automate documentation and compliance processes? + +## What You'll Achieve + +### Professional Skills +- **Cloud-native architecture** design and implementation +- **Infrastructure as Code** mastery with Terraform +- **Container orchestration** expertise with Kubernetes +- **DevOps pipeline** creation and optimization +- **Security automation** and compliance practices + +### Career Impact +- **Portfolio projects** demonstrating real-world DevOps capabilities +- **Industry-standard practices** applicable to any organization +- **Modern toolchain proficiency** in high-demand technologies +- **Problem-solving skills** through comprehensive troubleshooting experience + +### Organizational Benefits +- **Reduced deployment time** from hours to minutes +- **Increased reliability** through automated testing and monitoring +- **Enhanced security** with continuous scanning and compliance +- **Improved scalability** with cloud-native architecture +- **Lower operational costs** through automation and optimization + +## ⚠️ Important Notes + +### Tutorial Repository Usage +This repository contains **tutorial content and examples**. The GitHub Actions workflows are **disabled** to prevent accidental execution. To use the CI/CD pipelines: + +1. **Fork this repository** to your own GitHub account +2. **Enable Actions** in your forked repository +3. **Configure secrets** as described in the CI/CD tutorial +4. 
**Follow the tutorial instructions** for deployment + +### Cost Considerations +This tutorial uses **Azure services that incur costs**. Estimated costs: +- **Development/Learning:** $50-100/month +- **Production-equivalent:** $200-500/month + +Use the [Azure Pricing Calculator](https://azure.microsoft.com/pricing/calculator/) for accurate estimates. + +## Ready to Start? + +Begin with [Prerequisites](prerequisites.md) and follow the sequential learning path. Each tutorial builds on the previous, creating a comprehensive skillset that directly translates to real-world DevOps work. + +--- + +## Feedback & Community + +**Questions or Issues?** Open an issue or submit a pull request β€” your feedback helps improve this for everyone. + +**Found this valuable?** Star the repository and share it with your network. \ No newline at end of file diff --git a/Terraform-AWS-Services-Creation/1-Create-S3-Bucket-To-Store-TFSTATE-Files.md b/Terraform-AWS-Services-Creation/1-Create-S3-Bucket-To-Store-TFSTATE-Files.md deleted file mode 100644 index e964f03..0000000 --- a/Terraform-AWS-Services-Creation/1-Create-S3-Bucket-To-Store-TFSTATE-Files.md +++ /dev/null @@ -1,17 +0,0 @@ -# Create an S3 bucket to store Terraform state files - -In this lab you will create an S3 bucket that will be used to store Terraform state files - -## Create The Terraform Configurations - -1. You can find the Terraform configuration for the S3 bucket [here](https://github.com/AdminTurnedDevOps/DevOps-The-Hard-Way-AWS/tree/main/Terraform-AWS-Services-Creation/terraform-state-s3-bucket). The Terraform configuration files are used to create an S3 bucket that will store your TFSTATE. - -The Terraform `main.tf` will do a few things: -- Create the S3 bucket in the `us-east-1` region -- Ensure that version enabling is set to `True` -- Utilize AES256 encryption - -2. 
Create the bucket by running the following: -- `terraform init` - To initialize the working directory and pull down the provider -- `terraform plan` - To go through a "check" and confirm the configurations are valid -- `terraform apply - To create the resource \ No newline at end of file diff --git a/Terraform-AWS-Services-Creation/2-Create-ECR.md b/Terraform-AWS-Services-Creation/2-Create-ECR.md deleted file mode 100644 index 59825ad..0000000 --- a/Terraform-AWS-Services-Creation/2-Create-ECR.md +++ /dev/null @@ -1,17 +0,0 @@ -# Create an Elastic Container Registry Repository - -In this lab you will create a repository to store the Docker image that you created for the Uber app. - -## Create the ECR Terraform Configuration - -1. You can find the Terraform configuration for ECR [here](https://github.com/AdminTurnedDevOps/DevOps-The-Hard-Way-AWS/tree/main/Terraform-AWS-Services-Creation/ECR). The Terraform configuration files are used to create a repository in Elastic Container Repository (ECR). - -The Terraform `main.tf` will do a few things: -- Use a Terraform backend to store the `.tfstate` in an S3 bucket -- Use the `us-east-1` region, but feel free to change that if you'd like -- Use the `aws_ecr_repository` Terraform resource to create a new respository. - -2. 
Create the bucket by running the following: -- `terraform init` - To initialize the working directory and pull down the provider -- `terraform plan` - To go through a "check" and confirm the configurations are valid -- `terraform apply - To create the resource \ No newline at end of file diff --git a/Terraform-AWS-Services-Creation/3-Create-EKS-Cluster-IAM-Role-And-Policy.md b/Terraform-AWS-Services-Creation/3-Create-EKS-Cluster-IAM-Role-And-Policy.md deleted file mode 100644 index c9496b2..0000000 --- a/Terraform-AWS-Services-Creation/3-Create-EKS-Cluster-IAM-Role-And-Policy.md +++ /dev/null @@ -1,19 +0,0 @@ -# Create An EKS Cluster and IAM Role/Policy - -In this lab you will create: -- The appropriate IAM role and policy for EKS. -- The EKS cluster - -## Create the EKS Terraform Configuration - -1. You can find the Terraform configuration for EKS [here](https://github.com/AdminTurnedDevOps/DevOps-The-Hard-Way-AWS/tree/main/Terraform-AWS-Services-Creation/EKS-With-Worker-Nodes). The Terraform configuration files are used to create an EKS cluster and IAM Role/Policy for EKS. - -The Terraform `main.tf` will do a few things: -- Use a Terraform backend to store the `.tfstate` in an S3 bucket -- Use the `us-east-1` region, but feel free to change that if you'd like -- Use the `aws_iam_role` and `aws_iam_policy` Terraform resource to create a new IAM configuration. - -2. 
Create the bucket by running the following: -- `terraform init` - To initialize the working directory and pull down the provider -- `terraform plan` - To go through a "check" and confirm the configurations are valid -- `terraform apply - To create the resource diff --git a/Terraform-AWS-Services-Creation/4-Run-CICD-For-EKS-Cluster.md b/Terraform-AWS-Services-Creation/4-Run-CICD-For-EKS-Cluster.md deleted file mode 100644 index fe72471..0000000 --- a/Terraform-AWS-Services-Creation/4-Run-CICD-For-EKS-Cluster.md +++ /dev/null @@ -1,37 +0,0 @@ -# Create EKS Cluster With CICD - -In this lab, you'll learn how to create an EKS cluster using GitHub Actions. The code can be found [here](https://github.com/AdminTurnedDevOps/DevOps-The-Hard-Way-AWS/tree/main/Terraform-AWS-Services-Creation/EKS-With-Worker-Nodes) - - -## Secrets -Prior to running the pipeline, you'll need to set up authentication from GitHub to AWS. To do that, you'll set up secrets. - -You'll need an AWS Access Key ID and an AWS Secret Access Key as those are the two secrets you'll be adding into the GitHub repository. These two secrets will allow you to connect to AWS from GitHub Actions. - -1. In the code repository, go to Settings --> Secrets -2. Add in two secrets: -`AWS_ACCESS_KEY_ID` -`AWS_SECRET_ACCESS_KEY` - -The values should come from an AWS Access Key and Secret Key. The access key/secret key must be part of a user that has policies attached for the resources being created in AWS. - -3. Save the secrets. - -## Pipeline -Now that the secrets are created, it's time to create the pipeline. - -1. Under the GitHub repository, click on the **Actions** tab -2. Under **Get started with Actions**, click the *set up a workflow yourself* button -3. 
Inside of the workflow, copy in the contents that you can find [here](https://github.com/AdminTurnedDevOps/DevOps-The-Hard-Way-AWS/blob/main/.github/workflows/main.yml) - -The pipeline does a few things: -- On line 4, you'll see `workflow_dispatch`, which means the pipeline won't automatically run unless you kick it off. You can of course change this to have the pipeline automatically run if you, for example, push code to the `dev` or `main` branch. -- The code is checked-out -- Authentication occurs to AWS -- Terraform is set up -- Terraform init occurs -- Terraform format occurs -- Terraform plan occurs -- Terraform apply occurs - -4. Run the pipeline and watch as the pipeline automatically creates the EKS cluster diff --git a/Terraform-AWS-Services-Creation/ECR/main.tf b/Terraform-AWS-Services-Creation/ECR/main.tf deleted file mode 100644 index 274e37f..0000000 --- a/Terraform-AWS-Services-Creation/ECR/main.tf +++ /dev/null @@ -1,25 +0,0 @@ -terraform { - backend "s3" { - bucket = "terraform-state-devopsthehardway" - key = "ecr-terraform.tfstate" - region = "us-east-1" - } - required_providers { - aws = { - source = "hashicorp/aws" - } - } -} - -provider "aws" { - region = "us-east-1" -} - -resource "aws_ecr_repository" "devopsthehardway-ecr-repo" { - name = var.repo_name - image_tag_mutability = "MUTABLE" - - image_scanning_configuration { - scan_on_push = true - } -} \ No newline at end of file diff --git a/Terraform-AWS-Services-Creation/ECR/terraform.tfvars b/Terraform-AWS-Services-Creation/ECR/terraform.tfvars deleted file mode 100644 index ce54947..0000000 --- a/Terraform-AWS-Services-Creation/ECR/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -repo_name = "devopsthehardway-ecr" \ No newline at end of file diff --git a/Terraform-AWS-Services-Creation/ECR/variables.tf b/Terraform-AWS-Services-Creation/ECR/variables.tf deleted file mode 100644 index ae61bb5..0000000 --- a/Terraform-AWS-Services-Creation/ECR/variables.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable 
repo_name { - type = string - default = "devopsthehardway" - description = "ECR repo to store a Docker image" -} diff --git a/Terraform-AWS-Services-Creation/EKS-Fargate/main.tf b/Terraform-AWS-Services-Creation/EKS-Fargate/main.tf deleted file mode 100644 index d693d6a..0000000 --- a/Terraform-AWS-Services-Creation/EKS-Fargate/main.tf +++ /dev/null @@ -1,93 +0,0 @@ -terraform { - backend "s3" { - bucket = "terraform-state-devopsthehardway" - key = "eks-terraform.tfstate" - region = "us-east-1" - } - required_providers { - aws = { - source = "hashicorp/aws" - } - } -} - - -# IAM Role for EKS to have access to the appropriate resources -resource "aws_iam_role" "eks-iam-role" { - name = "devopsthehardway-eks-iam-role" - - path = "/" - - assume_role_policy = < ⚠️ This file is for **local/lab testing only**. It is not part of the main tutorial content. + +## Prerequisites + +Ensure the following tools are installed and up to date: + +| Tool | Minimum Version | +|------|----------------| +| Azure CLI | Latest | +| Terraform | 1.14.8 | +| Docker | Latest | +| kubectl | Latest | +| Helm | Latest | + +Log in to Azure before running any scripts: + +```bash +az login +az account set --subscription "" +``` + +## Configuration + +The deploy script uses these environment variables (with defaults shown): + +```bash +export PROJECT_NAME="devopsthehardway" # Prefix for all Azure resources +export LOCATION="uksouth" # Azure region +``` + +> **AKS version note:** The deployment targets Kubernetes `1.35`. 
Verify it is available in your chosen region before deploying: +> ```bash +> az aks get-versions --location uksouth --query "values[].version" -o table +> ``` + +## Automated Deployment + +Run from the **repository root**: + +```bash +# Clone the repository +git clone https://github.com/thomast1906/DevOps-The-Hard-Way-Azure.git +cd DevOps-The-Hard-Way-Azure + +# Deploy everything (infrastructure + app) +./scripts/deploy-all.sh + +# Clean up all resources when done +./scripts/cleanup-all.sh +``` + +The deploy script will: +1. Verify Azure authentication and prerequisites +2. Create Terraform remote state storage (`${PROJECT_NAME}-terraform-rg`) +3. Create an Azure AD group for AKS admins +4. Deploy ACR, VNet, Log Analytics, and AKS via Terraform (each with its own remote state key) +5. Build and push the Docker image to ACR +6. Deploy the application to Kubernetes +7. Install the ALB Controller and configure Gateway API resources +8. Print the application URL when ready + +## GitHub Actions Deployment + +The repository includes a full GitHub Actions workflow (`deploy-full.yml`) for automated deployment: + +1. **Fork this repository** +2. **Set up Azure OIDC secrets** in your repository settings: + - `AZURE_AD_CLIENT_ID` + - `AZURE_AD_TENANT_ID` + - `AZURE_SUBSCRIPTION_ID` +3. **Trigger the workflow** via the **Actions** tab β†’ `Deploy DevOps The Hard Way - Azure` β†’ `Run workflow` + +Choose an environment (`dev` / `staging` / `prod`) and optionally enable **Destroy after deploy** for ephemeral test runs. 
+ +## Versions in Use + +| Component | Version | +|-----------|---------| +| Terraform | 1.14.8 | +| Azure Provider (azurerm) | ~> 4.68 | +| AKS Kubernetes | 1.35 | +| Python base image | 3.13-slim | +| Flask | 3.1.3 | +| Werkzeug | 3.1.8 | diff --git a/images/uber.png b/images/uber.png deleted file mode 100644 index 5cc8d60..0000000 Binary files a/images/uber.png and /dev/null differ diff --git a/images/website.png b/images/website.png new file mode 100644 index 0000000..e723233 Binary files /dev/null and b/images/website.png differ diff --git a/kubernetes_manifest/1-Connect-To-EKS.md b/kubernetes_manifest/1-Connect-To-EKS.md deleted file mode 100644 index 2f1d4e2..0000000 --- a/kubernetes_manifest/1-Connect-To-EKS.md +++ /dev/null @@ -1,13 +0,0 @@ -# Connecting To Elastic Kubernetes Service (EKS) - -When you're deploying locally, without any CI/CD to EKS, you'll need to authenticate from your local terminal. - -Once you authenticate to EKS from your local terminal, a `kubeconfig` gets stored on your computer. The `kubeconfig` has all of the connection information and authentication needs to connect to EKS. - -## Connecting To EKS - -1. Run the following command to connect to EKS: -`aws eks --region *your_aws_region* update-kubeconfig --name *your_eks_cluster_name` - -2. Once connected, you should be able to run commands like the following to confirm you're connected: -`kubectl get nodes` \ No newline at end of file diff --git a/kubernetes_manifest/2-Create-Kubernetes-Manifest.md b/kubernetes_manifest/2-Create-Kubernetes-Manifest.md deleted file mode 100644 index 1c0a8f5..0000000 --- a/kubernetes_manifest/2-Create-Kubernetes-Manifest.md +++ /dev/null @@ -1,17 +0,0 @@ -# Create The Kubernetes Manifest - -At this point you have successfully created a Docker image from the Uber app and stored it in ECR. - -Now it's time to set up the Kubernetes manifest, which will take the application and deploy it to EKS. 
- -## The Manifest - -The Kubernetes manifest will consist of two components: -- The deployment -- The service - -The deployment is what gets the application running in Kubernetes - -The service is what exposes the Kubernetes application so you can, for example, reach the frontend from a load balancer hostname or IP. - -The manifest can be found in the `kubernetes_manifest` directory. Ensure on line `31` to change the image URL to the one you have in your AWS account. \ No newline at end of file diff --git a/kubernetes_manifest/3-Deploy-Uber-App.md b/kubernetes_manifest/3-Deploy-Uber-App.md deleted file mode 100644 index a15c3df..0000000 --- a/kubernetes_manifest/3-Deploy-Uber-App.md +++ /dev/null @@ -1,13 +0,0 @@ -# Deploy The Uber App - -Once the EKS cluster is built and the Kubernetes manifest is ready, you're now ready to deploy the Kubernetes manifest. - -1. `cd` into the `kubernetes_manifest` directory -2. Run the following command: -`kubectl create -f deployment.yml` - -You'll see an output that specifies the service and deployment was created. - -3. 
Run the following command to confirm that the deployment was successful: -`kubectl get deployments` - diff --git a/kubernetes_manifest/deployment.yml b/kubernetes_manifest/deployment.yml deleted file mode 100644 index 2e0e355..0000000 --- a/kubernetes_manifest/deployment.yml +++ /dev/null @@ -1,33 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: uber-ui -spec: - selector: - app: uber-ui - ports: - - protocol: "TCP" - port: 6000 - targetPort: 5000 - type: LoadBalancer - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: uber-ui -spec: - selector: - matchLabels: - app: uber-ui - replicas: 2 - template: - metadata: - labels: - app: uber-ui - spec: - containers: - - name: uber-ui - image: 912101370089.dkr.ecr.us-east-1.amazonaws.com/devopsthehardway-ecr-repo:latest - ports: - - containerPort: 5000 \ No newline at end of file diff --git a/prerequisites.md b/prerequisites.md index ec63da2..0cdecf9 100644 --- a/prerequisites.md +++ b/prerequisites.md @@ -1,46 +1,382 @@ -# Prerequisites +# πŸ“‹ Prerequisites - Interactive Learning Setup -## Prior Knowledge In Tech +> **⏱️ Setup Time:** 30-45 minutes | **πŸ’‘ One-time setup for the entire tutorial series** -DevOps isn't an entry level role by any means if it's being done correctly. There's a lot of knowledge you need prior, including: -- Some sort of cloud engineering/cloud knowledge experience. Although not all environments are running in the cloud, most of these roles will want it. -- Scripting/automation/programming experience. You don't have to go out and write the next Twitter, but you should understand the basics of programming. -- Network, storage, and compute knowledge. -- Held a prior systems administration, infrastructure engineer, or cloud engineer role. +## 🎯 **Learning Path Overview** -## AWS Account +This prerequisites guide ensures you have everything needed for the **11 interactive tutorials** in this comprehensive DevOps learning platform. 
Each tutorial includes validation steps, troubleshooting guides, and hands-on practice scenarios. -### Create An AWS Account +## πŸ’Ό **Professional DevOps Background** -To follow along with this tutorial, you should have an AWS account. If you don't already have one, you can sign up for a free 12 month trial [here](https://aws.amazon.com/free/?all-free-tier.sort-by=item.additionalFields.SortRank&all-free-tier.sort-order=asc&awsf.Free%20Tier%20Types=*all&awsf.Free%20Tier%20Categories=*all). +### **πŸ“š Required Experience Level** -You should know that the tutorial for *DevOps The Hard Way* will cost money because some of the services that you use in AWS will not be part of the free tier. +**🎯 This is an intermediate-to-advanced tutorial series.** Success requires: -To learn more about the AWS Pricing Model so you understand what the cost will be, you can go [here](https://aws.amazon.com/pricing/) +**βœ… Cloud Engineering Foundation:** +- [ ] **Azure fundamentals** - Understanding of basic Azure services and concepts +- [ ] **Infrastructure concepts** - Compute, networking, storage, and security principles +- [ ] **Previous cloud projects** - Hands-on experience with cloud deployments -### Use the AWS CLI +**βœ… Development & Automation Skills:** +- [ ] **Scripting proficiency** - Bash, PowerShell, or Python automation experience +- [ ] **Version control** - Git workflows and collaborative development +- [ ] **Command-line comfort** - Terminal/CLI operations and troubleshooting -The AWS CLI is a way for you to interact with all AWS services at a programmatic level using the terminal. 
+**βœ… Infrastructure & Operations:** +- [ ] **System administration** - Linux/Windows server management experience +- [ ] **Network fundamentals** - VPNs, firewalls, load balancers, DNS +- [ ] **Storage systems** - Understanding of different storage types and use cases -To set this up, follow the directions [here](https://docs.aws.amazon.com/polly/latest/dg/setup-aws-cli.html) +**βœ… Previous Roles (Recommended):** +- Systems Administrator, Infrastructure Engineer, Cloud Engineer, Site Reliability Engineer, or similar technical operations role -## Installations -You will need to download some software and services for this tutorial. +> **πŸ’‘ New to DevOps?** Consider completing Azure fundamentals training and gaining basic cloud experience before starting this advanced tutorial series. -### Code Editor +## ☁️ **Azure Account & Subscription Setup** -Because code will be written for *DevOps The Hard Way*, you will need a code editor. For the purposes of this tutorial, you can use [Visual Studio Code](https://code.visualstudio.com/download), which is a free code editor. +### **πŸ—οΈ Azure Account Requirements** -### Terraform +1. **πŸ” Create Azure Account** + - **Free Account:** [Sign up for 12-month free trial](https://azure.microsoft.com/free/) + - **Existing Account:** Verify active subscription with sufficient credits + - **Organization Account:** Ensure Contributor or Owner permissions -[Terraform Download](https://www.terraform.io/downloads.html) +2. **πŸ’° Cost Planning & Budgets** + + **πŸ“Š Expected Costs by Environment:** + - **πŸ’‘ Learning/Development:** $50-100/month (recommended for this tutorial) + - **πŸš€ Production-equivalent:** $200-500/month + - **⚑ Minimal testing:** $20-50/month (with aggressive cleanup) -### Docker -To build the Docker image, you can use Docker Desktop for Windows or MacOS. 
+ **πŸ’‘ Cost Management Tips:** + - Set up [Azure budgets and alerts](https://docs.microsoft.com/en-us/azure/cost-management-billing/costs/tutorial-acm-create-budgets) + - Use [Azure Pricing Calculator](https://azure.microsoft.com/pricing/calculator/) for estimates + - Delete resources immediately after tutorials if cost is a concern + - Consider [Azure Dev/Test pricing](https://azure.microsoft.com/pricing/dev-test/) if eligible -[Docker Desktop](https://www.docker.com/products/docker-desktop) +3. **πŸ”’ Security & Governance Setup** + ```bash + # Verify your Azure permissions + az role assignment list --assignee $(az account show --query user.name -o tsv) --output table + + # Should see Contributor or Owner role + ``` -### Source Control -To store the code that you'll be writing, you can create your very own GitHub account to showcase your project. +## πŸ› οΈ **Required Software Installation** -[GitHub](https://www.github.com) \ No newline at end of file +### **☁️ Azure CLI - Primary Interface** + +**🎯 Purpose:** Command-line interface for all Azure operations throughout the tutorials. 
+ +```bash +# Installation verification +az --version +az login +az account show --output table +``` + +**πŸ“¦ Installation Options:** +- **Windows:** [Azure CLI Installer](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli-windows) +- **macOS:** `brew install azure-cli` +- **Linux:** [Package manager installation](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli-linux) + +### **πŸ’» Code Editor - Development Environment** + +**🎯 Recommended:** [Visual Studio Code](https://code.visualstudio.com/download) with extensions: + +**πŸ“¦ Essential VS Code Extensions:** +- **Azure Account** - Azure integration and authentication +- **Azure Terraform** - Terraform syntax highlighting and validation +- **Docker** - Container management and Dockerfile editing +- **Kubernetes** - YAML validation and cluster management +- **YAML** - Enhanced YAML editing for Kubernetes manifests + +### **πŸ—οΈ Terraform - Infrastructure as Code** + +**🎯 Version Requirement:** v1.9.8 or higher + +```bash +# Verify installation +terraform --version +# Should show: Terraform v1.9.8 or higher +``` + +**πŸ“¦ Installation:** +- **Download:** [Terraform Downloads](https://www.terraform.io/downloads.html) +- **macOS:** `brew install terraform` +- **Windows:** Use Chocolatey or manual download +- **Linux:** Package manager or manual installation + +**βš™οΈ Configuration:** +```bash +# Verify Terraform can access Azure +terraform init +terraform providers +``` + +### **🐳 Docker - Containerization Platform** + +**🎯 Purpose:** Container creation, testing, and local development. 
+ +**πŸ“¦ Installation Options:** +- **Docker Desktop:** [Download for Windows/macOS](https://www.docker.com/products/docker-desktop) +- **Linux:** [Install Docker Engine](https://docs.docker.com/engine/install/) + +**βœ… Verification:** +```bash +# Test Docker installation +docker --version +docker run hello-world + +# Verify Docker can build images +docker build --help +``` + +### **☸️ Kubernetes Tools - Cluster Management** + +**🎯 kubectl - Kubernetes command-line tool** + +**πŸ“¦ Installation:** [Install kubectl](https://kubernetes.io/docs/tasks/tools/) + +```bash +# Verify installation +kubectl version --client +# Should show version 1.35.x or compatible +``` + +**πŸ” kubelogin - Azure authentication plugin** + +**πŸ“¦ Installation:** [Azure/kubelogin](https://github.com/Azure/kubelogin) + +```bash +# Verify installation +kubelogin --version + +# Test Azure integration +az aks get-credentials --help +``` + +### **πŸ” Source Control - Version Management** + +**🎯 GitHub Account Setup** + +1. **πŸ“ Create Account:** [GitHub.com](https://github.com) +2. **πŸ” Configure Authentication:** + ```bash + # Configure Git + git config --global user.name "Your Name" + git config --global user.email "your.email@example.com" + + # Verify GitHub access + git clone https://github.com/thomast1906/DevOps-The-Hard-Way-Azure.git + ``` + +3. **πŸš€ GitHub CLI (Optional but Recommended):** + ```bash + # Install GitHub CLI + # macOS: brew install gh + # Windows: winget install GitHub.cli + + # Authenticate + gh auth login + ``` + +## πŸ”§ **Advanced Tools Setup (Required for Complete Experience)** + +### **🐍 Python Environment** + +**🎯 Version:** Python 3.13 or higher for application understanding and automation. 
+ +```bash +# Verify Python installation +python3 --version +pip3 --version + +# Install virtual environment support +pip3 install virtualenv +``` + +### **πŸ›‘οΈ Security Scanning Tools** + +**πŸ” Checkov - Infrastructure Security Scanner** + +```bash +# Install Checkov +pip3 install checkov==3.2.4 + +# Verify installation +checkov --version +checkov --help +``` + +**πŸ”’ tfsec - Terraform Security Scanner** *(Optional but Recommended)* + +```bash +# macOS installation +brew install tfsec + +# Manual installation +# Download from: https://github.com/aquasecurity/tfsec/releases + +# Verify installation +tfsec --version +``` + +### **πŸ“– Documentation Automation** + +**πŸ“š terraform-docs - Documentation Generator** + +```bash +# macOS installation +brew install terraform-docs + +# Manual installation +# Download from: https://github.com/terraform-docs/terraform-docs/releases + +# Verify installation +terraform-docs --version +``` + +## βœ… **Pre-Tutorial Validation** + +**πŸ”§ Complete System Check** + +Run this comprehensive validation script to ensure everything is properly configured: + +```bash +#!/bin/bash +echo "πŸ” DevOps Learning Platform - System Validation" +echo "==============================================" + +# Check Azure CLI +if command -v az &> /dev/null; then + echo "βœ… Azure CLI: $(az --version | head -n1)" + if az account show &> /dev/null; then + echo "βœ… Azure Authentication: Active" + else + echo "❌ Azure Authentication: Please run 'az login'" + fi +else + echo "❌ Azure CLI: Not installed" +fi + +# Check Terraform +if command -v terraform &> /dev/null; then + echo "βœ… Terraform: $(terraform --version | head -n1)" +else + echo "❌ Terraform: Not installed" +fi + +# Check Docker +if command -v docker &> /dev/null; then + echo "βœ… Docker: $(docker --version)" + if docker ps &> /dev/null; then + echo "βœ… Docker Service: Running" + else + echo "⚠️ Docker Service: Not running" + fi +else + echo "❌ Docker: Not installed" +fi + +# Check 
kubectl
+if command -v kubectl &> /dev/null; then
+    echo "βœ… kubectl: $(kubectl version --client)"
+else
+    echo "❌ kubectl: Not installed"
+fi
+
+# Check Python
+if command -v python3 &> /dev/null; then
+    echo "βœ… Python: $(python3 --version)"
+else
+    echo "❌ Python3: Not installed"
+fi
+
+# Check Checkov
+if command -v checkov &> /dev/null; then
+    echo "βœ… Checkov: $(checkov --version)"
+else
+    echo "⚠️ Checkov: Not installed (recommended for security tutorials)"
+fi
+
+echo ""
+echo "🎯 Validation complete! Review any ❌ items before starting tutorials."
+```
+
+## πŸš€ **Learning Environment Setup**
+
+### **πŸ“‚ Workspace Organization**
+
+```bash
+# Create organized workspace
+mkdir -p ~/devops-learning
+cd ~/devops-learning
+
+# Clone the tutorial repository
+git clone https://github.com/thomast1906/DevOps-The-Hard-Way-Azure.git
+cd DevOps-The-Hard-Way-Azure
+
+# Verify tutorial structure
+ls -la
+```
+
+### **πŸ”§ Environment Variables** *(Optional but Helpful)*
+
+```bash
+# Add to your shell profile (.bashrc, .zshrc, etc.)
+export AZURE_RESOURCE_GROUP="devopsthehardway-rg"
+export AZURE_LOCATION="uksouth"
+export TUTORIAL_PATH="$HOME/devops-learning/DevOps-The-Hard-Way-Azure"
+
+# Source your profile
+source ~/.zshrc # or ~/.bashrc
+```
+
+## πŸŽ“ **Next Steps - Begin Your DevOps Journey**
+
+**βœ… Prerequisites Complete?** Start with the foundation tutorials:
+
+1. **πŸ—„οΈ [Configure Terraform Remote Storage](1-Azure/1-Configure-Terraform-Remote-Storage.md)** *(10-15 min)*
+2. 
**πŸ‘₯ [Create Azure AD Group for AKS Admins](1-Azure/2-Create-Azure-AD-Group-AKS-Admins.md)** *(8-12 min)* + +**πŸ“š Learning Tips:** +- **Follow sequentially** - Each tutorial builds on the previous +- **Use validation scripts** - Verify your progress at each step +- **Practice troubleshooting** - Read error messages and use provided solutions +- **Take breaks** - Complex topics benefit from reflection time +- **Document your journey** - Keep notes for future reference + +**πŸš€ Ready to transform your DevOps skills?** [Start with the foundation setup!](1-Azure/1-Configure-Terraform-Remote-Storage.md) + +### Additional Tools (Optional but Recommended) + +#### Python +Python 3.13 or higher for running automation scripts and understanding the sample application. +[Python Downloads](https://www.python.org/downloads/) + +#### Checkov +Static analysis tool for infrastructure as code security scanning. +```bash +pip install checkov==3.2.4 +``` + +#### tfsec +Security scanner for Terraform code. +```bash +# macOS +brew install tfsec + +# Or download from GitHub releases +# https://github.com/aquasecurity/tfsec/releases +``` + +#### terraform-docs +Generate documentation from Terraform modules. +```bash +# macOS +brew install terraform-docs + +# Or download from GitHub releases +# https://github.com/terraform-docs/terraform-docs/releases +``` diff --git a/renovate.json b/renovate.json new file mode 100644 index 0000000..e657ca3 --- /dev/null +++ b/renovate.json @@ -0,0 +1,7 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:recommended" + ], + "includeForks": true +} \ No newline at end of file diff --git a/scripts/README.md b/scripts/README.md new file mode 100644 index 0000000..0cc4dcd --- /dev/null +++ b/scripts/README.md @@ -0,0 +1,231 @@ +# Automation Scripts Documentation + +This directory contains automation scripts for deploying and managing the DevOps The Hard Way Azure infrastructure. 
+ +## πŸ“ Scripts Overview + +### πŸš€ `deploy-all.sh` +**Purpose:** Complete infrastructure and application deployment + +**What it does:** +- βœ… Validates prerequisites (Azure CLI, Terraform, Docker, kubectl, Helm) +- βœ… Creates Terraform remote state storage +- βœ… Creates Azure AD group for AKS admins +- βœ… Deploys all Terraform infrastructure (ACR, VNET, Log Analytics, AKS) +- βœ… Builds and pushes Docker image to ACR +- βœ… Deploys application to Kubernetes +- βœ… Installs ALB Controller and Gateway resources +- βœ… Provides application URL for testing + +**Usage:** +```bash +# Deploy with default settings +./scripts/deploy-all.sh + +# Deploy with custom project name and location +PROJECT_NAME="myproject" LOCATION="westeurope" ./scripts/deploy-all.sh +``` + +**Environment Variables:** +- `PROJECT_NAME` (default: `devopsthehardway`) - Project name prefix +- `LOCATION` (default: `uksouth`) - Azure region + +--- + +### πŸ—‘οΈ `cleanup-all.sh` +**Purpose:** Complete resource cleanup and destruction + +**What it does:** +- ⚠️ Prompts for confirmation (type 'DELETE') +- πŸ—‘οΈ Removes Kubernetes resources (deployments, services, namespaces) +- πŸ—‘οΈ Uninstalls ALB Controller and Gateway resources +- πŸ—‘οΈ Destroys all Terraform infrastructure (in reverse order) +- πŸ—‘οΈ Deletes Azure resource groups +- πŸ—‘οΈ Optionally removes Terraform state storage +- πŸ—‘οΈ Optionally cleans up local Docker images + +**Usage:** +```bash +# Interactive cleanup (recommended) +./scripts/cleanup-all.sh + +# Automated cleanup (for CI/CD) +echo "DELETE" | ./scripts/cleanup-all.sh +``` + +**Safety Features:** +- Requires typing 'DELETE' to confirm +- Optional Terraform state cleanup +- Optional local Docker image cleanup +- Progress indicators and error handling + +--- + +### πŸ§ͺ `quick-test.sh` +**Purpose:** Full deployment cycle for testing + +**What it does:** +- πŸš€ Runs complete deployment with timestamped project name +- ⏸️ Pauses for manual testing and 
verification
+- πŸ—‘οΈ Automatically cleans up resources after confirmation
+
+**Usage:**
+```bash
+# Run quick test with random project name
+./scripts/quick-test.sh
+
+# Run quick test with specific settings
+PROJECT_NAME="test123" ./scripts/quick-test.sh
+```
+
+**Perfect for:**
+- Testing changes before production
+- Demonstrating the full solution
+- CI/CD pipeline validation
+
+---
+
+## πŸ€– GitHub Actions Workflows
+
+### `.github/workflows/deploy-full.yml`
+**Purpose:** Complete CI/CD pipeline for Azure deployment
+
+**Triggers:**
+- πŸ“‹ Manual workflow dispatch only (`workflow_dispatch`), with environment and cleanup options
+
+**Features:**
+- πŸ” Azure OIDC authentication
+- πŸ—οΈ Multi-environment support (dev/staging/prod)
+- πŸ§ͺ Optional post-deployment cleanup
+- πŸ“Š Deployment summaries and reports
+- πŸ” Application health testing
+
+**Environment Inputs:**
+- `environment` - Target environment (dev/staging/prod)
+- `destroy_after_deploy` - Auto-cleanup for testing
+
+**Required Secrets:**
+- `AZURE_AD_CLIENT_ID` - Azure service principal client ID
+- `AZURE_AD_TENANT_ID` - Azure tenant ID
+- `AZURE_SUBSCRIPTION_ID` - Azure subscription ID
+
+---
+
+## πŸ› οΈ Prerequisites
+
+Before running any scripts, ensure you have:
+
+### Required Tools
+- βœ… **Azure CLI** (2.0+) - `az --version`
+- βœ… **Terraform** (1.9.8+) - `terraform version`
+- βœ… **Docker** - `docker --version`
+- βœ… **kubectl** - `kubectl version --client`
+- βœ… **Helm** (3.0+) - `helm version`
+
+### Azure Setup
+- βœ… Azure subscription with appropriate permissions
+- βœ… Logged into Azure CLI (`az login`)
+- βœ… Service principal for GitHub Actions (if using CI/CD)
+
+### Permissions Required
+- βœ… Contributor role on subscription
+- βœ… Azure AD permissions to create groups
+- βœ… Ability to create resource groups
+- βœ… Container Registry permissions
+
+---
+
+## πŸ“‹ Usage Patterns
+
+### πŸŽ“ Learning/Tutorial Mode
+```bash
+# Follow the manual 
labs step by step +# Use individual Terraform commands for each component +``` + +### πŸ§ͺ Development/Testing Mode +```bash +# Quick iteration testing +./scripts/quick-test.sh + +# Keep environment for extended testing +./scripts/deploy-all.sh +# ... test your changes ... +./scripts/cleanup-all.sh +``` + +### πŸš€ Production Deployment Mode +```bash +# Use GitHub Actions with proper environment controls +# Manual review and approval processes +# Environment-specific configurations +``` + +--- + +## πŸ”§ Customization + +### Project Configuration +Edit the scripts to modify: +- Default project names and locations +- Resource sizing (VM sizes, node counts) +- Network configurations +- Tags and metadata + +### Environment-Specific Settings +The scripts support environment variables for: +- `PROJECT_NAME` - Resource naming prefix +- `LOCATION` - Azure region +- Custom terraform.tfvars content + +### Example Customizations +```bash +# Deploy to different region with custom name +PROJECT_NAME="mycompany-prod" LOCATION="westeurope" ./scripts/deploy-all.sh + +# Use different Kubernetes version +export KUBERNETES_VERSION="1.32" +./scripts/deploy-all.sh +``` + +--- + +## 🚨 Important Notes + +### Resource Cleanup +- Resource group deletions run in background (10-15 minutes) +- Some resources may have soft-delete policies +- Check Azure Portal to confirm complete cleanup + +### Cost Management +- AKS clusters incur ongoing costs +- Use auto-scaling and spot instances for dev/test +- Always clean up test environments + +### Security Considerations +- SSH keys are generated for each deployment +- Azure AD groups control AKS access +- Use RBAC and least-privilege principles +- Rotate secrets regularly + +### Troubleshooting +- Check script output for detailed error messages +- Verify Azure CLI authentication +- Ensure required permissions +- Check Terraform state for conflicts + +--- + +## 🎯 Best Practices + +1. **Always test in dev environment first** +2. 
**Use version control for custom modifications** +3. **Monitor resource usage and costs** +4. **Keep secrets secure and rotated** +5. **Use descriptive project names for multiple environments** +6. **Document any customizations** +7. **Regularly update tool versions** + +For more detailed information, see the individual lab documentation in the repository. diff --git a/scripts/cleanup-all.sh b/scripts/cleanup-all.sh new file mode 100755 index 0000000..df4dd4d --- /dev/null +++ b/scripts/cleanup-all.sh @@ -0,0 +1,93 @@ +#!/bin/bash + +# DevOps The Hard Way - Azure - Cleanup Script +# This script deletes the resource groups which removes all resources + +set -e # Exit on any error + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Configuration +PROJECT_NAME="${PROJECT_NAME:-devopsthehardway}" +LOCATION="${LOCATION:-uksouth}" +RESOURCE_GROUP="${PROJECT_NAME}-rg" +# Must match 1-Azure/scripts/1-create-terraform-storage.sh naming +TERRAFORM_RG="${TF_RG:-devopshardway-rg}" + +echo -e "${RED}πŸ—‘οΈ DevOps The Hard Way - Azure - CLEANUP${NC}" +echo -e "${RED}⚠️ WARNING: This will DELETE ALL resources by removing resource groups!${NC}" +echo -e "${YELLOW}Project: ${PROJECT_NAME}${NC}" +echo -e "${YELLOW}Resource Groups to Delete:${NC}" +echo -e "${YELLOW} β€’ ${RESOURCE_GROUP}${NC}" +echo -e "${YELLOW} β€’ ${PROJECT_NAME}-node-rg${NC}" +echo -e "${YELLOW} β€’ ${TERRAFORM_RG} (optional)${NC}" +echo "" + +# Confirmation prompt +read -p "Are you sure you want to delete all resource groups? Type 'DELETE' to confirm: " confirmation +if [ "$confirmation" != "DELETE" ]; then + echo -e "${GREEN}βœ… Cleanup cancelled${NC}" + exit 0 +fi + +echo "" +echo -e "${RED}πŸ—‘οΈ Starting resource group deletion...${NC}" + +# Check if logged into Azure +if ! az account show &> /dev/null; then + echo -e "${RED}❌ Not logged into Azure. 
Please login first.${NC}" + az login +fi + +echo -e "${YELLOW}πŸ“‹ Deleting main resource group: ${RESOURCE_GROUP}${NC}" +az group delete --name "$RESOURCE_GROUP" --yes --no-wait 2>/dev/null || echo -e "${YELLOW}⚠️ Resource group may not exist${NC}" + +echo -e "${YELLOW}πŸ“‹ Deleting AKS node resource group: ${PROJECT_NAME}-node-rg${NC}" +az group delete --name "${PROJECT_NAME}-node-rg" --yes --no-wait 2>/dev/null || echo -e "${YELLOW}⚠️ Node resource group may not exist${NC}" + +# Optional: Delete Terraform state storage +echo "" +echo -e "${YELLOW}⚠️ Do you want to delete the Terraform state storage as well?${NC}" +echo -e "${YELLOW}This will remove: ${TERRAFORM_RG}${NC}" +read -p "Delete Terraform state storage? (y/N): " delete_state + +if [[ "$delete_state" =~ ^[Yy]$ ]]; then + echo -e "${YELLOW}πŸ“‹ Deleting Terraform state resource group: ${TERRAFORM_RG}${NC}" + az group delete --name "$TERRAFORM_RG" --yes --no-wait 2>/dev/null || echo -e "${YELLOW}⚠️ Terraform state resource group may not exist${NC}" + echo -e "${GREEN}βœ… Terraform state storage cleanup initiated${NC}" +else + echo -e "${BLUE}ℹ️ Terraform state storage preserved${NC}" +fi + +echo "" +echo -e "${GREEN}βœ… Resource group deletion initiated!${NC}" +echo "" +echo -e "${BLUE}πŸ“‹ What's being deleted:${NC}" +echo "β€’ Main resource group: $RESOURCE_GROUP" +echo " - AKS cluster and all nodes" +echo " - Virtual Network and subnets" +echo " - Log Analytics workspace" +echo " - Azure Container Registry" +echo " - Load balancers and public IPs" +echo " - All networking components" +echo "β€’ AKS node resource group: ${PROJECT_NAME}-node-rg" +echo " - AKS node VMs and disks" +echo " - Load balancers and networking" +if [[ "$delete_state" =~ ^[Yy]$ ]]; then + echo "β€’ Terraform state storage: $TERRAFORM_RG" +fi + +echo "" +echo -e "${BLUE}πŸ“‹ Notes:${NC}" +echo "β€’ Resource group deletions are running in the background" +echo "β€’ It may take 10-15 minutes for all resources to be fully removed" 
+echo "β€’ Check deletion progress: az group list --query \"[?contains(name,'$PROJECT_NAME')]\"" +echo "β€’ Check Azure Portal to confirm all resources are deleted" + +echo "" +echo -e "${GREEN}πŸŽ‰ DevOps The Hard Way - Azure cleanup completed!${NC}" diff --git a/scripts/deploy-all.sh b/scripts/deploy-all.sh new file mode 100755 index 0000000..b0a7937 --- /dev/null +++ b/scripts/deploy-all.sh @@ -0,0 +1,224 @@ +#!/bin/bash + +# DevOps The Hard Way - Azure - Full Deployment Script +# This script deploys the entire infrastructure and application +# Run from the repository root: ./scripts/deploy-all.sh + +set -e # Exit on any error + +# Resolve repo root regardless of where the script is called from +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Configuration +PROJECT_NAME="${PROJECT_NAME:-devopsthehardway}" +LOCATION="${LOCATION:-uksouth}" +RESOURCE_GROUP="${PROJECT_NAME}-rg" +# TF backend names match 1-Azure/scripts/1-create-terraform-storage.sh and providers.tf hardcoded values +TF_RG="${TF_RG:-devopshardway-rg}" +TF_SA="${TF_SA:-devopshardwaysa}" +TF_CONTAINER="${TF_CONTAINER:-tfstate}" + +echo -e "${BLUE}πŸš€ Starting DevOps The Hard Way - Azure Deployment${NC}" +echo -e "${BLUE}Project: ${PROJECT_NAME}${NC}" +echo -e "${BLUE}Location: ${LOCATION}${NC}" +echo "" + +# Function to print step headers +print_step() { + echo -e "${GREEN}πŸ“‹ Step $1: $2${NC}" + echo "----------------------------------------" +} + +# Function to check if command exists +command_exists() { + command -v "$1" >/dev/null 2>&1 +} + +# Check prerequisites +print_step "0" "Checking Prerequisites" +if ! command_exists "az"; then + echo -e "${RED}❌ Azure CLI not found. Please install it first.${NC}" + exit 1 +fi + +if ! command_exists "terraform"; then + echo -e "${RED}❌ Terraform not found. 
Please install it first.${NC}" + exit 1 +fi + +if ! command_exists "docker"; then + echo -e "${RED}❌ Docker not found. Please install it first.${NC}" + exit 1 +fi + +if ! command_exists "kubectl"; then + echo -e "${RED}❌ kubectl not found. Please install it first.${NC}" + exit 1 +fi + +if ! command_exists "helm"; then + echo -e "${RED}❌ Helm not found. Please install it first.${NC}" + exit 1 +fi + +echo -e "${GREEN}βœ… All prerequisites met${NC}" +echo "" + +# Check Azure login +print_step "1" "Verifying Azure Authentication" +if ! az account show &> /dev/null; then + echo -e "${YELLOW}⚠️ Not logged into Azure. Please login first.${NC}" + az login +fi + +SUBSCRIPTION_ID=$(az account show --query id -o tsv) +echo -e "${GREEN}βœ… Logged into Azure (Subscription: ${SUBSCRIPTION_ID})${NC}" +echo "" + +# Step 2: Create Storage Account for Terraform State +print_step "2" "Creating Terraform Remote State Storage" +cd "$REPO_ROOT/1-Azure" +if [ -f "scripts/1-create-terraform-storage.sh" ]; then + chmod +x scripts/1-create-terraform-storage.sh + ./scripts/1-create-terraform-storage.sh +else + echo -e "${YELLOW}⚠️ Creating storage account manually...${NC}" + az group create --name "$TF_RG" --location "$LOCATION" + az storage account create --name "$TF_SA" --resource-group "$TF_RG" --location "$LOCATION" --sku Standard_LRS + az storage container create --name "$TF_CONTAINER" --account-name "$TF_SA" +fi +echo "" + +# Step 3: Create Azure AD Group +print_step "3" "Creating Azure AD Group for AKS Admins" +GROUP_DISPLAY_NAME="AKS-Admins-${PROJECT_NAME}" +EXISTING_GROUP_ID=$(az ad group show --group "$GROUP_DISPLAY_NAME" --query id -o tsv 2>/dev/null || true) +if [ -n "$EXISTING_GROUP_ID" ]; then + GROUP_ID="$EXISTING_GROUP_ID" + echo -e "${GREEN}βœ… Reusing existing AD group: ${GROUP_DISPLAY_NAME} (${GROUP_ID})${NC}" +else + GROUP_ID=$(az ad group create \ + --display-name "$GROUP_DISPLAY_NAME" \ + --mail-nickname "aks-admins-${PROJECT_NAME}" \ + --query id -o tsv) + echo -e 
"${GREEN}βœ… Created AD group: ${GROUP_DISPLAY_NAME} (${GROUP_ID})${NC}" +fi +echo "" + +# Helper: run terraform init with dynamic backend config +tf_init() { + local key="$1" + terraform init \ + -backend-config="resource_group_name=${TF_RG}" \ + -backend-config="storage_account_name=${TF_SA}" \ + -backend-config="container_name=${TF_CONTAINER}" \ + -backend-config="key=${key}" +} + +# Step 4: Deploy Infrastructure with Terraform +print_step "4" "Deploying Azure Container Registry (ACR)" +cd "$REPO_ROOT/2-Terraform-AZURE-Services-Creation/1-acr" +tf_init "acr-terraform.tfstate" +terraform plan -out=tfplan +terraform apply tfplan +echo "" + +print_step "5" "Deploying Virtual Network (VNET)" +cd "$REPO_ROOT/2-Terraform-AZURE-Services-Creation/2-vnet" +tf_init "vnet-terraform.tfstate" +terraform plan -out=tfplan +terraform apply tfplan +echo "" + +print_step "6" "Deploying Log Analytics Workspace" +cd "$REPO_ROOT/2-Terraform-AZURE-Services-Creation/3-log-analytics" +tf_init "la-terraform.tfstate" +terraform plan -out=tfplan +terraform apply tfplan +echo "" + +print_step "7" "Deploying AKS Cluster and IAM Roles" +cd "$REPO_ROOT/2-Terraform-AZURE-Services-Creation/4-aks" +tf_init "aks-terraform.tfstate" +# Override the AD group ID with the one created/found in step 3 +terraform plan -out=tfplan -var "aks_admins_group_object_id=${GROUP_ID}" +terraform apply tfplan + +# Get AKS credentials +echo -e "${YELLOW}πŸ“‹ Getting AKS credentials...${NC}" +az aks get-credentials --resource-group "$RESOURCE_GROUP" --name "${PROJECT_NAME}aks" --overwrite-existing --admin +echo "" + +# Step 5: Build and Push Docker Image +print_step "8" "Building and Pushing Docker Image" +cd "$REPO_ROOT/3-Docker" + +echo -e "${YELLOW}πŸ“‹ Building Docker image for AMD64 platform...${NC}" +docker build --platform linux/amd64 -t "${PROJECT_NAME}azurecr.azurecr.io/thomasthorntoncloud:v2" . 
+ +echo -e "${YELLOW}πŸ“‹ Logging into ACR...${NC}" +az acr login --name "${PROJECT_NAME}azurecr" + +echo -e "${YELLOW}πŸ“‹ Pushing image to ACR...${NC}" +docker push "${PROJECT_NAME}azurecr.azurecr.io/thomasthorntoncloud:v2" +echo "" + +# Step 6: Deploy Kubernetes Resources +print_step "9" "Deploying Application to Kubernetes" +cd "$REPO_ROOT/4-kubernetes_manifest" + +echo -e "${YELLOW}πŸ“‹ Deploying application manifest...${NC}" +kubectl apply -f deployment.yml + +echo -e "${YELLOW}πŸ“‹ Installing ALB Controller...${NC}" +chmod +x scripts/1-alb-controller-install-k8s.sh +./scripts/1-alb-controller-install-k8s.sh + +echo -e "${YELLOW}πŸ“‹ Waiting for ALB Controller to be ready...${NC}" +kubectl wait --for=condition=available --timeout=300s deployment/alb-controller -n azure-alb-system + +echo -e "${YELLOW}πŸ“‹ Creating Gateway API resources...${NC}" +chmod +x scripts/2-gateway-api-resources.sh +./scripts/2-gateway-api-resources.sh + +echo -e "${YELLOW}πŸ“‹ Waiting for application to be ready...${NC}" +kubectl wait --for=condition=available --timeout=300s deployment/thomasthornton -n thomasthorntoncloud +echo "" + +# Step 7: Get Application URL +print_step "10" "Getting Application URL" +echo -e "${YELLOW}πŸ“‹ Waiting for gateway to get external IP...${NC}" +sleep 30 + +GATEWAY_IP=$(kubectl get gateway gateway-01 -n thomasthorntoncloud -o jsonpath='{.status.addresses[0].value}' 2>/dev/null || echo "") + +if [ -n "$GATEWAY_IP" ]; then + echo -e "${GREEN}πŸŽ‰ Deployment Successful!${NC}" + echo -e "${GREEN}🌐 Application URL: http://$GATEWAY_IP${NC}" + echo "" + echo -e "${BLUE}πŸ“‹ Testing application...${NC}" + if curl -s -f "http://$GATEWAY_IP" > /dev/null; then + echo -e "${GREEN}βœ… Application is responding correctly!${NC}" + else + echo -e "${YELLOW}⚠️ Application may still be starting up. Please wait a few minutes and try: http://$GATEWAY_IP${NC}" + fi +else + echo -e "${YELLOW}⚠️ Gateway IP not yet available. 
Check status with:${NC}" + echo "kubectl get gateway gateway-01 -n thomasthorntoncloud" +fi + +echo "" +echo -e "${GREEN}πŸŽ‰ DevOps The Hard Way - Azure deployment completed!${NC}" +echo -e "${BLUE}πŸ“‹ Next steps:${NC}" +echo "1. Visit your application at the URL above" +echo "2. Monitor resources: kubectl get pods -A" +echo "3. Check logs: kubectl logs -n thomasthorntoncloud deployment/thomasthornton" +echo "4. Clean up when done: ./scripts/cleanup-all.sh"