FEAT: 통신 설정 수정 #200
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
name: Deploy ToGather Microservices to AWS EKS

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
  workflow_dispatch:

env:
  AWS_REGION: ap-northeast-2
  ECR_REGISTRY: ${{ secrets.AWS_ACCOUNT_ID }}.dkr.ecr.ap-northeast-2.amazonaws.com
  EKS_CLUSTER_NAME: togather-cluster
  EKS_NAMESPACE: togather
  # CDN / static-asset settings
  CDN_URL: https://d36ue99r8i68ow.cloudfront.net
  S3_BUCKET_NAME: togather-static-assets
  CLOUDFRONT_DISTRIBUTION_ID: E15ZDIW40YBVEN
jobs:
  build-and-deploy:
    runs-on: ubuntu-latest
    steps:
      # --- Common preparation ---
      - name: Checkout Server Repository
        uses: actions/checkout@v4

      - name: Set gradlew permissions
        run: chmod +x gradlew

      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ env.AWS_REGION }}

      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v2

      # The client lives in a separate private repo; GH_PAT must grant read access.
      - name: Checkout Client Repository
        uses: actions/checkout@v4
        with:
          repository: ToGather-Final/ToGather-Client
          token: ${{ secrets.GH_PAT }}
          path: togather-client

      - name: Setup pnpm
        uses: pnpm/action-setup@v4
        with:
          version: latest

      # actions/cache@v3 is deprecated; v4 is a drop-in replacement.
      - name: Cache pnpm dependencies
        uses: actions/cache@v4
        with:
          path: |
            ~/.pnpm-store
            togather-client/node_modules
          key: ${{ runner.os }}-pnpm-${{ hashFiles('togather-client/pnpm-lock.yaml') }}
          restore-keys: |
            ${{ runner.os }}-pnpm-

      - name: Setup JDK 17
        uses: actions/setup-java@v4
        with:
          java-version: '17'
          distribution: 'temurin'

      - name: Cache Gradle packages
        uses: actions/cache@v4
        with:
          path: |
            ~/.gradle/caches
            ~/.gradle/wrapper
          key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle*') }}
          restore-keys: |
            ${{ runner.os }}-gradle-
| # --- Next.js 클라이언트 먼저 빌드 (빠른 피드백) --- | |
| - name: Build Next.js Client (Priority) | |
| run: | | |
| cd togather-client | |
| export CDN_URL_CLEAN=$(echo "${{ env.CDN_URL }}" | sed 's:/*$::') | |
| echo "🛰 Using CDN_URL=$CDN_URL_CLEAN" | |
| pnpm install --frozen-lockfile | |
| # ✅ Next.js 단일 빌드 (Node runtime) | |
| SERVER_API_BASE_URL=http://api-gateway.togather.svc.cluster.local:8000/api \ | |
| NEXT_PUBLIC_API_BASE_URL=https://xn--o79aq2k062a.store/api \ | |
| NEXT_PUBLIC_WS_URL=wss://xn--o79aq2k062a.store/ws \ | |
| CDN_URL=$CDN_URL_CLEAN pnpm run build | |
| # ✅ 빌드 결과 검증 (server.js 또는 next-server.js 중 하나라도 있으면 성공) | |
| if [ ! -f ".next/standalone/server.js" ] && [ ! -f ".next/standalone/server.mjs" ] && [ ! -f ".next/standalone/next-server.js" ]; then | |
| echo "❌ Next.js standalone server entry not found!" | |
| echo "🔍 Available files in .next/standalone:" | |
| ls -la .next/standalone/ || echo "No standalone directory found" | |
| echo "🔍 Available files in .next:" | |
| ls -la .next/ || echo "No .next directory found" | |
| exit 1 | |
| else | |
| echo "✅ Next.js standalone build successful" | |
| if [ -f ".next/standalone/server.js" ]; then | |
| echo "✅ Found server.js" | |
| fi | |
| if [ -f ".next/standalone/next-server.js" ]; then | |
| echo "✅ Found next-server.js" | |
| fi | |
| fi | |
| # ✅ .next/server 제외하고 아티팩트 생성 | |
| cd .. | |
| tar -czf next-artifacts.tgz -C togather-client .next/standalone .next/static public src/app src/components src/contexts src/hooks src/lib src/utils src/constants src/types src/services src/containers | |
| echo "✅ next-artifacts.tgz 생성 완료" | |
| # --- Next.js 빌드 아티팩트 업로드 (1회만) --- | |
| - name: Upload Next build artifacts (Priority) | |
| uses: actions/upload-artifact@v4 | |
| with: | |
| name: next-artifacts | |
| path: next-artifacts.tgz | |
| # --- Docker 빌드 --- | |
| - name: Download and Extract Next.js Build Artifacts (Priority) | |
| uses: actions/download-artifact@v4 | |
| with: | |
| name: next-artifacts | |
| path: ./togather-client | |
| - name: Extract Next.js Artifacts (Priority) | |
| run: | | |
| tar -xzf ./togather-client/next-artifacts.tgz -C ./togather-client | |
| echo "✅ next-artifacts.tgz 압축 해제 완료" | |
| - name: Build and Push Next.js Runtime Image (Priority) | |
| continue-on-error: true | |
| run: | | |
| echo "🚀 Building Next.js runtime image..." | |
| cd togather-client | |
| if docker build \ | |
| --build-arg SERVER_API_BASE_URL=http://api-gateway.togather.svc.cluster.local:8000/api \ | |
| --build-arg NEXT_PUBLIC_API_BASE_URL=https://xn--o79aq2k062a.store/api \ | |
| --build-arg NEXT_PUBLIC_WS_URL=wss://xn--o79aq2k062a.store/ws \ | |
| --build-arg CDN_URL=${{ env.CDN_URL }} \ | |
| -t $ECR_REGISTRY/togather/client:${{ github.sha }} \ | |
| -t $ECR_REGISTRY/togather/client:latest \ | |
| -f Dockerfile .; then | |
| echo "✅ Next.js build successful" | |
| if docker push $ECR_REGISTRY/togather/client:${{ github.sha }} && docker push $ECR_REGISTRY/togather/client:latest; then | |
| echo "✅ Next.js Docker image successfully built and pushed!" | |
| else | |
| echo "❌ Next.js push failed" | |
| exit 1 | |
| fi | |
| else | |
| echo "❌ Next.js build failed" | |
| exit 1 | |
| fi | |
| # --- S3 정적 자산 업로드 (동일한 빌드 결과 사용) --- | |
| - name: Upload Static Assets to S3 (Priority) | |
| run: | | |
| echo "🚀 Uploading static assets to S3..." | |
| cd togather-client | |
| # ✅ 동일한 빌드 결과를 S3에 업로드 | |
| pnpm run upload-assets | |
| echo "✅ Static assets uploaded to S3 successfully!" | |
| env: | |
| CDN_URL: ${{ env.CDN_URL }} | |
| S3_BUCKET_NAME: ${{ env.S3_BUCKET_NAME }} | |
| CLOUDFRONT_DISTRIBUTION_ID: ${{ env.CLOUDFRONT_DISTRIBUTION_ID }} | |
| # --- S3 버킷 정책 설정 --- | |
| - name: Configure S3 Bucket Policy for CloudFront | |
| run: | | |
| echo "🔧 Configuring S3 bucket policy for CloudFront access..." | |
| aws s3api put-bucket-policy \ | |
| --bucket ${{ env.S3_BUCKET_NAME }} \ | |
| --policy file://scripts/s3-bucket-policy.json | |
| echo "✅ S3 bucket policy configured successfully!" | |
| # --- kubectl 설치 (Next.js 배포를 위해) --- | |
| - name: Install kubectl (Priority) | |
| uses: azure/setup-kubectl@v3 | |
| with: | |
| version: 'v1.28.0' | |
| # --- Next.js 클라이언트 EKS 배포 (빠른 피드백) --- | |
| - name: Deploy Next.js Client to EKS (Priority) | |
| if: github.ref == 'refs/heads/main' | |
| run: | | |
| echo "🚀 Deploying Next.js Client first for quick feedback..." | |
| # kubectl 설정 | |
| aws eks update-kubeconfig --region ${{ env.AWS_REGION }} --name ${{ env.EKS_CLUSTER_NAME }} | |
| export ECR_REGISTRY=${{ env.ECR_REGISTRY }} | |
| # 기존 Next.js 클라이언트 Pod 정리 (문제가 있는 Pod들 제거) | |
| echo "🧹 Cleaning up existing Next.js Client pods..." | |
| kubectl delete pods -l app=nextjs-client -n ${{ env.EKS_NAMESPACE }} --ignore-not-found=true || true | |
| # Next.js 클라이언트만 먼저 배포 | |
| echo "📦 Deploying Next.js Client..." | |
| if envsubst < k8s/nextjs-client.yaml | kubectl apply -f - -n ${{ env.EKS_NAMESPACE }}; then | |
| echo "✅ Next.js Client deployed successfully" | |
| # 이미지 업데이트 | |
| if kubectl set image deployment/nextjs-client nextjs-client=$ECR_REGISTRY/togather/client:${{ github.sha }} -n ${{ env.EKS_NAMESPACE }} --record=true; then | |
| echo "✅ Next.js Client image updated successfully" | |
| # Pod 재시작 대기 | |
| echo "⏳ Waiting for Next.js Client pods to be ready..." | |
| kubectl rollout status deployment/nextjs-client -n ${{ env.EKS_NAMESPACE }} --timeout=300s || echo "⚠️ Rollout timeout, but continuing..." | |
| else | |
| echo "❌ Next.js Client image update failed" | |
| fi | |
| else | |
| echo "❌ Next.js Client deployment failed" | |
| fi | |
| - name: Build with Gradle (Backend) | |
| run: | | |
| chmod +x gradlew | |
| ./gradlew build -x test | |
| # --- Spring Services Docker Build (with error handling) ---- | |
| - name: Build and Push API Gateway image | |
| continue-on-error: true | |
| run: | | |
| echo "🚀 Building API Gateway image..." | |
| if docker build -f api-gateway/Dockerfile -t $ECR_REGISTRY/togather/api-gateway:${{ github.sha }} .; then | |
| echo "✅ API Gateway build successful" | |
| docker tag $ECR_REGISTRY/togather/api-gateway:${{ github.sha }} $ECR_REGISTRY/togather/api-gateway:latest | |
| if docker push $ECR_REGISTRY/togather/api-gateway:${{ github.sha }} && docker push $ECR_REGISTRY/togather/api-gateway:latest; then | |
| echo "✅ API Gateway pushed successfully" | |
| else | |
| echo "❌ API Gateway push failed" | |
| exit 1 | |
| fi | |
| else | |
| echo "❌ API Gateway build failed" | |
| exit 1 | |
| fi | |
| - name: Build and Push User Service image | |
| continue-on-error: true | |
| run: | | |
| echo "🚀 Building User Service image..." | |
| if docker build -f user-service/Dockerfile -t $ECR_REGISTRY/togather/user-service:${{ github.sha }} .; then | |
| echo "✅ User Service build successful" | |
| docker tag $ECR_REGISTRY/togather/user-service:${{ github.sha }} $ECR_REGISTRY/togather/user-service:latest | |
| if docker push $ECR_REGISTRY/togather/user-service:${{ github.sha }} && docker push $ECR_REGISTRY/togather/user-service:latest; then | |
| echo "✅ User Service pushed successfully" | |
| else | |
| echo "❌ User Service push failed" | |
| exit 1 | |
| fi | |
| else | |
| echo "❌ User Service build failed" | |
| exit 1 | |
| fi | |
| - name: Build and Push Trading Service image | |
| continue-on-error: true | |
| run: | | |
| echo "🚀 Building Trading Service image..." | |
| if docker build -f trading-service/Dockerfile -t $ECR_REGISTRY/togather/trading-service:${{ github.sha }} .; then | |
| echo "✅ Trading Service build successful" | |
| docker tag $ECR_REGISTRY/togather/trading-service:${{ github.sha }} $ECR_REGISTRY/togather/trading-service:latest | |
| if docker push $ECR_REGISTRY/togather/trading-service:${{ github.sha }} && docker push $ECR_REGISTRY/togather/trading-service:latest; then | |
| echo "✅ Trading Service pushed successfully" | |
| else | |
| echo "❌ Trading Service push failed" | |
| exit 1 | |
| fi | |
| else | |
| echo "❌ Trading Service build failed" | |
| exit 1 | |
| fi | |
| - name: Build and Push Pay Service image | |
| continue-on-error: true | |
| run: | | |
| echo "🚀 Building Pay Service image..." | |
| if docker build -f pay-service/Dockerfile -t $ECR_REGISTRY/togather/pay-service:${{ github.sha }} .; then | |
| echo "✅ Pay Service build successful" | |
| docker tag $ECR_REGISTRY/togather/pay-service:${{ github.sha }} $ECR_REGISTRY/togather/pay-service:latest | |
| if docker push $ECR_REGISTRY/togather/pay-service:${{ github.sha }} && docker push $ECR_REGISTRY/togather/pay-service:latest; then | |
| echo "✅ Pay Service pushed successfully" | |
| else | |
| echo "❌ Pay Service push failed" | |
| exit 1 | |
| fi | |
| else | |
| echo "❌ Pay Service build failed" | |
| exit 1 | |
| fi | |
| - name: Build and Push Vote Service image | |
| continue-on-error: true | |
| run: | | |
| echo "🚀 Building Vote Service image..." | |
| if docker build -f vote-service/Dockerfile -t $ECR_REGISTRY/togather/vote-service:${{ github.sha }} .; then | |
| echo "✅ Vote Service build successful" | |
| docker tag $ECR_REGISTRY/togather/vote-service:${{ github.sha }} $ECR_REGISTRY/togather/vote-service:latest | |
| if docker push $ECR_REGISTRY/togather/vote-service:${{ github.sha }} && docker push $ECR_REGISTRY/togather/vote-service:latest; then | |
| echo "✅ Vote Service pushed successfully" | |
| else | |
| echo "❌ Vote Service push failed" | |
| exit 1 | |
| fi | |
| else | |
| echo "❌ Vote Service build failed" | |
| exit 1 | |
| fi | |
| # --- Spring Services Build Summary --- | |
| - name: Spring Services Build Summary | |
| run: | | |
| echo "📊 Spring Services Build Summary:" | |
| echo "==================================" | |
| echo "✅ API Gateway: ${{ steps.build-api-gateway.outcome }}" | |
| echo "✅ User Service: ${{ steps.build-user-service.outcome }}" | |
| echo "✅ Trading Service: ${{ steps.build-trading-service.outcome }}" | |
| echo "✅ Pay Service: ${{ steps.build-pay-service.outcome }}" | |
| echo "✅ Vote Service: ${{ steps.build-vote-service.outcome }}" | |
| # --- EKS 배포 --- | |
| - name: Configure kubectl for EKS | |
| run: | | |
| aws eks update-kubeconfig --region ${{ env.AWS_REGION }} --name ${{ env.EKS_CLUSTER_NAME }} | |
| - name: Deploy to EKS | |
| if: github.ref == 'refs/heads/main' | |
| run: | | |
| kubectl apply -f k8s/namespace.yaml | |
| kubectl apply -f k8s/api-gateway-rbac.yaml -n ${{ env.EKS_NAMESPACE }} | |
| export ECR_REGISTRY=${{ env.ECR_REGISTRY }} | |
| export CERTIFICATE_ARN=${{ secrets.CERTIFICATE_ARN }} | |
| # Secrets 생성 | |
| kubectl apply -f - <<EOF | |
| apiVersion: v1 | |
| kind: Secret | |
| metadata: | |
| name: togather-secrets | |
| namespace: ${{ env.EKS_NAMESPACE }} | |
| type: Opaque | |
| stringData: | |
| SPRING_DATASOURCE_USERNAME: "${{ secrets.DB_USERNAME }}" | |
| SPRING_DATASOURCE_PASSWORD: "${{ secrets.DB_PASSWORD }}" | |
| DB_USERNAME: "${{ secrets.DB_USERNAME }}" | |
| DB_PASSWORD: "${{ secrets.DB_PASSWORD }}" | |
| JWT_SECRET_KEY: "${{ secrets.JWT_SECRET_KEY }}" | |
| JWT_SECRET: "${{ secrets.JWT_SECRET_KEY }}" | |
| SPRING_RABBITMQ_USERNAME: "admin" | |
| SPRING_RABBITMQ_PASSWORD: "${{ secrets.RABBITMQ_PASSWORD }}" | |
| REDIS_PASSWORD: "${{ secrets.REDIS_PASSWORD }}" | |
| SPRING_DATA_REDIS_PASSWORD: "${{ secrets.REDIS_PASSWORD }}" | |
| EOF | |
| # 리소스 배포 (with error handling) | |
| echo "🚀 Deploying Kubernetes resources..." | |
| FAILED_DEPLOYMENTS="" | |
| SUCCESS_DEPLOYMENTS="" | |
| for file in api-gateway user-service trading-service pay-service vote-service ingress; do | |
| echo "📦 Deploying $file..." | |
| if envsubst < k8s/$file.yaml | kubectl apply -f - -n ${{ env.EKS_NAMESPACE }}; then | |
| echo "✅ $file deployed successfully" | |
| SUCCESS_DEPLOYMENTS="$SUCCESS_DEPLOYMENTS $file" | |
| else | |
| echo "❌ $file deployment failed" | |
| FAILED_DEPLOYMENTS="$FAILED_DEPLOYMENTS $file" | |
| fi | |
| done | |
| # Additional resources | |
| echo "📦 Deploying additional resources..." | |
| for resource in configmap redis rabbitmq hpa; do | |
| echo "📦 Deploying $resource..." | |
| if kubectl apply -f k8s/$resource.yaml -n ${{ env.EKS_NAMESPACE }}; then | |
| echo "✅ $resource deployed successfully" | |
| SUCCESS_DEPLOYMENTS="$SUCCESS_DEPLOYMENTS $resource" | |
| else | |
| echo "❌ $resource deployment failed" | |
| FAILED_DEPLOYMENTS="$FAILED_DEPLOYMENTS $resource" | |
| fi | |
| done | |
| # 최신 이미지 반영 (with error handling) | |
| echo "🔄 Updating service images..." | |
| for svc in api-gateway user-service trading-service pay-service vote-service; do | |
| echo "🔄 Updating $svc image..." | |
| if kubectl set image deployment/$svc $svc=$ECR_REGISTRY/togather/$svc:${{ github.sha }} -n ${{ env.EKS_NAMESPACE }} --record=true; then | |
| echo "✅ $svc image updated successfully" | |
| else | |
| echo "❌ $svc image update failed" | |
| FAILED_DEPLOYMENTS="$FAILED_DEPLOYMENTS $svc-image" | |
| fi | |
| done | |
| # 배포 결과 요약 | |
| echo "" | |
| echo "📊 Deployment Summary:" | |
| echo "=====================" | |
| if [ -n "$SUCCESS_DEPLOYMENTS" ]; then | |
| echo "✅ Successful: $SUCCESS_DEPLOYMENTS" | |
| fi | |
| if [ -n "$FAILED_DEPLOYMENTS" ]; then | |
| echo "❌ Failed: $FAILED_DEPLOYMENTS" | |
| echo "⚠️ Some deployments failed, but continuing..." | |
| fi | |
| # Next.js 클라이언트 최종 상태 확인 | |
| echo "" | |
| echo "🔍 Next.js Client Final Status:" | |
| echo "===============================" | |
| kubectl get pods -l app=nextjs-client -n ${{ env.EKS_NAMESPACE }} -o wide || echo "❌ Failed to get Next.js Client pods" | |
| # Next.js 클라이언트 Health Check | |
| echo "" | |
| echo "🏥 Next.js Client Health Check:" | |
| NEXTJS_POD=$(kubectl get pods -l app=nextjs-client -n ${{ env.EKS_NAMESPACE }} -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "") | |
| if [ -n "$NEXTJS_POD" ]; then | |
| echo "📋 Pod: $NEXTJS_POD" | |
| kubectl logs $NEXTJS_POD -n ${{ env.EKS_NAMESPACE }} --tail=20 || echo "❌ Failed to get logs" | |
| else | |
| echo "❌ No Next.js Client pod found" | |
| fi | |
| # --- 배포 완료 후 Prewarm --- | |
| - name: Prewarm Next.js Client | |
| if: github.ref == 'refs/heads/main' | |
| run: | | |
| echo "🔥 Prewarming Next.js..." | |
| SITE_URL="https://xn--o79aq2k062a.store" | |
| for i in {1..5}; do | |
| STATUS=$(curl -s -o /dev/null -w "%{http_code}" "$SITE_URL" || echo "000") | |
| echo "Request $i => HTTP $STATUS" | |
| sleep 3 | |
| done | |
| echo "✅ Prewarm completed!" | |
| # --- 상세한 Pod 상태 점검 및 로그 출력 --- | |
| - name: Detailed Pod Status Check | |
| if: always() | |
| run: | | |
| echo "🔍 Detailed Pod Status Check" | |
| echo "============================" | |
| # 모든 Pod 상태 확인 | |
| echo "📊 All Pods Status:" | |
| kubectl get pods -n ${{ env.EKS_NAMESPACE }} -o wide || echo "❌ Failed to get pods" | |
| # Next.js Client Pod 상태 상세 확인 | |
| echo "" | |
| echo "🚀 Next.js Client Pod Status:" | |
| NEXTJS_PODS=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=nextjs-client -o jsonpath='{.items[*].metadata.name}' 2>/dev/null || echo "") | |
| if [ -n "$NEXTJS_PODS" ]; then | |
| for pod in $NEXTJS_PODS; do | |
| echo "📋 Pod: $pod" | |
| kubectl get pod $pod -n ${{ env.EKS_NAMESPACE }} -o jsonpath='{.status.phase}' 2>/dev/null && echo "" | |
| kubectl get pod $pod -n ${{ env.EKS_NAMESPACE }} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null && echo " (Ready)" | |
| kubectl get pod $pod -n ${{ env.EKS_NAMESPACE }} -o jsonpath='{.status.containerStatuses[0].ready}' 2>/dev/null && echo " (Container Ready)" | |
| done | |
| else | |
| echo "❌ No Next.js Client pods found" | |
| fi | |
| # API Gateway Pod 상태 확인 | |
| echo "" | |
| echo "🌐 API Gateway Pod Status:" | |
| API_PODS=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=api-gateway -o jsonpath='{.items[*].metadata.name}' 2>/dev/null || echo "") | |
| if [ -n "$API_PODS" ]; then | |
| for pod in $API_PODS; do | |
| echo "📋 Pod: $pod" | |
| kubectl get pod $pod -n ${{ env.EKS_NAMESPACE }} -o jsonpath='{.status.phase}' 2>/dev/null && echo "" | |
| kubectl get pod $pod -n ${{ env.EKS_NAMESPACE }} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null && echo " (Ready)" | |
| done | |
| else | |
| echo "❌ No API Gateway pods found" | |
| fi | |
| # --- 비정상 Pod 로그 출력 --- | |
| - name: Unhealthy Pod Logs | |
| if: always() | |
| run: | | |
| echo "🚨 Unhealthy Pod Logs Check" | |
| echo "==========================" | |
| # Next.js Client 비정상 Pod 로그 | |
| echo "🔍 Next.js Client Pod Logs:" | |
| NEXTJS_PODS=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=nextjs-client -o jsonpath='{.items[*].metadata.name}' 2>/dev/null || echo "") | |
| if [ -n "$NEXTJS_PODS" ]; then | |
| for pod in $NEXTJS_PODS; do | |
| echo "📋 Pod: $pod" | |
| READY_STATUS=$(kubectl get pod $pod -n ${{ env.EKS_NAMESPACE }} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null || echo "Unknown") | |
| if [ "$READY_STATUS" != "True" ]; then | |
| echo "⚠️ Pod $pod is not ready (Status: $READY_STATUS)" | |
| echo "📝 Recent logs from $pod:" | |
| kubectl logs $pod -n ${{ env.EKS_NAMESPACE }} --tail=50 || echo "❌ Failed to get logs from $pod" | |
| echo "" | |
| echo "📝 Pod description for $pod:" | |
| kubectl describe pod $pod -n ${{ env.EKS_NAMESPACE }} || echo "❌ Failed to describe pod $pod" | |
| echo "" | |
| else | |
| echo "✅ Pod $pod is ready" | |
| fi | |
| done | |
| fi | |
| # API Gateway 비정상 Pod 로그 | |
| echo "🔍 API Gateway Pod Logs:" | |
| API_PODS=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=api-gateway -o jsonpath='{.items[*].metadata.name}' 2>/dev/null || echo "") | |
| if [ -n "$API_PODS" ]; then | |
| for pod in $API_PODS; do | |
| echo "📋 Pod: $pod" | |
| READY_STATUS=$(kubectl get pod $pod -n ${{ env.EKS_NAMESPACE }} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null || echo "Unknown") | |
| if [ "$READY_STATUS" != "True" ]; then | |
| echo "⚠️ Pod $pod is not ready (Status: $READY_STATUS)" | |
| echo "📝 Recent logs from $pod:" | |
| kubectl logs $pod -n ${{ env.EKS_NAMESPACE }} --tail=50 || echo "❌ Failed to get logs from $pod" | |
| echo "" | |
| echo "📝 Pod description for $pod:" | |
| kubectl describe pod $pod -n ${{ env.EKS_NAMESPACE }} || echo "❌ Failed to describe pod $pod" | |
| echo "" | |
| else | |
| echo "✅ Pod $pod is ready" | |
| fi | |
| done | |
| fi | |
| # --- Health Check 상태 확인 --- | |
| - name: Health Check Status | |
| if: always() | |
| run: | | |
| echo "🏥 Health Check Status" | |
| echo "=====================" | |
| # Next.js Client health check | |
| echo "🔍 Next.js Client Health Check:" | |
| NEXTJS_PODS=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=nextjs-client -o jsonpath='{.items[*].metadata.name}' 2>/dev/null || echo "") | |
| if [ -n "$NEXTJS_PODS" ]; then | |
| for pod in $NEXTJS_PODS; do | |
| echo "📋 Testing /api/healthz on pod: $pod" | |
| POD_IP=$(kubectl get pod $pod -n ${{ env.EKS_NAMESPACE }} -o jsonpath='{.status.podIP}' 2>/dev/null || echo "") | |
| if [ -n "$POD_IP" ]; then | |
| echo "🌐 Pod IP: $POD_IP" | |
| # Pod 내부에서 health check 테스트 | |
| kubectl exec $pod -n ${{ env.EKS_NAMESPACE }} -- curl -s -o /dev/null -w "%{http_code}" http://localhost:3000/api/healthz 2>/dev/null || echo "❌ Health check failed" | |
| else | |
| echo "❌ Could not get pod IP" | |
| fi | |
| done | |
| fi | |
| # --- 에러 발생 시 상세 진단 --- | |
| - name: Detailed Error Diagnostics | |
| if: failure() | |
| run: | | |
| echo "🚨 DEPLOYMENT FAILED - Detailed diagnostics..." | |
| echo "=============================================" | |
| # 모든 Pod 상태 | |
| echo "📊 All Pods Status:" | |
| kubectl get pods -n ${{ env.EKS_NAMESPACE }} -o wide || echo "❌ Failed to get pods" | |
| # 이벤트 확인 | |
| echo "" | |
| echo "📝 Recent Events:" | |
| kubectl get events -n ${{ env.EKS_NAMESPACE }} --sort-by='.lastTimestamp' | tail -20 || echo "❌ Failed to get events" | |
| # 서비스 상태 | |
| echo "" | |
| echo "🌐 Services Status:" | |
| kubectl get svc -n ${{ env.EKS_NAMESPACE }} || echo "❌ Failed to get services" | |
| # Ingress 상태 | |
| echo "" | |
| echo "🔗 Ingress Status:" | |
| kubectl get ingress -n ${{ env.EKS_NAMESPACE }} || echo "❌ Failed to get ingress" | |
| #name: Deploy ToGather Microservices to AWS EKS | |
| # | |
| #on: | |
| # push: | |
| # branches: [ main ] | |
| # pull_request: | |
| # branches: [ main ] | |
| # workflow_dispatch: | |
| # | |
| #env: | |
| # AWS_REGION: ap-northeast-2 | |
| # ECR_REGISTRY: ${{ secrets.AWS_ACCOUNT_ID }}.dkr.ecr.ap-northeast-2.amazonaws.com | |
| # EKS_CLUSTER_NAME: togather-cluster | |
| # EKS_NAMESPACE: togather | |
| # # CDN 설정! | |
| # CDN_URL: https://d36ue99r8i68ow.cloudfront.net | |
| # S3_BUCKET_NAME: togather-static-assets | |
| # CLOUDFRONT_DISTRIBUTION_ID: E15ZDIW40YBVEN | |
| # | |
| #jobs: | |
| # build-and-deploy: | |
| # runs-on: ubuntu-latest | |
| # | |
| # steps: | |
| # - name: Checkout code | |
| # uses: actions/checkout@v4 | |
| # | |
| # - name: Set gradlew permissions | |
| # run: chmod +x gradlew | |
| # | |
| # - name: Configure AWS credentials | |
| # uses: aws-actions/configure-aws-credentials@v4 | |
| # with: | |
| # aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} | |
| # aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} | |
| # aws-region: ${{ env.AWS_REGION }} | |
| # | |
| # - name: Login to Amazon ECR | |
| # id: login-ecr | |
| # uses: aws-actions/amazon-ecr-login@v2 | |
| # | |
| # - name: Checkout Client Repo # ToGather-Client 접근! | |
| # uses: actions/checkout@v4 | |
| # with: | |
| # repository: ToGather-Final/ToGather-Client | |
| # token: ${{ secrets.GH_PAT }} | |
| # path: togather-client | |
| # | |
| # - name: Setup pnpm | |
| # uses: pnpm/action-setup@v4 | |
| # with: | |
| # version: latest | |
| # | |
| # - name: Cache pnpm dependencies | |
| # uses: actions/cache@v3 | |
| # with: | |
| # path: | | |
| # ~/.pnpm-store | |
| # togather-client/node_modules | |
| # key: ${{ runner.os }}-pnpm-${{ hashFiles('togather-client/pnpm-lock.yaml') }} | |
| # restore-keys: | | |
| # ${{ runner.os }}-pnpm- | |
| # | |
| # - name: Set up JDK 17 | |
| # uses: actions/setup-java@v4 | |
| # with: | |
| # java-version: '17' | |
| # distribution: 'temurin' | |
| # | |
| # - name: Cache Gradle packages | |
| # uses: actions/cache@v3 | |
| # with: | |
| # path: | | |
| # ~/.gradle/caches | |
| # ~/.gradle/wrapper | |
| # key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle*') }} | |
| # restore-keys: | | |
| # ${{ runner.os }}-gradle- | |
| # | |
| # - name: Build with Gradle | |
| # run: | | |
| # chmod +x gradlew | |
| # ./gradlew build -x test | |
| # | |
| # # - name: Build and push Next.js Client image | |
| # # run: | | |
| # # docker build -t $ECR_REGISTRY/togather/client:${{ github.sha }} -f ./togather-client/Dockerfile ./togather-client | |
| # # docker build -t $ECR_REGISTRY/togather/client:latest -f ./togather-client/Dockerfile ./togather-client | |
| # # docker push $ECR_REGISTRY/togather/client:${{ github.sha }} | |
| # # docker push $ECR_REGISTRY/togather/client:latest | |
| # | |
| # - name: Build Next.js Client and upload static assets | |
| # run: | | |
| # cd togather-client | |
| # export CDN_URL_CLEAN=$(echo "${{ env.CDN_URL }}" | sed 's:/*$::') | |
| # echo "🛰 Using CDN_URL=$CDN_URL_CLEAN" | |
| # | |
| # pnpm install --frozen-lockfile | |
| # | |
| # # ✅ Next.js 빌드 (단 1회) | |
| # SERVER_API_BASE_URL=http://api-gateway.togather.svc.cluster.local:8000/api \ | |
| # NEXT_PUBLIC_API_BASE_URL=https://xn--o79aq2k062a.store/api \ | |
| # CDN_URL=$CDN_URL_CLEAN pnpm run build | |
| # | |
| # # ✅ 빌드 산출물 아티팩트로 보관 (Docker와 S3에서 동일 빌드 사용) | |
| # cd .. | |
| # tar -czf next-artifacts.tgz -C togather-client .next/standalone .next/static .next/server public | |
| # echo "✅ next-artifacts.tgz 생성 완료" | |
| # | |
| # # ✅ 정적 자산 업로드 및 CDN 무효화 | |
| # cd togather-client | |
| # pnpm run upload-assets | |
| # env: | |
| # CDN_URL: ${{ env.CDN_URL }} | |
| # S3_BUCKET_NAME: ${{ env.S3_BUCKET_NAME }} | |
| # CLOUDFRONT_DISTRIBUTION_ID: ${{ env.CLOUDFRONT_DISTRIBUTION_ID }} | |
| # NEXT_PUBLIC_API_BASE_URL: https://xn--o79aq2k062a.store | |
| # | |
| # # ✅ next-artifacts.tgz 파일을 업로드 (빌드 결과 백업) | |
| # - name: Upload Next build artifacts | |
| # uses: actions/upload-artifact@v4 | |
| # with: | |
| # name: next-artifacts | |
| # path: next-artifacts.tgz | |
| # | |
| # # ✅ 빌드 결과를 Docker 이미지 생성에 재사용 | |
| # - name: Download Next.js build artifacts | |
| # uses: actions/download-artifact@v4 | |
| # with: | |
| # name: next-artifacts | |
| # path: ./togather-client | |
| # | |
| # echo "✅ Next.js build completed. Checking output structure..." | |
| # ls -R .next | head -n 40 | |
| # | |
| # # ✅ server entry 검증 | |
| # if [ ! -f ".next/standalone/server.js" ] && [ ! -f ".next/standalone/server.mjs" ]; then | |
| # echo "❌ .next/standalone/server.js(.mjs) not found!" | |
| # echo "빌드 산출물 누락. 빌드 실패로 간주합니다." | |
| # ls -R .next/standalone | head -n 50 || true | |
| # exit 1 | |
| # fi | |
| # | |
| # | |
| # # ✅ 빌드 산출물 아티팩트로 보관 (Docker와 S3에서 동일 빌드 사용)!!! | |
| # cd .. | |
| # tar -czf next-artifacts.tgz -C togather-client .next/standalone .next/static public | |
| # echo "✅ next-artifacts.tgz 생성 완료" | |
| # | |
| # - name: Build and push Next.js runtime image | |
| # run: | | |
| # echo "🚀 Building Next.js runtime image..." | |
| # docker build \ | |
| # --build-arg SERVER_API_BASE_URL=http://api-gateway.togather.svc.cluster.local:8000/api \ | |
| # --build-arg NEXT_PUBLIC_API_BASE_URL=https://xn--o79aq2k062a.store/api \ | |
| # --build-arg CDN_URL=${{ env.CDN_URL }} \ | |
| # -t $ECR_REGISTRY/togather/client:${{ github.sha }} \ | |
| # -f ./togather-client/Dockerfile ./togather-client | |
| # | |
| # # ✅ 태그 및 푸시 | |
| # docker tag $ECR_REGISTRY/togather/client:${{ github.sha }} $ECR_REGISTRY/togather/client:latest | |
| # docker push $ECR_REGISTRY/togather/client:${{ github.sha }} | |
| # docker push $ECR_REGISTRY/togather/client:latest | |
| # | |
| # # ✅ 빌드 검증 로그 | |
| # echo "✅ Docker image for Next.js successfully built and pushed!" | |
| # | |
| # | |
| # - name: Upload Next build artifacts | |
| # uses: actions/upload-artifact@v4 | |
| # with: | |
| # name: next-artifacts | |
| # path: next-artifacts.tgz | |
| # | |
| # - name: Download Next.js build artifacts | |
| # uses: actions/download-artifact@v4 | |
| # with: | |
| # name: next-artifacts | |
| # path: ./togather-client | |
| # | |
| # - name: Extract Next.js artifacts | |
| # run: | | |
| # tar -xzf ./togather-client/next-artifacts.tgz -C ./togather-client | |
| # echo "✅ next-artifacts.tgz 압축 해제 완료" | |
| # ls -R ./togather-client/.next/standalone | head -n 20 | |
| # | |
| # # ✅ kr 도입을 위해 추가 --build-arg CDN_URL=${{ env.CDN_URL }} \ | |
| # - name: Build and push Next.js runtime image | |
| # run: | | |
| # echo "🚀 Building Next.js runtime image..." | |
| # cd togather-client | |
| # | |
| # # 빌드 산출물 검증 | |
| # if [ ! -f ".next/standalone/server.js" ]; then | |
| # echo "❌ server.js not found in .next/standalone — build failed!" | |
| # ls -R .next | head -n 40 | |
| # exit 1 | |
| # fi | |
| # | |
| # docker build \ | |
| # --build-arg SERVER_API_BASE_URL=http://api-gateway.togather.svc.cluster.local:8000/api \ | |
| # --build-arg NEXT_PUBLIC_API_BASE_URL=https://xn--o79aq2k062a.store/api \ | |
| # --build-arg CDN_URL=${{ env.CDN_URL }} \ | |
| # -t $ECR_REGISTRY/togather/client:${{ github.sha }} \ | |
| # -t $ECR_REGISTRY/togather/client:latest \ | |
| # -f Dockerfile . | |
| # | |
| # docker push $ECR_REGISTRY/togather/client:${{ github.sha }} | |
| # docker push $ECR_REGISTRY/togather/client:latest | |
| # | |
| # - name: Build and push Vote Service image | |
| # run: | | |
| # docker build -f vote-service/Dockerfile -t $ECR_REGISTRY/togather/vote-service:${{ github.sha }} . | |
| # docker build -f vote-service/Dockerfile -t $ECR_REGISTRY/togather/vote-service:latest . | |
| # docker push $ECR_REGISTRY/togather/vote-service:${{ github.sha }} | |
| # docker push $ECR_REGISTRY/togather/vote-service:latest | |
| # | |
| # - name: Install kubectl | |
| # uses: azure/setup-kubectl@v3 | |
| # with: | |
| # version: 'v1.28.0' | |
| # | |
| # - name: Configure kubectl for EKS | |
| # run: | | |
| # aws eks update-kubeconfig --region ${{ env.AWS_REGION }} --name ${{ env.EKS_CLUSTER_NAME }} | |
| # | |
| # - name: Deploy to EKS | |
| # if: github.ref == 'refs/heads/main' | |
| # run: | | |
| # # 1. 네임스페이스 생성 (없으면 생성, 있으면 넘어감) | |
| # kubectl apply -f k8s/namespace.yaml | |
| # | |
| # # 2. Secret 생성 (모든 민감 정보를 GitHub Secrets에서 가져와 통합) | |
| # kubectl apply -f - <<EOF | |
| # apiVersion: v1 | |
| # kind: Secret | |
| # metadata: | |
| # name: togather-secrets | |
| # namespace: ${{ env.EKS_NAMESPACE }} | |
| # type: Opaque | |
| # stringData: | |
| # SPRING_DATASOURCE_USERNAME: "${{ secrets.DB_USERNAME }}" | |
| # SPRING_DATASOURCE_PASSWORD: "${{ secrets.DB_PASSWORD }}" | |
| # DB_USERNAME: "${{ secrets.DB_USERNAME }}" | |
| # DB_PASSWORD: "${{ secrets.DB_PASSWORD }}" | |
| # JWT_SECRET_KEY: "${{ secrets.JWT_SECRET_KEY }}" | |
| # JWT_SECRET: "${{ secrets.JWT_SECRET_KEY }}" | |
| # SPRING_RABBITMQ_USERNAME: "admin" | |
| # SPRING_RABBITMQ_PASSWORD: "${{ secrets.RABBITMQ_PASSWORD }}" | |
| # RABBITMQ_USERNAME: "admin" | |
| # RABBITMQ_PASSWORD: "${{ secrets.RABBITMQ_PASSWORD }}" | |
| # REDIS_PASSWORD: "${{ secrets.REDIS_PASSWORD }}" | |
| # SPRING_DATA_REDIS_PASSWORD: "${{ secrets.REDIS_PASSWORD }}" | |
| # # KIS 증권 API 환경변수 | |
| # KIS_BASE_URL: "${{ secrets.KIS_BASE_URL }}" | |
| # KIS_APPKEY: "${{ secrets.KIS_APPKEY }}" | |
| # KIS_APPSECRET: "${{ secrets.KIS_APPSECRET }}" | |
| # EOF | |
| # | |
| # # 3. RBAC 먼저 적용 (ServiceAccount/Role/RoleBinding) | |
| # kubectl apply -f k8s/api-gateway-rbac.yaml -n ${{ env.EKS_NAMESPACE }} | |
| # | |
| # # 4. 환경변수 치환 후 나머지 모든 리소스 배포 | |
| # # ECR_REGISTRY와 CERTIFICATE_ARN 환경변수를 k8s 파일들에 치환 | |
| # export ECR_REGISTRY=${{ env.ECR_REGISTRY }} | |
| # export CERTIFICATE_ARN=${{ secrets.CERTIFICATE_ARN }} | |
| # | |
| # # 환경변수 치환이 필요한 파일들 배포 | |
| # envsubst < k8s/api-gateway.yaml | kubectl apply -f - -n ${{ env.EKS_NAMESPACE }} | |
| # envsubst < k8s/user-service.yaml | kubectl apply -f - -n ${{ env.EKS_NAMESPACE }} | |
| # envsubst < k8s/trading-service.yaml | kubectl apply -f - -n ${{ env.EKS_NAMESPACE }} | |
| # envsubst < k8s/pay-service.yaml | kubectl apply -f - -n ${{ env.EKS_NAMESPACE }} | |
| # envsubst < k8s/vote-service.yaml | kubectl apply -f - -n ${{ env.EKS_NAMESPACE }} | |
| # envsubst < k8s/nextjs-client.yaml | kubectl apply -f - -n ${{ env.EKS_NAMESPACE }} | |
| # envsubst < k8s/ingress.yaml | kubectl apply -f - -n ${{ env.EKS_NAMESPACE }} | |
| # | |
| # # 나머지 리소스들 (ConfigMap, Service, HPA 등) 배포 | |
| # kubectl apply -f k8s/configmap.yaml -n ${{ env.EKS_NAMESPACE }} | |
| # kubectl apply -f k8s/configmap-env.yaml -n ${{ env.EKS_NAMESPACE }} | |
| # kubectl apply -f k8s/redis.yaml -n ${{ env.EKS_NAMESPACE }} | |
| # kubectl apply -f k8s/rabbitmq.yaml -n ${{ env.EKS_NAMESPACE }} | |
| # # ingress.yaml과 nextjs-client.yaml은 위에서 envsubst로 이미 배포됨 | |
| # kubectl apply -f k8s/hpa.yaml -n ${{ env.EKS_NAMESPACE }} # 🚀 HPA 재활성화 | |
| # | |
| # # 5. 각 디플로이먼트에 커밋 SHA 이미지 설정 (롤아웃 트리거) | |
| # kubectl set image deployment/api-gateway api-gateway=${{ env.ECR_REGISTRY }}/togather/api-gateway:${{ github.sha }} -n ${{ env.EKS_NAMESPACE }} --record=true | |
| # kubectl set image deployment/user-service user-service=${{ env.ECR_REGISTRY }}/togather/user-service:${{ github.sha }} -n ${{ env.EKS_NAMESPACE }} --record=true | |
| # kubectl set image deployment/trading-service trading-service=${{ env.ECR_REGISTRY }}/togather/trading-service:${{ github.sha }} -n ${{ env.EKS_NAMESPACE }} --record=true | |
| # kubectl set image deployment/pay-service pay-service=${{ env.ECR_REGISTRY }}/togather/pay-service:${{ github.sha }} -n ${{ env.EKS_NAMESPACE }} --record=true | |
| # kubectl set image deployment/vote-service vote-service=${{ env.ECR_REGISTRY }}/togather/vote-service:${{ github.sha }} -n ${{ env.EKS_NAMESPACE }} --record=true | |
| # kubectl set image deployment/nextjs-client nextjs-client=${{ env.ECR_REGISTRY }}/togather/client:${{ github.sha }} -n ${{ env.EKS_NAMESPACE }} --record=true | |
| # | |
| # - name: Invalidate CloudFront Cache | |
| # if: github.ref == 'refs/heads/main' | |
| # uses: chetan/invalidate-cloudfront-action@v2 | |
| # env: | |
| # DISTRIBUTION: ${{ env.CLOUDFRONT_DISTRIBUTION_ID }} | |
| # PATHS: "/*" # 💡 모든 파일(*)을 무효화하여 즉시 변경사항 반영 | |
| # AWS_REGION: "us-east-1" # CloudFront API는 글로벌 서비스로 us-east-1 리전을 사용하므로 명시 | |
| # AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} | |
| # AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} | |
| # | |
| # - name: Wait for deployment to complete | |
| # if: github.ref == 'refs/heads/main' | |
| # run: | | |
| # # 배포 결과 추적을 위한 변수 초기화 | |
| # FAILED_SERVICES="" | |
| # SUCCESS_SERVICES="" | |
| # | |
| # # Redis 배포 확인 | |
| # echo "🚀 Redis 배포 상태 확인 중..." | |
| # if kubectl rollout status deployment/redis -n ${{ env.EKS_NAMESPACE }} --timeout=60s; then | |
| # echo "✅ Redis 배포 성공" | |
| # SUCCESS_SERVICES="$SUCCESS_SERVICES redis" | |
| # else | |
| # echo "❌ Redis 배포 실패! 진단 정보 출력 중..." | |
| # FAILED_SERVICES="$FAILED_SERVICES redis" | |
| # kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=redis | |
| # kubectl describe deployment redis -n ${{ env.EKS_NAMESPACE }} | |
| # POD=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=redis -o jsonpath='{.items[0].metadata.name}') | |
| # kubectl describe pod $POD -n ${{ env.EKS_NAMESPACE }} || true | |
| # kubectl logs $POD -n ${{ env.EKS_NAMESPACE }} --tail=100 || true | |
| # fi | |
| # | |
| # # API Gateway 배포 확인 | |
| # echo "" | |
| # echo "🚀 API Gateway 배포 상태 확인 중..." | |
| # if kubectl rollout status deployment/api-gateway -n ${{ env.EKS_NAMESPACE }} --timeout=240s; then | |
| # echo "✅ API Gateway 배포 성공" | |
| # SUCCESS_SERVICES="$SUCCESS_SERVICES api-gateway" | |
| # else | |
| # echo "❌ API Gateway 배포 실패! 진단 정보 출력 중..." | |
| # FAILED_SERVICES="$FAILED_SERVICES api-gateway" | |
| # kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=api-gateway | |
| # kubectl describe deployment api-gateway -n ${{ env.EKS_NAMESPACE }} | |
| # POD=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=api-gateway -o jsonpath='{.items[0].metadata.name}') | |
| # kubectl describe pod $POD -n ${{ env.EKS_NAMESPACE }} || true | |
| # kubectl logs $POD -n ${{ env.EKS_NAMESPACE }} --tail=100 || true | |
| # fi | |
| # | |
| # # User Service 배포 확인 | |
| # echo "" | |
| # echo "🚀 User Service 배포 상태 확인 중..." | |
| # if kubectl rollout status deployment/user-service -n ${{ env.EKS_NAMESPACE }} --timeout=180s; then | |
| # echo "✅ User Service 배포 성공" | |
| # SUCCESS_SERVICES="$SUCCESS_SERVICES user-service" | |
| # else | |
| # echo "❌ User Service 배포 실패! 진단 정보 출력 중..." | |
| # FAILED_SERVICES="$FAILED_SERVICES user-service" | |
| # kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=user-service | |
| # kubectl describe deployment user-service -n ${{ env.EKS_NAMESPACE }} | |
| # POD=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=user-service -o jsonpath='{.items[0].metadata.name}') | |
| # kubectl describe pod $POD -n ${{ env.EKS_NAMESPACE }} || true | |
| # kubectl logs $POD -n ${{ env.EKS_NAMESPACE }} --tail=100 || true | |
| # fi | |
| # | |
| # # Trading Service 배포 확인 | |
| # echo "" | |
| # echo "🚀 Trading Service 배포 상태 확인 중..." | |
| # if kubectl rollout status deployment/trading-service -n ${{ env.EKS_NAMESPACE }} --timeout=180s; then | |
| # echo "✅ Trading Service 배포 성공" | |
| # SUCCESS_SERVICES="$SUCCESS_SERVICES trading-service" | |
| # else | |
| # echo "❌ Trading Service 배포 실패! 진단 정보 출력 중..." | |
| # FAILED_SERVICES="$FAILED_SERVICES trading-service" | |
| # kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=trading-service | |
| # kubectl describe deployment trading-service -n ${{ env.EKS_NAMESPACE }} | |
| # POD=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=trading-service -o jsonpath='{.items[0].metadata.name}') | |
| # kubectl describe pod $POD -n ${{ env.EKS_NAMESPACE }} || true | |
| # kubectl logs $POD -n ${{ env.EKS_NAMESPACE }} --tail=100 || true | |
| # fi | |
| # | |
| # # Pay Service 배포 확인 | |
| # echo "" | |
| # echo "🚀 Pay Service 배포 상태 확인 중..." | |
| # if kubectl rollout status deployment/pay-service -n ${{ env.EKS_NAMESPACE }} --timeout=180s; then | |
| # echo "✅ Pay Service 배포 성공" | |
| # SUCCESS_SERVICES="$SUCCESS_SERVICES pay-service" | |
| # else | |
| # echo "❌ Pay Service 배포 실패! 진단 정보 출력 중..." | |
| # FAILED_SERVICES="$FAILED_SERVICES pay-service" | |
| # kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=pay-service | |
| # kubectl describe deployment pay-service -n ${{ env.EKS_NAMESPACE }} | |
| # POD=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=pay-service -o jsonpath='{.items[0].metadata.name}') | |
| # kubectl describe pod $POD -n ${{ env.EKS_NAMESPACE }} || true | |
| # kubectl logs $POD -n ${{ env.EKS_NAMESPACE }} --tail=100 || true | |
| # fi | |
| # | |
| # # Vote Service 배포 확인 | |
| # echo "" | |
| # echo "🚀 Vote Service 배포 상태 확인 중..." | |
| # if kubectl rollout status deployment/vote-service -n ${{ env.EKS_NAMESPACE }} --timeout=180s; then | |
| # echo "✅ Vote Service 배포 성공" | |
| # SUCCESS_SERVICES="$SUCCESS_SERVICES vote-service" | |
| # else | |
| # echo "❌ Vote Service 배포 실패! 진단 정보 출력 중..." | |
| # FAILED_SERVICES="$FAILED_SERVICES vote-service" | |
| # kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=vote-service | |
| # kubectl describe deployment vote-service -n ${{ env.EKS_NAMESPACE }} | |
| # POD=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=vote-service -o jsonpath='{.items[0].metadata.name}') | |
| # kubectl describe pod $POD -n ${{ env.EKS_NAMESPACE }} || true | |
| # kubectl logs $POD -n ${{ env.EKS_NAMESPACE }} --tail=100 || true | |
| # fi | |
| # | |
| # # Next.js Client 배포 확인 | |
| # echo "" | |
| # echo "🚀 Next.js Client 배포 상태 확인 중..." | |
| # if kubectl rollout status deployment/nextjs-client -n ${{ env.EKS_NAMESPACE }} --timeout=120s; then | |
| # echo "✅ Next.js Client 배포 성공" | |
| # SUCCESS_SERVICES="$SUCCESS_SERVICES nextjs-client" | |
| # else | |
| # echo "❌ Next.js Client 배포 실패! 진단 정보 출력 중..." | |
| # FAILED_SERVICES="$FAILED_SERVICES nextjs-client" | |
| # kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=nextjs-client | |
| # kubectl describe deployment nextjs-client -n ${{ env.EKS_NAMESPACE }} | |
| # POD=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=nextjs-client -o jsonpath='{.items[0].metadata.name}') | |
| # kubectl describe pod $POD -n ${{ env.EKS_NAMESPACE }} || true | |
| # kubectl logs $POD -n ${{ env.EKS_NAMESPACE }} --tail=100 || true | |
| # fi | |
| # | |
| # # 최종 배포 결과 요약 | |
| # echo "" | |
| # echo "==================================================" | |
| # echo "📊 배포 결과 요약" | |
| # echo "==================================================" | |
| # | |
| # if [ -n "$SUCCESS_SERVICES" ]; then | |
| # echo "✅ 성공한 서비스:$SUCCESS_SERVICES" | |
| # fi | |
| # | |
| # if [ -n "$FAILED_SERVICES" ]; then | |
| # echo "❌ 실패한 서비스:$FAILED_SERVICES" | |
| # echo "" | |
| # echo "⚠️ 일부 서비스 배포가 실패했습니다!" | |
| # exit 1 | |
| # else | |
| # echo "" | |
| # echo "🎉 모든 서비스 배포 완료!" | |
| # fi | |
| # | |
| # - name: Verify deployment | |
| # if: github.ref == 'refs/heads/main' | |
| # run: | | |
| # kubectl get pods -n ${{ env.EKS_NAMESPACE }} | |
| # kubectl get services -n ${{ env.EKS_NAMESPACE }} | |
| # kubectl get ingress -n ${{ env.EKS_NAMESPACE }} | |
| # | |
| # - name: Prewarm Next.js Client | |
| # if: github.ref == 'refs/heads/main' | |
| # run: | | |
| # echo "🔥 Next.js 클라이언트 Prewarming 시작..." | |
| # SITE_URL="https://xn--o79aq2k062a.store" | |
| # | |
| # # 배포 완료 대기 (최대 2분) | |
| # echo "⏳ Next.js 클라이언트 준비 대기 중..." | |
| # for i in {1..24}; do | |
| # if kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=nextjs-client -o jsonpath='{.items[0].status.conditions[?(@.type=="Ready")].status}' | grep -q "True"; then | |
| # echo "✅ Next.js 클라이언트 준비 완료!" | |
| # break | |
| # fi | |
| # echo "대기 중... ($i/24)" | |
| # sleep 5 | |
| # done | |
| # | |
| # # Prewarming 요청 (5회) | |
| # echo "🔥 워밍업 요청 전송 중..." | |
| # for i in {1..5}; do | |
| # STATUS_CODE=$(curl -s -o /dev/null -w "%{http_code}" --max-time 30 "$SITE_URL" || echo "000") | |
| # if [ "$STATUS_CODE" = "200" ]; then | |
| # echo "✅ 워밍업 요청 $i/5 성공 (HTTP $STATUS_CODE)" | |
| # else | |
| # echo "⚠️ 워밍업 요청 $i/5 실패 (HTTP $STATUS_CODE)" | |
| # fi | |
| # sleep 1 | |
| # done | |
| # | |
| # echo "🎉 Prewarming 완료! 사용자들이 빠른 응답을 경험할 수 있습니다." | |
| # | |
| # - name: Diagnostics on failure | |
| # if: failure() | |
| # run: | | |
| # kubectl get deploy -n ${{ env.EKS_NAMESPACE }} | |
| # kubectl describe deploy api-gateway -n ${{ env.EKS_NAMESPACE }} | |
| # kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=api-gateway -o wide | |
| # POD=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=api-gateway -o jsonpath='{.items[0].metadata.name}') | |
| # kubectl describe pod $POD -n ${{ env.EKS_NAMESPACE }} || true | |
| # kubectl logs $POD -n ${{ env.EKS_NAMESPACE }} --tail=200 || true |