FIX: deploy and client app route #193
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
---
# CI/CD pipeline: builds the Next.js client and the Spring microservices,
# pushes images to ECR, uploads static assets to S3/CloudFront, and deploys
# everything to the EKS cluster.
name: Deploy ToGather Microservices to AWS EKS

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
  workflow_dispatch:

env:
  AWS_REGION: ap-northeast-2
  ECR_REGISTRY: ${{ secrets.AWS_ACCOUNT_ID }}.dkr.ecr.ap-northeast-2.amazonaws.com
  EKS_CLUSTER_NAME: togather-cluster
  EKS_NAMESPACE: togather
  CDN_URL: https://d36ue99r8i68ow.cloudfront.net
  S3_BUCKET_NAME: togather-static-assets
  CLOUDFRONT_DISTRIBUTION_ID: E15ZDIW40YBVEN
jobs:
  build-and-deploy:
    runs-on: ubuntu-latest
    steps:
      # --- Common setup stage ---
      - name: Checkout Server Repository
        uses: actions/checkout@v4

      - name: Set gradlew permissions
        run: chmod +x gradlew

      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ env.AWS_REGION }}

      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v2

      # The client lives in a separate private repo; GH_PAT grants access.
      - name: Checkout Client Repository
        uses: actions/checkout@v4
        with:
          repository: ToGather-Final/ToGather-Client
          token: ${{ secrets.GH_PAT }}
          path: togather-client

      - name: Setup pnpm
        uses: pnpm/action-setup@v4
        with:
          version: latest

      - name: Cache pnpm dependencies
        uses: actions/cache@v3
        with:
          path: |
            ~/.pnpm-store
            togather-client/node_modules
          key: ${{ runner.os }}-pnpm-${{ hashFiles('togather-client/pnpm-lock.yaml') }}
          restore-keys: |
            ${{ runner.os }}-pnpm-
| - name: Setup JDK 17 | |
| uses: actions/setup-java@v4 | |
| with: | |
| java-version: '17' | |
| distribution: 'temurin' | |
| - name: Cache Gradle packages | |
| uses: actions/cache@v3 | |
| with: | |
| path: | | |
| ~/.gradle/caches | |
| ~/.gradle/wrapper | |
| key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle*') }} | |
| restore-keys: | | |
| ${{ runner.os }}-gradle- | |
| # --- Next.js ํด๋ผ์ด์ธํธ ๋จผ์ ๋น๋ (๋น ๋ฅธ ํผ๋๋ฐฑ) --- | |
| - name: Build Next.js Client (Priority) | |
| run: | | |
| cd togather-client | |
| export CDN_URL_CLEAN=$(echo "${{ env.CDN_URL }}" | sed 's:/*$::') | |
| echo "๐ฐ Using CDN_URL=$CDN_URL_CLEAN" | |
| pnpm install --frozen-lockfile | |
| # โ Next.js ๋จ์ผ ๋น๋ (Node runtime) | |
| SERVER_API_BASE_URL=http://api-gateway.togather.svc.cluster.local:8000/api \ | |
| NEXT_PUBLIC_API_BASE_URL=https://xn--o79aq2k062a.store/api \ | |
| NEXT_PUBLIC_WS_URL=wss://xn--o79aq2k062a.store/ws \ | |
| CDN_URL=$CDN_URL_CLEAN pnpm run build | |
| # โ ๋น๋ ๊ฒฐ๊ณผ ๊ฒ์ฆ (server.js ๋๋ next-server.js ์ค ํ๋๋ผ๋ ์์ผ๋ฉด ์ฑ๊ณต) | |
| if [ ! -f ".next/standalone/server.js" ] && [ ! -f ".next/standalone/server.mjs" ] && [ ! -f ".next/standalone/next-server.js" ]; then | |
| echo "โ Next.js standalone server entry not found!" | |
| echo "๐ Available files in .next/standalone:" | |
| ls -la .next/standalone/ || echo "No standalone directory found" | |
| echo "๐ Available files in .next:" | |
| ls -la .next/ || echo "No .next directory found" | |
| exit 1 | |
| else | |
| echo "โ Next.js standalone build successful" | |
| if [ -f ".next/standalone/server.js" ]; then | |
| echo "โ Found server.js" | |
| fi | |
| if [ -f ".next/standalone/next-server.js" ]; then | |
| echo "โ Found next-server.js" | |
| fi | |
| fi | |
| # โ .next/server ์ ์ธํ๊ณ ์ํฐํฉํธ ์์ฑ | |
| cd .. | |
| tar -czf next-artifacts.tgz -C togather-client .next/standalone .next/static public src/app src/components src/contexts src/hooks src/lib src/utils src/constants src/types src/services src/containers | |
| echo "โ next-artifacts.tgz ์์ฑ ์๋ฃ" | |
| # --- Next.js ๋น๋ ์ํฐํฉํธ ์ ๋ก๋ (1ํ๋ง) --- | |
| - name: Upload Next build artifacts (Priority) | |
| uses: actions/upload-artifact@v4 | |
| with: | |
| name: next-artifacts | |
| path: next-artifacts.tgz | |
| # --- Docker ๋น๋ --- | |
| - name: Download and Extract Next.js Build Artifacts (Priority) | |
| uses: actions/download-artifact@v4 | |
| with: | |
| name: next-artifacts | |
| path: ./togather-client | |
| - name: Extract Next.js Artifacts (Priority) | |
| run: | | |
| tar -xzf ./togather-client/next-artifacts.tgz -C ./togather-client | |
| echo "โ next-artifacts.tgz ์์ถ ํด์ ์๋ฃ" | |
| - name: Build and Push Next.js Runtime Image (Priority) | |
| continue-on-error: true | |
| run: | | |
| echo "๐ Building Next.js runtime image..." | |
| cd togather-client | |
| if docker build \ | |
| --build-arg SERVER_API_BASE_URL=http://api-gateway.togather.svc.cluster.local:8000/api \ | |
| --build-arg NEXT_PUBLIC_API_BASE_URL=https://xn--o79aq2k062a.store/api \ | |
| --build-arg NEXT_PUBLIC_WS_URL=wss://xn--o79aq2k062a.store/ws \ | |
| --build-arg CDN_URL=${{ env.CDN_URL }} \ | |
| -t $ECR_REGISTRY/togather/client:${{ github.sha }} \ | |
| -t $ECR_REGISTRY/togather/client:latest \ | |
| -f Dockerfile .; then | |
| echo "โ Next.js build successful" | |
| if docker push $ECR_REGISTRY/togather/client:${{ github.sha }} && docker push $ECR_REGISTRY/togather/client:latest; then | |
| echo "โ Next.js Docker image successfully built and pushed!" | |
| else | |
| echo "โ Next.js push failed" | |
| exit 1 | |
| fi | |
| else | |
| echo "โ Next.js build failed" | |
| exit 1 | |
| fi | |
| # --- S3 ์ ์ ์์ฐ ์ ๋ก๋ (๋์ผํ ๋น๋ ๊ฒฐ๊ณผ ์ฌ์ฉ) --- | |
| - name: Upload Static Assets to S3 (Priority) | |
| run: | | |
| echo "๐ Uploading static assets to S3..." | |
| cd togather-client | |
| # โ ๋์ผํ ๋น๋ ๊ฒฐ๊ณผ๋ฅผ S3์ ์ ๋ก๋ | |
| pnpm run upload-assets | |
| echo "โ Static assets uploaded to S3 successfully!" | |
| env: | |
| CDN_URL: ${{ env.CDN_URL }} | |
| S3_BUCKET_NAME: ${{ env.S3_BUCKET_NAME }} | |
| CLOUDFRONT_DISTRIBUTION_ID: ${{ env.CLOUDFRONT_DISTRIBUTION_ID }} | |
| # --- S3 ๋ฒํท ์ ์ฑ ์ค์ --- | |
| - name: Configure S3 Bucket Policy for CloudFront | |
| run: | | |
| echo "๐ง Configuring S3 bucket policy for CloudFront access..." | |
| aws s3api put-bucket-policy \ | |
| --bucket ${{ env.S3_BUCKET_NAME }} \ | |
| --policy file://scripts/s3-bucket-policy.json | |
| echo "โ S3 bucket policy configured successfully!" | |
| # --- kubectl ์ค์น (Next.js ๋ฐฐํฌ๋ฅผ ์ํด) --- | |
| - name: Install kubectl (Priority) | |
| uses: azure/setup-kubectl@v3 | |
| with: | |
| version: 'v1.28.0' | |
| # --- Next.js ํด๋ผ์ด์ธํธ EKS ๋ฐฐํฌ (๋น ๋ฅธ ํผ๋๋ฐฑ) --- | |
| - name: Deploy Next.js Client to EKS (Priority) | |
| if: github.ref == 'refs/heads/main' | |
| run: | | |
| echo "๐ Deploying Next.js Client first for quick feedback..." | |
| # kubectl ์ค์ | |
| aws eks update-kubeconfig --region ${{ env.AWS_REGION }} --name ${{ env.EKS_CLUSTER_NAME }} | |
| export ECR_REGISTRY=${{ env.ECR_REGISTRY }} | |
| # ๊ธฐ์กด Next.js ํด๋ผ์ด์ธํธ Pod ์ ๋ฆฌ (๋ฌธ์ ๊ฐ ์๋ Pod๋ค ์ ๊ฑฐ) | |
| echo "๐งน Cleaning up existing Next.js Client pods..." | |
| kubectl delete pods -l app=nextjs-client -n ${{ env.EKS_NAMESPACE }} --ignore-not-found=true || true | |
| # Next.js ํด๋ผ์ด์ธํธ๋ง ๋จผ์ ๋ฐฐํฌ | |
| echo "๐ฆ Deploying Next.js Client..." | |
| if envsubst < k8s/nextjs-client.yaml | kubectl apply -f - -n ${{ env.EKS_NAMESPACE }}; then | |
| echo "โ Next.js Client deployed successfully" | |
| # ์ด๋ฏธ์ง ์ ๋ฐ์ดํธ | |
| if kubectl set image deployment/nextjs-client nextjs-client=$ECR_REGISTRY/togather/client:${{ github.sha }} -n ${{ env.EKS_NAMESPACE }} --record=true; then | |
| echo "โ Next.js Client image updated successfully" | |
| # Pod ์ฌ์์ ๋๊ธฐ | |
| echo "โณ Waiting for Next.js Client pods to be ready..." | |
| kubectl rollout status deployment/nextjs-client -n ${{ env.EKS_NAMESPACE }} --timeout=300s || echo "โ ๏ธ Rollout timeout, but continuing..." | |
| else | |
| echo "โ Next.js Client image update failed" | |
| fi | |
| else | |
| echo "โ Next.js Client deployment failed" | |
| fi | |
| - name: Build with Gradle (Backend) | |
| run: | | |
| chmod +x gradlew | |
| ./gradlew build -x test | |
| # --- Spring Services Docker Build (with error handling) ---- | |
| - name: Build and Push API Gateway image | |
| continue-on-error: true | |
| run: | | |
| echo "๐ Building API Gateway image..." | |
| if docker build -f api-gateway/Dockerfile -t $ECR_REGISTRY/togather/api-gateway:${{ github.sha }} .; then | |
| echo "โ API Gateway build successful" | |
| docker tag $ECR_REGISTRY/togather/api-gateway:${{ github.sha }} $ECR_REGISTRY/togather/api-gateway:latest | |
| if docker push $ECR_REGISTRY/togather/api-gateway:${{ github.sha }} && docker push $ECR_REGISTRY/togather/api-gateway:latest; then | |
| echo "โ API Gateway pushed successfully" | |
| else | |
| echo "โ API Gateway push failed" | |
| exit 1 | |
| fi | |
| else | |
| echo "โ API Gateway build failed" | |
| exit 1 | |
| fi | |
| - name: Build and Push User Service image | |
| continue-on-error: true | |
| run: | | |
| echo "๐ Building User Service image..." | |
| if docker build -f user-service/Dockerfile -t $ECR_REGISTRY/togather/user-service:${{ github.sha }} .; then | |
| echo "โ User Service build successful" | |
| docker tag $ECR_REGISTRY/togather/user-service:${{ github.sha }} $ECR_REGISTRY/togather/user-service:latest | |
| if docker push $ECR_REGISTRY/togather/user-service:${{ github.sha }} && docker push $ECR_REGISTRY/togather/user-service:latest; then | |
| echo "โ User Service pushed successfully" | |
| else | |
| echo "โ User Service push failed" | |
| exit 1 | |
| fi | |
| else | |
| echo "โ User Service build failed" | |
| exit 1 | |
| fi | |
| - name: Build and Push Trading Service image | |
| continue-on-error: true | |
| run: | | |
| echo "๐ Building Trading Service image..." | |
| if docker build -f trading-service/Dockerfile -t $ECR_REGISTRY/togather/trading-service:${{ github.sha }} .; then | |
| echo "โ Trading Service build successful" | |
| docker tag $ECR_REGISTRY/togather/trading-service:${{ github.sha }} $ECR_REGISTRY/togather/trading-service:latest | |
| if docker push $ECR_REGISTRY/togather/trading-service:${{ github.sha }} && docker push $ECR_REGISTRY/togather/trading-service:latest; then | |
| echo "โ Trading Service pushed successfully" | |
| else | |
| echo "โ Trading Service push failed" | |
| exit 1 | |
| fi | |
| else | |
| echo "โ Trading Service build failed" | |
| exit 1 | |
| fi | |
| - name: Build and Push Pay Service image | |
| continue-on-error: true | |
| run: | | |
| echo "๐ Building Pay Service image..." | |
| if docker build -f pay-service/Dockerfile -t $ECR_REGISTRY/togather/pay-service:${{ github.sha }} .; then | |
| echo "โ Pay Service build successful" | |
| docker tag $ECR_REGISTRY/togather/pay-service:${{ github.sha }} $ECR_REGISTRY/togather/pay-service:latest | |
| if docker push $ECR_REGISTRY/togather/pay-service:${{ github.sha }} && docker push $ECR_REGISTRY/togather/pay-service:latest; then | |
| echo "โ Pay Service pushed successfully" | |
| else | |
| echo "โ Pay Service push failed" | |
| exit 1 | |
| fi | |
| else | |
| echo "โ Pay Service build failed" | |
| exit 1 | |
| fi | |
| - name: Build and Push Vote Service image | |
| continue-on-error: true | |
| run: | | |
| echo "๐ Building Vote Service image..." | |
| if docker build -f vote-service/Dockerfile -t $ECR_REGISTRY/togather/vote-service:${{ github.sha }} .; then | |
| echo "โ Vote Service build successful" | |
| docker tag $ECR_REGISTRY/togather/vote-service:${{ github.sha }} $ECR_REGISTRY/togather/vote-service:latest | |
| if docker push $ECR_REGISTRY/togather/vote-service:${{ github.sha }} && docker push $ECR_REGISTRY/togather/vote-service:latest; then | |
| echo "โ Vote Service pushed successfully" | |
| else | |
| echo "โ Vote Service push failed" | |
| exit 1 | |
| fi | |
| else | |
| echo "โ Vote Service build failed" | |
| exit 1 | |
| fi | |
| # --- Spring Services Build Summary --- | |
| - name: Spring Services Build Summary | |
| run: | | |
| echo "๐ Spring Services Build Summary:" | |
| echo "==================================" | |
| echo "โ API Gateway: ${{ steps.build-api-gateway.outcome }}" | |
| echo "โ User Service: ${{ steps.build-user-service.outcome }}" | |
| echo "โ Trading Service: ${{ steps.build-trading-service.outcome }}" | |
| echo "โ Pay Service: ${{ steps.build-pay-service.outcome }}" | |
| echo "โ Vote Service: ${{ steps.build-vote-service.outcome }}" | |
| # --- EKS ๋ฐฐํฌ --- | |
| - name: Configure kubectl for EKS | |
| run: | | |
| aws eks update-kubeconfig --region ${{ env.AWS_REGION }} --name ${{ env.EKS_CLUSTER_NAME }} | |
| - name: Deploy to EKS | |
| if: github.ref == 'refs/heads/main' | |
| run: | | |
| kubectl apply -f k8s/namespace.yaml | |
| kubectl apply -f k8s/api-gateway-rbac.yaml -n ${{ env.EKS_NAMESPACE }} | |
| export ECR_REGISTRY=${{ env.ECR_REGISTRY }} | |
| export CERTIFICATE_ARN=${{ secrets.CERTIFICATE_ARN }} | |
| # Secrets ์์ฑ | |
| kubectl apply -f - <<EOF | |
| apiVersion: v1 | |
| kind: Secret | |
| metadata: | |
| name: togather-secrets | |
| namespace: ${{ env.EKS_NAMESPACE }} | |
| type: Opaque | |
| stringData: | |
| SPRING_DATASOURCE_USERNAME: "${{ secrets.DB_USERNAME }}" | |
| SPRING_DATASOURCE_PASSWORD: "${{ secrets.DB_PASSWORD }}" | |
| DB_USERNAME: "${{ secrets.DB_USERNAME }}" | |
| DB_PASSWORD: "${{ secrets.DB_PASSWORD }}" | |
| JWT_SECRET_KEY: "${{ secrets.JWT_SECRET_KEY }}" | |
| JWT_SECRET: "${{ secrets.JWT_SECRET_KEY }}" | |
| SPRING_RABBITMQ_USERNAME: "admin" | |
| SPRING_RABBITMQ_PASSWORD: "${{ secrets.RABBITMQ_PASSWORD }}" | |
| REDIS_PASSWORD: "${{ secrets.REDIS_PASSWORD }}" | |
| SPRING_DATA_REDIS_PASSWORD: "${{ secrets.REDIS_PASSWORD }}" | |
| EOF | |
| # ๋ฆฌ์์ค ๋ฐฐํฌ (with error handling) | |
| echo "๐ Deploying Kubernetes resources..." | |
| FAILED_DEPLOYMENTS="" | |
| SUCCESS_DEPLOYMENTS="" | |
| for file in api-gateway user-service trading-service pay-service vote-service ingress; do | |
| echo "๐ฆ Deploying $file..." | |
| if envsubst < k8s/$file.yaml | kubectl apply -f - -n ${{ env.EKS_NAMESPACE }}; then | |
| echo "โ $file deployed successfully" | |
| SUCCESS_DEPLOYMENTS="$SUCCESS_DEPLOYMENTS $file" | |
| else | |
| echo "โ $file deployment failed" | |
| FAILED_DEPLOYMENTS="$FAILED_DEPLOYMENTS $file" | |
| fi | |
| done | |
| # Additional resources | |
| echo "๐ฆ Deploying additional resources..." | |
| for resource in configmap redis rabbitmq hpa; do | |
| echo "๐ฆ Deploying $resource..." | |
| if kubectl apply -f k8s/$resource.yaml -n ${{ env.EKS_NAMESPACE }}; then | |
| echo "โ $resource deployed successfully" | |
| SUCCESS_DEPLOYMENTS="$SUCCESS_DEPLOYMENTS $resource" | |
| else | |
| echo "โ $resource deployment failed" | |
| FAILED_DEPLOYMENTS="$FAILED_DEPLOYMENTS $resource" | |
| fi | |
| done | |
| # ์ต์ ์ด๋ฏธ์ง ๋ฐ์ (with error handling) | |
| echo "๐ Updating service images..." | |
| for svc in api-gateway user-service trading-service pay-service vote-service; do | |
| echo "๐ Updating $svc image..." | |
| if kubectl set image deployment/$svc $svc=$ECR_REGISTRY/togather/$svc:${{ github.sha }} -n ${{ env.EKS_NAMESPACE }} --record=true; then | |
| echo "โ $svc image updated successfully" | |
| else | |
| echo "โ $svc image update failed" | |
| FAILED_DEPLOYMENTS="$FAILED_DEPLOYMENTS $svc-image" | |
| fi | |
| done | |
| # ๋ฐฐํฌ ๊ฒฐ๊ณผ ์์ฝ | |
| echo "" | |
| echo "๐ Deployment Summary:" | |
| echo "=====================" | |
| if [ -n "$SUCCESS_DEPLOYMENTS" ]; then | |
| echo "โ Successful: $SUCCESS_DEPLOYMENTS" | |
| fi | |
| if [ -n "$FAILED_DEPLOYMENTS" ]; then | |
| echo "โ Failed: $FAILED_DEPLOYMENTS" | |
| echo "โ ๏ธ Some deployments failed, but continuing..." | |
| fi | |
| # Next.js ํด๋ผ์ด์ธํธ ์ต์ข ์ํ ํ์ธ | |
| echo "" | |
| echo "๐ Next.js Client Final Status:" | |
| echo "===============================" | |
| kubectl get pods -l app=nextjs-client -n ${{ env.EKS_NAMESPACE }} -o wide || echo "โ Failed to get Next.js Client pods" | |
| # Next.js ํด๋ผ์ด์ธํธ Health Check | |
| echo "" | |
| echo "๐ฅ Next.js Client Health Check:" | |
| NEXTJS_POD=$(kubectl get pods -l app=nextjs-client -n ${{ env.EKS_NAMESPACE }} -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "") | |
| if [ -n "$NEXTJS_POD" ]; then | |
| echo "๐ Pod: $NEXTJS_POD" | |
| kubectl logs $NEXTJS_POD -n ${{ env.EKS_NAMESPACE }} --tail=20 || echo "โ Failed to get logs" | |
| else | |
| echo "โ No Next.js Client pod found" | |
| fi | |
| # --- ๋ฐฐํฌ ์๋ฃ ํ Prewarm --- | |
| - name: Prewarm Next.js Client | |
| if: github.ref == 'refs/heads/main' | |
| run: | | |
| echo "๐ฅ Prewarming Next.js..." | |
| SITE_URL="https://xn--o79aq2k062a.store" | |
| for i in {1..5}; do | |
| STATUS=$(curl -s -o /dev/null -w "%{http_code}" "$SITE_URL" || echo "000") | |
| echo "Request $i => HTTP $STATUS" | |
| sleep 3 | |
| done | |
| echo "โ Prewarm completed!" | |
| # --- ์์ธํ Pod ์ํ ์ ๊ฒ ๋ฐ ๋ก๊ทธ ์ถ๋ ฅ --- | |
| - name: Detailed Pod Status Check | |
| if: always() | |
| run: | | |
| echo "๐ Detailed Pod Status Check" | |
| echo "============================" | |
| # ๋ชจ๋ Pod ์ํ ํ์ธ | |
| echo "๐ All Pods Status:" | |
| kubectl get pods -n ${{ env.EKS_NAMESPACE }} -o wide || echo "โ Failed to get pods" | |
| # Next.js Client Pod ์ํ ์์ธ ํ์ธ | |
| echo "" | |
| echo "๐ Next.js Client Pod Status:" | |
| NEXTJS_PODS=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=nextjs-client -o jsonpath='{.items[*].metadata.name}' 2>/dev/null || echo "") | |
| if [ -n "$NEXTJS_PODS" ]; then | |
| for pod in $NEXTJS_PODS; do | |
| echo "๐ Pod: $pod" | |
| kubectl get pod $pod -n ${{ env.EKS_NAMESPACE }} -o jsonpath='{.status.phase}' 2>/dev/null && echo "" | |
| kubectl get pod $pod -n ${{ env.EKS_NAMESPACE }} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null && echo " (Ready)" | |
| kubectl get pod $pod -n ${{ env.EKS_NAMESPACE }} -o jsonpath='{.status.containerStatuses[0].ready}' 2>/dev/null && echo " (Container Ready)" | |
| done | |
| else | |
| echo "โ No Next.js Client pods found" | |
| fi | |
| # API Gateway Pod ์ํ ํ์ธ | |
| echo "" | |
| echo "๐ API Gateway Pod Status:" | |
| API_PODS=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=api-gateway -o jsonpath='{.items[*].metadata.name}' 2>/dev/null || echo "") | |
| if [ -n "$API_PODS" ]; then | |
| for pod in $API_PODS; do | |
| echo "๐ Pod: $pod" | |
| kubectl get pod $pod -n ${{ env.EKS_NAMESPACE }} -o jsonpath='{.status.phase}' 2>/dev/null && echo "" | |
| kubectl get pod $pod -n ${{ env.EKS_NAMESPACE }} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null && echo " (Ready)" | |
| done | |
| else | |
| echo "โ No API Gateway pods found" | |
| fi | |
| # --- ๋น์ ์ Pod ๋ก๊ทธ ์ถ๋ ฅ --- | |
| - name: Unhealthy Pod Logs | |
| if: always() | |
| run: | | |
| echo "๐จ Unhealthy Pod Logs Check" | |
| echo "==========================" | |
| # Next.js Client ๋น์ ์ Pod ๋ก๊ทธ | |
| echo "๐ Next.js Client Pod Logs:" | |
| NEXTJS_PODS=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=nextjs-client -o jsonpath='{.items[*].metadata.name}' 2>/dev/null || echo "") | |
| if [ -n "$NEXTJS_PODS" ]; then | |
| for pod in $NEXTJS_PODS; do | |
| echo "๐ Pod: $pod" | |
| READY_STATUS=$(kubectl get pod $pod -n ${{ env.EKS_NAMESPACE }} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null || echo "Unknown") | |
| if [ "$READY_STATUS" != "True" ]; then | |
| echo "โ ๏ธ Pod $pod is not ready (Status: $READY_STATUS)" | |
| echo "๐ Recent logs from $pod:" | |
| kubectl logs $pod -n ${{ env.EKS_NAMESPACE }} --tail=50 || echo "โ Failed to get logs from $pod" | |
| echo "" | |
| echo "๐ Pod description for $pod:" | |
| kubectl describe pod $pod -n ${{ env.EKS_NAMESPACE }} || echo "โ Failed to describe pod $pod" | |
| echo "" | |
| else | |
| echo "โ Pod $pod is ready" | |
| fi | |
| done | |
| fi | |
| # API Gateway ๋น์ ์ Pod ๋ก๊ทธ | |
| echo "๐ API Gateway Pod Logs:" | |
| API_PODS=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=api-gateway -o jsonpath='{.items[*].metadata.name}' 2>/dev/null || echo "") | |
| if [ -n "$API_PODS" ]; then | |
| for pod in $API_PODS; do | |
| echo "๐ Pod: $pod" | |
| READY_STATUS=$(kubectl get pod $pod -n ${{ env.EKS_NAMESPACE }} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null || echo "Unknown") | |
| if [ "$READY_STATUS" != "True" ]; then | |
| echo "โ ๏ธ Pod $pod is not ready (Status: $READY_STATUS)" | |
| echo "๐ Recent logs from $pod:" | |
| kubectl logs $pod -n ${{ env.EKS_NAMESPACE }} --tail=50 || echo "โ Failed to get logs from $pod" | |
| echo "" | |
| echo "๐ Pod description for $pod:" | |
| kubectl describe pod $pod -n ${{ env.EKS_NAMESPACE }} || echo "โ Failed to describe pod $pod" | |
| echo "" | |
| else | |
| echo "โ Pod $pod is ready" | |
| fi | |
| done | |
| fi | |
| # --- Health Check ์ํ ํ์ธ --- | |
| - name: Health Check Status | |
| if: always() | |
| run: | | |
| echo "๐ฅ Health Check Status" | |
| echo "=====================" | |
| # Next.js Client health check | |
| echo "๐ Next.js Client Health Check:" | |
| NEXTJS_PODS=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=nextjs-client -o jsonpath='{.items[*].metadata.name}' 2>/dev/null || echo "") | |
| if [ -n "$NEXTJS_PODS" ]; then | |
| for pod in $NEXTJS_PODS; do | |
| echo "๐ Testing /api/healthz on pod: $pod" | |
| POD_IP=$(kubectl get pod $pod -n ${{ env.EKS_NAMESPACE }} -o jsonpath='{.status.podIP}' 2>/dev/null || echo "") | |
| if [ -n "$POD_IP" ]; then | |
| echo "๐ Pod IP: $POD_IP" | |
| # Pod ๋ด๋ถ์์ health check ํ ์คํธ | |
| kubectl exec $pod -n ${{ env.EKS_NAMESPACE }} -- curl -s -o /dev/null -w "%{http_code}" http://localhost:3000/api/healthz 2>/dev/null || echo "โ Health check failed" | |
| else | |
| echo "โ Could not get pod IP" | |
| fi | |
| done | |
| fi | |
| # --- ์๋ฌ ๋ฐ์ ์ ์์ธ ์ง๋จ --- | |
| - name: Detailed Error Diagnostics | |
| if: failure() | |
| run: | | |
| echo "๐จ DEPLOYMENT FAILED - Detailed diagnostics..." | |
| echo "=============================================" | |
| # ๋ชจ๋ Pod ์ํ | |
| echo "๐ All Pods Status:" | |
| kubectl get pods -n ${{ env.EKS_NAMESPACE }} -o wide || echo "โ Failed to get pods" | |
| # ์ด๋ฒคํธ ํ์ธ | |
| echo "" | |
| echo "๐ Recent Events:" | |
| kubectl get events -n ${{ env.EKS_NAMESPACE }} --sort-by='.lastTimestamp' | tail -20 || echo "โ Failed to get events" | |
| # ์๋น์ค ์ํ | |
| echo "" | |
| echo "๐ Services Status:" | |
| kubectl get svc -n ${{ env.EKS_NAMESPACE }} || echo "โ Failed to get services" | |
| # Ingress ์ํ | |
| echo "" | |
| echo "๐ Ingress Status:" | |
| kubectl get ingress -n ${{ env.EKS_NAMESPACE }} || echo "โ Failed to get ingress" | |
| #name: Deploy ToGather Microservices to AWS EKS | |
| # | |
| #on: | |
| # push: | |
| # branches: [ main ] | |
| # pull_request: | |
| # branches: [ main ] | |
| # workflow_dispatch: | |
| # | |
| #env: | |
| # AWS_REGION: ap-northeast-2 | |
| # ECR_REGISTRY: ${{ secrets.AWS_ACCOUNT_ID }}.dkr.ecr.ap-northeast-2.amazonaws.com | |
| # EKS_CLUSTER_NAME: togather-cluster | |
| # EKS_NAMESPACE: togather | |
| # # CDN ์ค์ ! | |
| # CDN_URL: https://d36ue99r8i68ow.cloudfront.net | |
| # S3_BUCKET_NAME: togather-static-assets | |
| # CLOUDFRONT_DISTRIBUTION_ID: E15ZDIW40YBVEN | |
| # | |
| #jobs: | |
| # build-and-deploy: | |
| # runs-on: ubuntu-latest | |
| # | |
| # steps: | |
| # - name: Checkout code | |
| # uses: actions/checkout@v4 | |
| # | |
| # - name: Set gradlew permissions | |
| # run: chmod +x gradlew | |
| # | |
| # - name: Configure AWS credentials | |
| # uses: aws-actions/configure-aws-credentials@v4 | |
| # with: | |
| # aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} | |
| # aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} | |
| # aws-region: ${{ env.AWS_REGION }} | |
| # | |
| # - name: Login to Amazon ECR | |
| # id: login-ecr | |
| # uses: aws-actions/amazon-ecr-login@v2 | |
| # | |
| # - name: Checkout Client Repo # ToGather-Client ์ ๊ทผ! | |
| # uses: actions/checkout@v4 | |
| # with: | |
| # repository: ToGather-Final/ToGather-Client | |
| # token: ${{ secrets.GH_PAT }} | |
| # path: togather-client | |
| # | |
| # - name: Setup pnpm | |
| # uses: pnpm/action-setup@v4 | |
| # with: | |
| # version: latest | |
| # | |
| # - name: Cache pnpm dependencies | |
| # uses: actions/cache@v3 | |
| # with: | |
| # path: | | |
| # ~/.pnpm-store | |
| # togather-client/node_modules | |
| # key: ${{ runner.os }}-pnpm-${{ hashFiles('togather-client/pnpm-lock.yaml') }} | |
| # restore-keys: | | |
| # ${{ runner.os }}-pnpm- | |
| # | |
| # - name: Set up JDK 17 | |
| # uses: actions/setup-java@v4 | |
| # with: | |
| # java-version: '17' | |
| # distribution: 'temurin' | |
| # | |
| # - name: Cache Gradle packages | |
| # uses: actions/cache@v3 | |
| # with: | |
| # path: | | |
| # ~/.gradle/caches | |
| # ~/.gradle/wrapper | |
| # key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle*') }} | |
| # restore-keys: | | |
| # ${{ runner.os }}-gradle- | |
| # | |
| # - name: Build with Gradle | |
| # run: | | |
| # chmod +x gradlew | |
| # ./gradlew build -x test | |
| # | |
| # # - name: Build and push Next.js Client image | |
| # # run: | | |
| # # docker build -t $ECR_REGISTRY/togather/client:${{ github.sha }} -f ./togather-client/Dockerfile ./togather-client | |
| # # docker build -t $ECR_REGISTRY/togather/client:latest -f ./togather-client/Dockerfile ./togather-client | |
| # # docker push $ECR_REGISTRY/togather/client:${{ github.sha }} | |
| # # docker push $ECR_REGISTRY/togather/client:latest | |
| # | |
| # - name: Build Next.js Client and upload static assets | |
| # run: | | |
| # cd togather-client | |
| # export CDN_URL_CLEAN=$(echo "${{ env.CDN_URL }}" | sed 's:/*$::') | |
| # echo "๐ฐ Using CDN_URL=$CDN_URL_CLEAN" | |
| # | |
| # pnpm install --frozen-lockfile | |
| # | |
| # # โ Next.js ๋น๋ (๋จ 1ํ) | |
| # SERVER_API_BASE_URL=http://api-gateway.togather.svc.cluster.local:8000/api \ | |
| # NEXT_PUBLIC_API_BASE_URL=https://xn--o79aq2k062a.store/api \ | |
| # CDN_URL=$CDN_URL_CLEAN pnpm run build | |
| # | |
| # # โ ๋น๋ ์ฐ์ถ๋ฌผ ์ํฐํฉํธ๋ก ๋ณด๊ด (Docker์ S3์์ ๋์ผ ๋น๋ ์ฌ์ฉ) | |
| # cd .. | |
| # tar -czf next-artifacts.tgz -C togather-client .next/standalone .next/static .next/server public | |
| # echo "โ next-artifacts.tgz ์์ฑ ์๋ฃ" | |
| # | |
| # # โ ์ ์ ์์ฐ ์ ๋ก๋ ๋ฐ CDN ๋ฌดํจํ | |
| # cd togather-client | |
| # pnpm run upload-assets | |
| # env: | |
| # CDN_URL: ${{ env.CDN_URL }} | |
| # S3_BUCKET_NAME: ${{ env.S3_BUCKET_NAME }} | |
| # CLOUDFRONT_DISTRIBUTION_ID: ${{ env.CLOUDFRONT_DISTRIBUTION_ID }} | |
| # NEXT_PUBLIC_API_BASE_URL: https://xn--o79aq2k062a.store | |
| # | |
| # # โ next-artifacts.tgz ํ์ผ์ ์ ๋ก๋ (๋น๋ ๊ฒฐ๊ณผ ๋ฐฑ์ ) | |
| # - name: Upload Next build artifacts | |
| # uses: actions/upload-artifact@v4 | |
| # with: | |
| # name: next-artifacts | |
| # path: next-artifacts.tgz | |
| # | |
| # # โ ๋น๋ ๊ฒฐ๊ณผ๋ฅผ Docker ์ด๋ฏธ์ง ์์ฑ์ ์ฌ์ฌ์ฉ | |
| # - name: Download Next.js build artifacts | |
| # uses: actions/download-artifact@v4 | |
| # with: | |
| # name: next-artifacts | |
| # path: ./togather-client | |
| # | |
| # echo "โ Next.js build completed. Checking output structure..." | |
| # ls -R .next | head -n 40 | |
| # | |
| # # โ server entry ๊ฒ์ฆ | |
| # if [ ! -f ".next/standalone/server.js" ] && [ ! -f ".next/standalone/server.mjs" ]; then | |
| # echo "โ .next/standalone/server.js(.mjs) not found!" | |
| # echo "๋น๋ ์ฐ์ถ๋ฌผ ๋๋ฝ. ๋น๋ ์คํจ๋ก ๊ฐ์ฃผํฉ๋๋ค." | |
| # ls -R .next/standalone | head -n 50 || true | |
| # exit 1 | |
| # fi | |
| # | |
| # | |
| # # โ ๋น๋ ์ฐ์ถ๋ฌผ ์ํฐํฉํธ๋ก ๋ณด๊ด (Docker์ S3์์ ๋์ผ ๋น๋ ์ฌ์ฉ)!!! | |
| # cd .. | |
| # tar -czf next-artifacts.tgz -C togather-client .next/standalone .next/static public | |
| # echo "โ next-artifacts.tgz ์์ฑ ์๋ฃ" | |
| # | |
| # - name: Build and push Next.js runtime image | |
| # run: | | |
| # echo "๐ Building Next.js runtime image..." | |
| # docker build \ | |
| # --build-arg SERVER_API_BASE_URL=http://api-gateway.togather.svc.cluster.local:8000/api \ | |
| # --build-arg NEXT_PUBLIC_API_BASE_URL=https://xn--o79aq2k062a.store/api \ | |
| # --build-arg CDN_URL=${{ env.CDN_URL }} \ | |
| # -t $ECR_REGISTRY/togather/client:${{ github.sha }} \ | |
| # -f ./togather-client/Dockerfile ./togather-client | |
| # | |
| # # โ ํ๊ทธ ๋ฐ ํธ์ | |
| # docker tag $ECR_REGISTRY/togather/client:${{ github.sha }} $ECR_REGISTRY/togather/client:latest | |
| # docker push $ECR_REGISTRY/togather/client:${{ github.sha }} | |
| # docker push $ECR_REGISTRY/togather/client:latest | |
| # | |
| # # โ ๋น๋ ๊ฒ์ฆ ๋ก๊ทธ | |
| # echo "โ Docker image for Next.js successfully built and pushed!" | |
| # | |
| # | |
| # - name: Upload Next build artifacts | |
| # uses: actions/upload-artifact@v4 | |
| # with: | |
| # name: next-artifacts | |
| # path: next-artifacts.tgz | |
| # | |
| # - name: Download Next.js build artifacts | |
| # uses: actions/download-artifact@v4 | |
| # with: | |
| # name: next-artifacts | |
| # path: ./togather-client | |
| # | |
| # - name: Extract Next.js artifacts | |
| # run: | | |
| # tar -xzf ./togather-client/next-artifacts.tgz -C ./togather-client | |
| # echo "โ next-artifacts.tgz ์์ถ ํด์ ์๋ฃ" | |
| # ls -R ./togather-client/.next/standalone | head -n 20 | |
| # | |
| # # โ kr ๋์ ์ ์ํด ์ถ๊ฐ --build-arg CDN_URL=${{ env.CDN_URL }} \ | |
| # - name: Build and push Next.js runtime image | |
| # run: | | |
| # echo "๐ Building Next.js runtime image..." | |
| # cd togather-client | |
| # | |
| # # ๋น๋ ์ฐ์ถ๋ฌผ ๊ฒ์ฆ | |
| # if [ ! -f ".next/standalone/server.js" ]; then | |
| # echo "โ server.js not found in .next/standalone โ build failed!" | |
| # ls -R .next | head -n 40 | |
| # exit 1 | |
| # fi | |
| # | |
| # docker build \ | |
| # --build-arg SERVER_API_BASE_URL=http://api-gateway.togather.svc.cluster.local:8000/api \ | |
| # --build-arg NEXT_PUBLIC_API_BASE_URL=https://xn--o79aq2k062a.store/api \ | |
| # --build-arg CDN_URL=${{ env.CDN_URL }} \ | |
| # -t $ECR_REGISTRY/togather/client:${{ github.sha }} \ | |
| # -t $ECR_REGISTRY/togather/client:latest \ | |
| # -f Dockerfile . | |
| # | |
| # docker push $ECR_REGISTRY/togather/client:${{ github.sha }} | |
| # docker push $ECR_REGISTRY/togather/client:latest | |
| # | |
| # - name: Build and push Vote Service image | |
| # run: | | |
| # docker build -f vote-service/Dockerfile -t $ECR_REGISTRY/togather/vote-service:${{ github.sha }} . | |
| # docker build -f vote-service/Dockerfile -t $ECR_REGISTRY/togather/vote-service:latest . | |
| # docker push $ECR_REGISTRY/togather/vote-service:${{ github.sha }} | |
| # docker push $ECR_REGISTRY/togather/vote-service:latest | |
| # | |
| # - name: Install kubectl | |
| # uses: azure/setup-kubectl@v3 | |
| # with: | |
| # version: 'v1.28.0' | |
| # | |
| # - name: Configure kubectl for EKS | |
| # run: | | |
| # aws eks update-kubeconfig --region ${{ env.AWS_REGION }} --name ${{ env.EKS_CLUSTER_NAME }} | |
| # | |
| # - name: Deploy to EKS | |
| # if: github.ref == 'refs/heads/main' | |
| # run: | | |
| # # 1. ๋ค์์คํ์ด์ค ์์ฑ (์์ผ๋ฉด ์์ฑ, ์์ผ๋ฉด ๋์ด๊ฐ) | |
| # kubectl apply -f k8s/namespace.yaml | |
| # | |
| # # 2. Secret ์์ฑ (๋ชจ๋ ๋ฏผ๊ฐ ์ ๋ณด๋ฅผ GitHub Secrets์์ ๊ฐ์ ธ์ ํตํฉ) | |
| # kubectl apply -f - <<EOF | |
| # apiVersion: v1 | |
| # kind: Secret | |
| # metadata: | |
| # name: togather-secrets | |
| # namespace: ${{ env.EKS_NAMESPACE }} | |
| # type: Opaque | |
| # stringData: | |
| # SPRING_DATASOURCE_USERNAME: "${{ secrets.DB_USERNAME }}" | |
| # SPRING_DATASOURCE_PASSWORD: "${{ secrets.DB_PASSWORD }}" | |
| # DB_USERNAME: "${{ secrets.DB_USERNAME }}" | |
| # DB_PASSWORD: "${{ secrets.DB_PASSWORD }}" | |
| # JWT_SECRET_KEY: "${{ secrets.JWT_SECRET_KEY }}" | |
| # JWT_SECRET: "${{ secrets.JWT_SECRET_KEY }}" | |
| # SPRING_RABBITMQ_USERNAME: "admin" | |
| # SPRING_RABBITMQ_PASSWORD: "${{ secrets.RABBITMQ_PASSWORD }}" | |
| # RABBITMQ_USERNAME: "admin" | |
| # RABBITMQ_PASSWORD: "${{ secrets.RABBITMQ_PASSWORD }}" | |
| # REDIS_PASSWORD: "${{ secrets.REDIS_PASSWORD }}" | |
| # SPRING_DATA_REDIS_PASSWORD: "${{ secrets.REDIS_PASSWORD }}" | |
| # # KIS ์ฆ๊ถ API ํ๊ฒฝ๋ณ์ | |
| # KIS_BASE_URL: "${{ secrets.KIS_BASE_URL }}" | |
| # KIS_APPKEY: "${{ secrets.KIS_APPKEY }}" | |
| # KIS_APPSECRET: "${{ secrets.KIS_APPSECRET }}" | |
| # EOF | |
| # | |
| # # 3. RBAC ๋จผ์ ์ ์ฉ (ServiceAccount/Role/RoleBinding) | |
| # kubectl apply -f k8s/api-gateway-rbac.yaml -n ${{ env.EKS_NAMESPACE }} | |
| # | |
| # # 4. ํ๊ฒฝ๋ณ์ ์นํ ํ ๋๋จธ์ง ๋ชจ๋ ๋ฆฌ์์ค ๋ฐฐํฌ | |
| # # ECR_REGISTRY์ CERTIFICATE_ARN ํ๊ฒฝ๋ณ์๋ฅผ k8s ํ์ผ๋ค์ ์นํ | |
| # export ECR_REGISTRY=${{ env.ECR_REGISTRY }} | |
| # export CERTIFICATE_ARN=${{ secrets.CERTIFICATE_ARN }} | |
| # | |
| # # ํ๊ฒฝ๋ณ์ ์นํ์ด ํ์ํ ํ์ผ๋ค ๋ฐฐํฌ | |
| # envsubst < k8s/api-gateway.yaml | kubectl apply -f - -n ${{ env.EKS_NAMESPACE }} | |
| # envsubst < k8s/user-service.yaml | kubectl apply -f - -n ${{ env.EKS_NAMESPACE }} | |
| # envsubst < k8s/trading-service.yaml | kubectl apply -f - -n ${{ env.EKS_NAMESPACE }} | |
| # envsubst < k8s/pay-service.yaml | kubectl apply -f - -n ${{ env.EKS_NAMESPACE }} | |
| # envsubst < k8s/vote-service.yaml | kubectl apply -f - -n ${{ env.EKS_NAMESPACE }} | |
| # envsubst < k8s/nextjs-client.yaml | kubectl apply -f - -n ${{ env.EKS_NAMESPACE }} | |
| # envsubst < k8s/ingress.yaml | kubectl apply -f - -n ${{ env.EKS_NAMESPACE }} | |
| # | |
| # # ๋๋จธ์ง ๋ฆฌ์์ค๋ค (ConfigMap, Service, HPA ๋ฑ) ๋ฐฐํฌ | |
| # kubectl apply -f k8s/configmap.yaml -n ${{ env.EKS_NAMESPACE }} | |
| # kubectl apply -f k8s/configmap-env.yaml -n ${{ env.EKS_NAMESPACE }} | |
| # kubectl apply -f k8s/redis.yaml -n ${{ env.EKS_NAMESPACE }} | |
| # kubectl apply -f k8s/rabbitmq.yaml -n ${{ env.EKS_NAMESPACE }} | |
| # # ingress.yaml๊ณผ nextjs-client.yaml์ ์์์ envsubst๋ก ์ด๋ฏธ ๋ฐฐํฌ๋จ | |
| # kubectl apply -f k8s/hpa.yaml -n ${{ env.EKS_NAMESPACE }} # ๐ HPA ์ฌํ์ฑํ | |
| # | |
| # # 5. ๊ฐ ๋ํ๋ก์ด๋จผํธ์ ์ปค๋ฐ SHA ์ด๋ฏธ์ง ์ค์ (๋กค์์ ํธ๋ฆฌ๊ฑฐ) | |
| # kubectl set image deployment/api-gateway api-gateway=${{ env.ECR_REGISTRY }}/togather/api-gateway:${{ github.sha }} -n ${{ env.EKS_NAMESPACE }} --record=true | |
| # kubectl set image deployment/user-service user-service=${{ env.ECR_REGISTRY }}/togather/user-service:${{ github.sha }} -n ${{ env.EKS_NAMESPACE }} --record=true | |
| # kubectl set image deployment/trading-service trading-service=${{ env.ECR_REGISTRY }}/togather/trading-service:${{ github.sha }} -n ${{ env.EKS_NAMESPACE }} --record=true | |
| # kubectl set image deployment/pay-service pay-service=${{ env.ECR_REGISTRY }}/togather/pay-service:${{ github.sha }} -n ${{ env.EKS_NAMESPACE }} --record=true | |
| # kubectl set image deployment/vote-service vote-service=${{ env.ECR_REGISTRY }}/togather/vote-service:${{ github.sha }} -n ${{ env.EKS_NAMESPACE }} --record=true | |
| # kubectl set image deployment/nextjs-client nextjs-client=${{ env.ECR_REGISTRY }}/togather/client:${{ github.sha }} -n ${{ env.EKS_NAMESPACE }} --record=true | |
| # | |
| # - name: Invalidate CloudFront Cache | |
| # if: github.ref == 'refs/heads/main' | |
| # uses: chetan/invalidate-cloudfront-action@v2 | |
| # env: | |
| # DISTRIBUTION: ${{ env.CLOUDFRONT_DISTRIBUTION_ID }} | |
| # PATHS: "/*" # ๐ก ๋ชจ๋  ํ์ผ(*)์ ๋ฌดํจํํ์ฌ ์ฆ์ ๋ณ๊ฒฝ์ฌํญ ๋ฐ์ | |
| # AWS_REGION: "us-east-1" # ์ธ์ฆ์๋ us-east-1์ ์์ด์ผ ํ๋ฏ๋ก ๋ช ์ | |
| # AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} | |
| # AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} | |
| # | |
| # - name: Wait for deployment to complete | |
| # if: github.ref == 'refs/heads/main' | |
| # run: | | |
| # # ๋ฐฐํฌ ๊ฒฐ๊ณผ ์ถ์ ์ ์ํ ๋ณ์ ์ด๊ธฐํ | |
| # FAILED_SERVICES="" | |
| # SUCCESS_SERVICES="" | |
| # | |
| # # Redis ๋ฐฐํฌ ํ์ธ | |
| # echo "๐ Redis ๋ฐฐํฌ ์ํ ํ์ธ ์ค..." | |
| # if kubectl rollout status deployment/redis -n ${{ env.EKS_NAMESPACE }} --timeout=60s; then | |
| # echo "โ Redis ๋ฐฐํฌ ์ฑ๊ณต" | |
| # SUCCESS_SERVICES="$SUCCESS_SERVICES redis" | |
| # else | |
| # echo "โ Redis ๋ฐฐํฌ ์คํจ! ์ง๋จ ์ ๋ณด ์ถ๋ ฅ ์ค..." | |
| # FAILED_SERVICES="$FAILED_SERVICES redis" | |
| # kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=redis | |
| # kubectl describe deployment redis -n ${{ env.EKS_NAMESPACE }} | |
| # POD=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=redis -o jsonpath='{.items[0].metadata.name}') | |
| # kubectl describe pod $POD -n ${{ env.EKS_NAMESPACE }} || true | |
| # kubectl logs $POD -n ${{ env.EKS_NAMESPACE }} --tail=100 || true | |
| # fi | |
| # | |
| # # API Gateway ๋ฐฐํฌ ํ์ธ | |
| # echo "" | |
| # echo "๐ API Gateway ๋ฐฐํฌ ์ํ ํ์ธ ์ค..." | |
| # if kubectl rollout status deployment/api-gateway -n ${{ env.EKS_NAMESPACE }} --timeout=240s; then | |
| # echo "โ API Gateway ๋ฐฐํฌ ์ฑ๊ณต" | |
| # SUCCESS_SERVICES="$SUCCESS_SERVICES api-gateway" | |
| # else | |
| # echo "โ API Gateway ๋ฐฐํฌ ์คํจ! ์ง๋จ ์ ๋ณด ์ถ๋ ฅ ์ค..." | |
| # FAILED_SERVICES="$FAILED_SERVICES api-gateway" | |
| # kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=api-gateway | |
| # kubectl describe deployment api-gateway -n ${{ env.EKS_NAMESPACE }} | |
| # POD=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=api-gateway -o jsonpath='{.items[0].metadata.name}') | |
| # kubectl describe pod $POD -n ${{ env.EKS_NAMESPACE }} || true | |
| # kubectl logs $POD -n ${{ env.EKS_NAMESPACE }} --tail=100 || true | |
| # fi | |
| # | |
| # # User Service ๋ฐฐํฌ ํ์ธ | |
| # echo "" | |
| # echo "๐ User Service ๋ฐฐํฌ ์ํ ํ์ธ ์ค..." | |
| # if kubectl rollout status deployment/user-service -n ${{ env.EKS_NAMESPACE }} --timeout=180s; then | |
| # echo "โ User Service ๋ฐฐํฌ ์ฑ๊ณต" | |
| # SUCCESS_SERVICES="$SUCCESS_SERVICES user-service" | |
| # else | |
| # echo "โ User Service ๋ฐฐํฌ ์คํจ! ์ง๋จ ์ ๋ณด ์ถ๋ ฅ ์ค..." | |
| # FAILED_SERVICES="$FAILED_SERVICES user-service" | |
| # kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=user-service | |
| # kubectl describe deployment user-service -n ${{ env.EKS_NAMESPACE }} | |
| # POD=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=user-service -o jsonpath='{.items[0].metadata.name}') | |
| # kubectl describe pod $POD -n ${{ env.EKS_NAMESPACE }} || true | |
| # kubectl logs $POD -n ${{ env.EKS_NAMESPACE }} --tail=100 || true | |
| # fi | |
| # | |
| # # Trading Service ๋ฐฐํฌ ํ์ธ | |
| # echo "" | |
| # echo "๐ Trading Service ๋ฐฐํฌ ์ํ ํ์ธ ์ค..." | |
| # if kubectl rollout status deployment/trading-service -n ${{ env.EKS_NAMESPACE }} --timeout=180s; then | |
| # echo "โ Trading Service ๋ฐฐํฌ ์ฑ๊ณต" | |
| # SUCCESS_SERVICES="$SUCCESS_SERVICES trading-service" | |
| # else | |
| # echo "โ Trading Service ๋ฐฐํฌ ์คํจ! ์ง๋จ ์ ๋ณด ์ถ๋ ฅ ์ค..." | |
| # FAILED_SERVICES="$FAILED_SERVICES trading-service" | |
| # kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=trading-service | |
| # kubectl describe deployment trading-service -n ${{ env.EKS_NAMESPACE }} | |
| # POD=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=trading-service -o jsonpath='{.items[0].metadata.name}') | |
| # kubectl describe pod $POD -n ${{ env.EKS_NAMESPACE }} || true | |
| # kubectl logs $POD -n ${{ env.EKS_NAMESPACE }} --tail=100 || true | |
| # fi | |
| # | |
| # # Pay Service ๋ฐฐํฌ ํ์ธ | |
| # echo "" | |
| # echo "๐ Pay Service ๋ฐฐํฌ ์ํ ํ์ธ ์ค..." | |
| # if kubectl rollout status deployment/pay-service -n ${{ env.EKS_NAMESPACE }} --timeout=180s; then | |
| # echo "โ Pay Service ๋ฐฐํฌ ์ฑ๊ณต" | |
| # SUCCESS_SERVICES="$SUCCESS_SERVICES pay-service" | |
| # else | |
| # echo "โ Pay Service ๋ฐฐํฌ ์คํจ! ์ง๋จ ์ ๋ณด ์ถ๋ ฅ ์ค..." | |
| # FAILED_SERVICES="$FAILED_SERVICES pay-service" | |
| # kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=pay-service | |
| # kubectl describe deployment pay-service -n ${{ env.EKS_NAMESPACE }} | |
| # POD=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=pay-service -o jsonpath='{.items[0].metadata.name}') | |
| # kubectl describe pod $POD -n ${{ env.EKS_NAMESPACE }} || true | |
| # kubectl logs $POD -n ${{ env.EKS_NAMESPACE }} --tail=100 || true | |
| # fi | |
| # | |
| # # Vote Service ๋ฐฐํฌ ํ์ธ | |
| # echo "" | |
| # echo "๐ Vote Service ๋ฐฐํฌ ์ํ ํ์ธ ์ค..." | |
| # if kubectl rollout status deployment/vote-service -n ${{ env.EKS_NAMESPACE }} --timeout=180s; then | |
| # echo "โ Vote Service ๋ฐฐํฌ ์ฑ๊ณต" | |
| # SUCCESS_SERVICES="$SUCCESS_SERVICES vote-service" | |
| # else | |
| # echo "โ Vote Service ๋ฐฐํฌ ์คํจ! ์ง๋จ ์ ๋ณด ์ถ๋ ฅ ์ค..." | |
| # FAILED_SERVICES="$FAILED_SERVICES vote-service" | |
| # kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=vote-service | |
| # kubectl describe deployment vote-service -n ${{ env.EKS_NAMESPACE }} | |
| # POD=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=vote-service -o jsonpath='{.items[0].metadata.name}') | |
| # kubectl describe pod $POD -n ${{ env.EKS_NAMESPACE }} || true | |
| # kubectl logs $POD -n ${{ env.EKS_NAMESPACE }} --tail=100 || true | |
| # fi | |
| # | |
| # # Next.js Client ๋ฐฐํฌ ํ์ธ | |
| # echo "" | |
| # echo "๐ Next.js Client ๋ฐฐํฌ ์ํ ํ์ธ ์ค..." | |
| # if kubectl rollout status deployment/nextjs-client -n ${{ env.EKS_NAMESPACE }} --timeout=120s; then | |
| # echo "โ Next.js Client ๋ฐฐํฌ ์ฑ๊ณต" | |
| # SUCCESS_SERVICES="$SUCCESS_SERVICES nextjs-client" | |
| # else | |
| # echo "โ Next.js Client ๋ฐฐํฌ ์คํจ! ์ง๋จ ์ ๋ณด ์ถ๋ ฅ ์ค..." | |
| # FAILED_SERVICES="$FAILED_SERVICES nextjs-client" | |
| # kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=nextjs-client | |
| # kubectl describe deployment nextjs-client -n ${{ env.EKS_NAMESPACE }} | |
| # POD=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=nextjs-client -o jsonpath='{.items[0].metadata.name}') | |
| # kubectl describe pod $POD -n ${{ env.EKS_NAMESPACE }} || true | |
| # kubectl logs $POD -n ${{ env.EKS_NAMESPACE }} --tail=100 || true | |
| # fi | |
| # | |
| # # ์ต์ข ๋ฐฐํฌ ๊ฒฐ๊ณผ ์์ฝ | |
| # echo "" | |
| # echo "==================================================" | |
| # echo "๐ ๋ฐฐํฌ ๊ฒฐ๊ณผ ์์ฝ" | |
| # echo "==================================================" | |
| # | |
| # if [ -n "$SUCCESS_SERVICES" ]; then | |
| # echo "โ ์ฑ๊ณตํ ์๋น์ค:$SUCCESS_SERVICES" | |
| # fi | |
| # | |
| # if [ -n "$FAILED_SERVICES" ]; then | |
| # echo "โ ์คํจํ ์๋น์ค:$FAILED_SERVICES" | |
| # echo "" | |
| # echo "โ ๏ธ ์ผ๋ถ ์๋น์ค ๋ฐฐํฌ๊ฐ ์คํจํ์ต๋๋ค!" | |
| # exit 1 | |
| # else | |
| # echo "" | |
| # echo "๐ ๋ชจ๋ ์๋น์ค ๋ฐฐํฌ ์๋ฃ!" | |
| # fi | |
| # | |
| # - name: Verify deployment | |
| # if: github.ref == 'refs/heads/main' | |
| # run: | | |
| # kubectl get pods -n ${{ env.EKS_NAMESPACE }} | |
| # kubectl get services -n ${{ env.EKS_NAMESPACE }} | |
| # kubectl get ingress -n ${{ env.EKS_NAMESPACE }} | |
| # | |
| # - name: Prewarm Next.js Client | |
| # if: github.ref == 'refs/heads/main' | |
| # run: | | |
| # echo "๐ฅ Next.js ํด๋ผ์ด์ธํธ Prewarming ์์..." | |
| # SITE_URL="https://xn--o79aq2k062a.store" | |
| # | |
| # # ๋ฐฐํฌ ์๋ฃ ๋๊ธฐ (์ต๋ 2๋ถ) | |
| # echo "โณ Next.js ํด๋ผ์ด์ธํธ ์ค๋น ๋๊ธฐ ์ค..." | |
| # for i in {1..24}; do | |
| # if kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=nextjs-client -o jsonpath='{.items[0].status.conditions[?(@.type=="Ready")].status}' | grep -q "True"; then | |
| # echo "โ Next.js ํด๋ผ์ด์ธํธ ์ค๋น ์๋ฃ!" | |
| # break | |
| # fi | |
| # echo "๋๊ธฐ ์ค... ($i/24)" | |
| # sleep 5 | |
| # done | |
| # | |
| # # Prewarming ์์ฒญ (5ํ) | |
| # echo "๐ฅ ์๋ฐ์ ์์ฒญ ์ ์ก ์ค..." | |
| # for i in {1..5}; do | |
| # STATUS_CODE=$(curl -s -o /dev/null -w "%{http_code}" --max-time 30 "$SITE_URL" || echo "000") | |
| # if [ "$STATUS_CODE" = "200" ]; then | |
| # echo "โ ์๋ฐ์ ์์ฒญ $i/5 ์ฑ๊ณต (HTTP $STATUS_CODE)" | |
| # else | |
| # echo "โ ๏ธ ์๋ฐ์ ์์ฒญ $i/5 ์คํจ (HTTP $STATUS_CODE)" | |
| # fi | |
| # sleep 1 | |
| # done | |
| # | |
| # echo "๐ Prewarming ์๋ฃ! ์ฌ์ฉ์๋ค์ด ๋น ๋ฅธ ์๋ต์ ๊ฒฝํํ ์ ์์ต๋๋ค." | |
| # | |
| # - name: Diagnostics on failure | |
| # if: failure() | |
| # run: | | |
| # kubectl get deploy -n ${{ env.EKS_NAMESPACE }} | |
| # kubectl describe deploy api-gateway -n ${{ env.EKS_NAMESPACE }} | |
| # kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=api-gateway -o wide | |
| # POD=$(kubectl get pods -n ${{ env.EKS_NAMESPACE }} -l app=api-gateway -o jsonpath='{.items[0].metadata.name}') | |
| # kubectl describe pod $POD -n ${{ env.EKS_NAMESPACE }} || true | |
| # kubectl logs $POD -n ${{ env.EKS_NAMESPACE }} --tail=200 || true |