Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions build.gradle
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,13 @@ dependencies {
implementation 'org.xerial.snappy:snappy-java:1.1.10.5'
implementation 'com.github.ben-manes.caffeine:caffeine:3.1.8'

// Monitoring & Metrics
implementation 'org.springframework.boot:spring-boot-starter-actuator'
implementation 'io.micrometer:micrometer-registry-prometheus'

// ELK Logging
implementation 'net.logstash.logback:logstash-logback-encoder:7.4'

runtimeOnly 'com.mysql:mysql-connector-j'

compileOnly 'org.projectlombok:lombok'
Expand Down
77 changes: 77 additions & 0 deletions docker-compose.elk.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
# =============================================================================
# ELK Stack (Elasticsearch + Logstash + Kibana)
# =============================================================================
# Log collection and analysis stack.
#
# Usage:
#   docker-compose -f docker-compose.yml -f docker-compose.elk.yml up -d
#
# Ports:
#   - Elasticsearch: 9200 (HTTP), 9300 (Transport)
#   - Logstash: 5044 (Beats), 5000 (TCP)
#   - Kibana: 5601
# =============================================================================
version: '3.8'

services:
  # ---------------------------------------------------------------------------
  # Elasticsearch: log storage and search engine
  # ---------------------------------------------------------------------------
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:8.11.0
    container_name: ecommerce-elasticsearch
    environment:
      # Single-node dev setup; X-Pack security disabled for local use only —
      # do not expose these ports on a shared network.
      - discovery.type=single-node
      - xpack.security.enabled=false
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ports:
      - "9200:9200"
      - "9300:9300"
    volumes:
      - elasticsearch_data:/usr/share/elasticsearch/data
    healthcheck:
      # 'yellow' is the expected steady state for a single-node cluster
      # (replicas can never be allocated), so accept green or yellow.
      test: ["CMD-SHELL", "curl -s http://localhost:9200/_cluster/health | grep -q 'green\\|yellow'"]
      interval: 30s
      timeout: 10s
      retries: 5

  # ---------------------------------------------------------------------------
  # Logstash: log collection and parsing
  # ---------------------------------------------------------------------------
  logstash:
    image: docker.elastic.co/logstash/logstash:8.11.0
    container_name: ecommerce-logstash
    ports:
      - "5044:5044"   # Beats input
      - "5001:5000"   # TCP input (Spring Boot Logback) - host 5000 is taken by macOS AirPlay
      - "9600:9600"   # Logstash monitoring API
    volumes:
      - ./monitoring/logstash/pipeline:/usr/share/logstash/pipeline:ro
      - ./monitoring/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
    environment:
      - "LS_JAVA_OPTS=-Xms256m -Xmx256m"
    depends_on:
      elasticsearch:
        condition: service_healthy
    healthcheck:
      # Monitoring API comes up once Logstash has started; gives the other
      # services a health signal consistent with elasticsearch/kibana.
      test: ["CMD-SHELL", "curl -sf http://localhost:9600/?pretty || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 5

  # ---------------------------------------------------------------------------
  # Kibana: log visualization dashboard
  # ---------------------------------------------------------------------------
  kibana:
    image: docker.elastic.co/kibana/kibana:8.11.0
    container_name: ecommerce-kibana
    ports:
      - "5601:5601"
    environment:
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
    depends_on:
      elasticsearch:
        condition: service_healthy
    healthcheck:
      test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status | grep -q 'available'"]
      interval: 30s
      timeout: 10s
      retries: 5

volumes:
  elasticsearch_data:
116 changes: 116 additions & 0 deletions docker-compose.monitoring.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
# =============================================================================
# Monitoring environment Docker Compose
# =============================================================================
# Stack for load testing and performance monitoring.
#
# Usage:
#   docker-compose -f docker-compose.yml -f docker-compose.monitoring.yml up -d
#
# Included services:
#   - Prometheus (localhost:9090): metrics collection
#   - Grafana (localhost:3000): dashboards
#   - Redis Exporter (localhost:9121): Redis metrics
#   - Kafka Exporter (localhost:9308): Kafka metrics
# =============================================================================
version: '3.8'

services:
  # ---------------------------------------------------------------------------
  # Prometheus: metrics collection and storage
  # ---------------------------------------------------------------------------
  prometheus:
    image: prom/prometheus:v2.47.0
    container_name: ecommerce-prometheus
    ports:
      - "9090:9090"
    volumes:
      - ./monitoring/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/usr/share/prometheus/console_libraries'
      - '--web.console.templates=/usr/share/prometheus/consoles'
      # Allows config reload via HTTP POST /-/reload and remote-write ingestion
      # (used by load-test tooling such as k6).
      - '--web.enable-lifecycle'
      - '--web.enable-remote-write-receiver'
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:9090/-/healthy"]
      interval: 30s
      timeout: 10s
      retries: 3

  # ---------------------------------------------------------------------------
  # Grafana: metrics visualization dashboards
  # ---------------------------------------------------------------------------
  grafana:
    image: grafana/grafana:10.1.0
    container_name: ecommerce-grafana
    ports:
      - "3000:3000"
    environment:
      # Defaults are dev-only credentials; override via environment for any
      # shared deployment (e.g. GRAFANA_ADMIN_PASSWORD=...).
      GF_SECURITY_ADMIN_USER: ${GRAFANA_ADMIN_USER:-admin}
      GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_ADMIN_PASSWORD:-admin}
      GF_USERS_ALLOW_SIGN_UP: "false"
    volumes:
      - grafana_data:/var/lib/grafana
      - ./monitoring/grafana/provisioning:/etc/grafana/provisioning:ro
    depends_on:
      - prometheus
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:3000/api/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  # ---------------------------------------------------------------------------
  # Redis Exporter: Redis metrics collection
  # ---------------------------------------------------------------------------
  # Collected metrics:
  #   - redis_commands_processed_total: processed command count
  #   - redis_connected_clients: connected client count
  #   - redis_memory_used_bytes: memory usage
  #   - redis_keyspace_hits/misses: cache hit/miss counts
  # ---------------------------------------------------------------------------
  redis-exporter:
    # NOTE: the default (non-alpine) redis_exporter image is built FROM scratch
    # and contains no shell or wget, so a CMD wget healthcheck can never pass.
    # The -alpine variant bundles busybox, making the healthcheck below work.
    image: oliver006/redis_exporter:v1.55.0-alpine
    container_name: ecommerce-redis-exporter
    ports:
      - "9121:9121"
    environment:
      REDIS_ADDR: redis:6379
    depends_on:
      - redis
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:9121/metrics"]
      interval: 30s
      timeout: 10s
      retries: 3

  # ---------------------------------------------------------------------------
  # Kafka Exporter: Kafka metrics collection
  # ---------------------------------------------------------------------------
  # Collected metrics:
  #   - kafka_consumergroup_lag: consumer lag (processing delay)
  #   - kafka_topic_partitions: partitions per topic
  #   - kafka_brokers: active broker count
  # ---------------------------------------------------------------------------
  kafka-exporter:
    image: danielqsj/kafka-exporter:v1.7.0
    container_name: ecommerce-kafka-exporter
    ports:
      - "9308:9308"
    command:
      - '--kafka.server=kafka:9092'
      - '--topic.filter=.*'
      - '--group.filter=.*'
    depends_on:
      - kafka
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:9308/metrics"]
      interval: 30s
      timeout: 10s
      retries: 3

volumes:
  prometheus_data:
  grafana_data:
92 changes: 92 additions & 0 deletions docker-compose.pinpoint.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
# =============================================================================
# Pinpoint APM (Application Performance Management)
# =============================================================================
# Distributed tracing and performance monitoring.
#
# Usage:
#   docker-compose -f docker-compose.yml -f docker-compose.pinpoint.yml up -d
#
# Ports:
#   - Pinpoint Web: 8079
#   - Pinpoint Collector: 9991-9993
#   - HBase: 16010 (Master UI), 16030 (Region Server UI)
#
# Agent setup:
#   java -javaagent:/path/to/pinpoint-agent.jar \
#        -Dpinpoint.agentId=ecommerce-1 \
#        -Dpinpoint.applicationName=ecommerce \
#        -jar app.jar
# =============================================================================
version: '3.8'

services:
  # ---------------------------------------------------------------------------
  # HBase: Pinpoint data store (also runs the embedded ZooKeeper that the
  # collector and web containers point at via PINPOINT_ZOOKEEPER_ADDRESS)
  # ---------------------------------------------------------------------------
  pinpoint-hbase:
    image: pinpointdocker/pinpoint-hbase:2.5.2
    container_name: ecommerce-pinpoint-hbase
    ports:
      - "16010:16010"   # HBase Master UI
      - "16030:16030"   # HBase Region Server UI
    volumes:
      - pinpoint_hbase_data:/home/pinpoint/hbase
      - pinpoint_zookeeper_data:/home/pinpoint/zookeeper
    healthcheck:
      test: ["CMD-SHELL", "curl -sf http://localhost:16010/master-status || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 10
      # HBase takes a while to initialize its tables on first boot.
      start_period: 60s

  # ---------------------------------------------------------------------------
  # Pinpoint Collector: receives agent data
  # ---------------------------------------------------------------------------
  pinpoint-collector:
    image: pinpointdocker/pinpoint-collector:2.5.2
    container_name: ecommerce-pinpoint-collector
    ports:
      - "9991:9991/tcp"   # gRPC Agent
      - "9992:9992/tcp"   # gRPC Stat
      - "9993:9993/tcp"   # gRPC Span
      - "9994:9994/tcp"   # gRPC (additional)
      - "9995:9995/udp"   # UDP Stat
      - "9996:9996/udp"   # UDP Span
    environment:
      - SPRING_PROFILES_ACTIVE=release
      - PINPOINT_ZOOKEEPER_ADDRESS=pinpoint-hbase
      - CLUSTER_ENABLE=false
      # 2181 is the ZooKeeper client port exposed by the pinpoint-hbase image.
      - HBASE_HOST=pinpoint-hbase
      - HBASE_PORT=2181
      - FLINK_CLUSTER_ENABLE=false
    depends_on:
      pinpoint-hbase:
        condition: service_healthy

  # ---------------------------------------------------------------------------
  # Pinpoint Web: dashboard UI
  # ---------------------------------------------------------------------------
  pinpoint-web:
    image: pinpointdocker/pinpoint-web:2.5.2
    container_name: ecommerce-pinpoint-web
    ports:
      - "8079:8079"
    environment:
      - SPRING_PROFILES_ACTIVE=release
      - PINPOINT_ZOOKEEPER_ADDRESS=pinpoint-hbase
      - CLUSTER_ENABLE=false
      - HBASE_HOST=pinpoint-hbase
      - HBASE_PORT=2181
      # Dev-only default credential — change before any shared deployment.
      - ADMIN_PASSWORD=admin
    depends_on:
      pinpoint-hbase:
        condition: service_healthy
    healthcheck:
      test: ["CMD-SHELL", "curl -sf http://localhost:8079/serverTime.pinpoint || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 5

volumes:
  pinpoint_hbase_data:
  pinpoint_zookeeper_data:
2 changes: 1 addition & 1 deletion docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ services:
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_LOG_DIRS: /var/lib/kafka/data
CLUSTER_ID: 'ecommerce-kafka-cluster-001'
CLUSTER_ID: 'MkU3OEVBNTcwNTJENDM2Qg'
volumes:
- kafka_data:/var/lib/kafka/data
healthcheck:
Expand Down
Loading