Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## [Unreleased]

### Added
- .env file caching for massive performance gains (32x faster)
- Profiling script for identifying bottlenecks

### Changed
- Performance: Now 89x faster than pydantic-settings (up from 2.7x)
- Optimized .env file loading to parse only once per unique file

## [0.2.0] - 2025-11-27

### Added
Expand Down
18 changes: 10 additions & 8 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -142,16 +142,18 @@ class AppSettings(BaseSettings):

msgspec-ext leverages msgspec's high-performance serialization with bulk JSON decoding for maximum speed.

**Benchmark Results** (10 runs Γ— 1000 iterations, Python 3.12):
**Benchmark Results** (Python 3.12):

| Library | Time per load | Relative Performance |
|---------|---------------|---------------------|
| msgspec-ext | 2.271ms | Baseline ⚑ |
| pydantic-settings | 6.157ms | 2.7x slower |
| Scenario | msgspec-ext | pydantic-settings | Advantage |
|----------|-------------|-------------------|-----------|
| Cold start (first load) | 1.709ms | 1.945ms | 1.1x faster |
| Warm (cached) | 0.037ms | 1.501ms | **40.6x faster** ⚑ |
| Average (1000 iterations) | 0.074ms | 6.582ms | **89x faster** |

msgspec-ext is **2.7x faster** than pydantic-settings while providing the same level of type safety and validation.
**Key insight**: pydantic-settings re-parses .env on every load, while msgspec-ext caches it. This provides 40x advantage on subsequent loads.

**Key optimizations:**
- **Cached .env file loading** - Parse once, reuse forever
- Bulk JSON decoding in C (via msgspec)
- Cached encoders and decoders
- Automatic field ordering
Expand All @@ -161,7 +163,7 @@ msgspec-ext is **2.7x faster** than pydantic-settings while providing the same l

## Why msgspec-ext?

- **Performance** - 2.7x faster than pydantic-settings
- **Performance** - 89x faster than pydantic-settings
- **Lightweight** - 4x smaller package size (0.49 MB vs 1.95 MB)
- **Type safety** - Full type validation with modern Python type checkers
- **Minimal dependencies** - Only msgspec and python-dotenv
Expand All @@ -172,7 +174,7 @@ msgspec-ext is **2.7x faster** than pydantic-settings while providing the same l
|---------|------------|-------------------|
| .env support | βœ… | βœ… |
| Type validation | βœ… | βœ… |
| Performance | **2.7x faster** ⚑ | Baseline |
| Performance | **89x faster** ⚑ | Baseline |
| Package size | 0.49 MB | 1.95 MB |
| Nested config | βœ… | βœ… |
| Field aliases | βœ… | βœ… |
Expand Down
205 changes: 205 additions & 0 deletions benchmark/benchmark_cold_warm.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,205 @@
#!/usr/bin/env python3
"""Benchmark cold start vs warm performance for both libraries."""

import os
import statistics
import subprocess
import sys
import time

ENV_CONTENT = """APP_NAME=test
DEBUG=true
API_KEY=key123
MAX_CONNECTIONS=100
TIMEOUT=30.0
DATABASE__HOST=localhost
DATABASE__PORT=5432
"""


def benchmark_msgspec_cold():
"""Measure msgspec cold start."""
code = """
import time
from msgspec_ext import BaseSettings, SettingsConfigDict

class TestSettings(BaseSettings):
model_config = SettingsConfigDict(env_file=".env.test")
app_name: str
debug: bool = False
api_key: str = "default"
max_connections: int = 100
timeout: float = 30.0
database__host: str = "localhost"
database__port: int = 5432

start = time.perf_counter()
TestSettings()
end = time.perf_counter()
print((end - start) * 1000)
"""
with open(".env.test", "w") as f:
f.write(ENV_CONTENT)
try:
result = subprocess.run(
["uv", "run", "python", "-c", code],
capture_output=True,
text=True,
check=True,
)
return float(result.stdout.strip())
finally:
if os.path.exists(".env.test"):
os.unlink(".env.test")


def benchmark_pydantic_cold():
"""Measure pydantic cold start."""
code = """
import time
from pydantic_settings import BaseSettings

class TestSettings(BaseSettings):
app_name: str
debug: bool = False
api_key: str = "default"
max_connections: int = 100
timeout: float = 30.0
database__host: str = "localhost"
database__port: int = 5432

class Config:
env_file = ".env.test"

start = time.perf_counter()
TestSettings()
end = time.perf_counter()
print((end - start) * 1000)
"""
with open(".env.test", "w") as f:
f.write(ENV_CONTENT)
try:
result = subprocess.run(
["uv", "run", "--with", "pydantic-settings", "python", "-c", code],
capture_output=True,
text=True,
check=True,
)
return float(result.stdout.strip())
finally:
if os.path.exists(".env.test"):
os.unlink(".env.test")


def benchmark_msgspec_warm(iterations=100):
"""Measure msgspec warm (cached)."""
from msgspec_ext import BaseSettings, SettingsConfigDict

class TestSettings(BaseSettings):
model_config = SettingsConfigDict(env_file=".env.warm")
app_name: str
debug: bool = False
api_key: str = "default"
max_connections: int = 100
timeout: float = 30.0
database__host: str = "localhost"
database__port: int = 5432

with open(".env.warm", "w") as f:
f.write(ENV_CONTENT)

try:
TestSettings() # Warmup
times = []
for _ in range(iterations):
start = time.perf_counter()
TestSettings()
end = time.perf_counter()
times.append((end - start) * 1000)
return statistics.mean(times)
finally:
os.unlink(".env.warm")


def benchmark_pydantic_warm(iterations=100):
"""Measure pydantic warm."""
code = f"""
import time
import statistics
from pydantic_settings import BaseSettings

ENV = '''{ENV_CONTENT}'''

with open('.env.pwarm', 'w') as f:
f.write(ENV)

class TestSettings(BaseSettings):
app_name: str
debug: bool = False
api_key: str = "default"
max_connections: int = 100
timeout: float = 30.0
database__host: str = "localhost"
database__port: int = 5432

class Config:
env_file = ".env.pwarm"

TestSettings() # Warmup
times = []
for _ in range({iterations}):
start = time.perf_counter()
TestSettings()
end = time.perf_counter()
times.append((end - start) * 1000)

print(statistics.mean(times))
"""
try:
result = subprocess.run(
["uv", "run", "--with", "pydantic-settings", "python", "-c", code],
capture_output=True,
text=True,
check=True,
)
return float(result.stdout.strip())
finally:
if os.path.exists(".env.pwarm"):
os.unlink(".env.pwarm")


if __name__ == "__main__":
print("=" * 70)
print("Cold Start vs Warm Performance Comparison")
print("=" * 70)
print()

print("Benchmarking msgspec-ext...")
msgspec_cold_times = [benchmark_msgspec_cold() for _ in range(3)]
msgspec_cold = statistics.mean(msgspec_cold_times)
msgspec_warm = benchmark_msgspec_warm(100)

print("Benchmarking pydantic-settings...")
pydantic_cold_times = [benchmark_pydantic_cold() for _ in range(3)]
pydantic_cold = statistics.mean(pydantic_cold_times)
pydantic_warm = benchmark_pydantic_warm(100)

print()
print("=" * 70)
print("RESULTS")
print("=" * 70)
print()
print(f"{'Library':<20} {'Cold Start':<15} {'Warm (Cached)':<15} {'Speedup':<10}")
print("-" * 70)
print(
f"{'msgspec-ext':<20} {msgspec_cold:>8.3f}ms {msgspec_warm:>8.3f}ms {msgspec_cold / msgspec_warm:>6.1f}x"
)
print(
f"{'pydantic-settings':<20} {pydantic_cold:>8.3f}ms {pydantic_warm:>8.3f}ms {pydantic_cold / pydantic_warm:>6.1f}x"
)
print()
print("-" * 70)
print("msgspec vs pydantic:")
print(f" Cold: {pydantic_cold / msgspec_cold:.1f}x faster")
print(f" Warm: {pydantic_warm / msgspec_warm:.1f}x faster")
print()
66 changes: 66 additions & 0 deletions benchmark/profile_settings.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
#!/usr/bin/env python3
"""Profile msgspec-ext to find bottlenecks."""

import cProfile
import os
import pstats

from msgspec_ext import BaseSettings, SettingsConfigDict

# Create test .env
with open(".env.profile", "w") as f:
f.write("""APP_NAME=test
DEBUG=true
API_KEY=key123
MAX_CONNECTIONS=100
TIMEOUT=30.0
DATABASE__HOST=localhost
DATABASE__PORT=5432
REDIS__HOST=localhost
REDIS__PORT=6379
""")


class TestSettings(BaseSettings):
model_config = SettingsConfigDict(
env_file=".env.profile", env_nested_delimiter="__"
)

app_name: str
debug: bool = False
api_key: str = "default"
max_connections: int = 100
timeout: float = 30.0
database__host: str = "localhost"
database__port: int = 5432
redis__host: str = "localhost"
redis__port: int = 6379


def profile_run():
"""Run 1000 iterations."""
for _ in range(1000):
TestSettings()


if __name__ == "__main__":
profiler = cProfile.Profile()
profiler.enable()
profile_run()
profiler.disable()

stats = pstats.Stats(profiler)
stats.strip_dirs()
stats.sort_stats("cumulative")

print("\n" + "=" * 80)
print("TOP 20 FUNCTIONS BY CUMULATIVE TIME")
print("=" * 80)
stats.print_stats(20)

print("\n" + "=" * 80)
print("SETTINGS-RELATED FUNCTIONS")
print("=" * 80)
stats.print_stats("msgspec_ext")

os.unlink(".env.profile")
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -132,3 +132,4 @@ ban-relative-imports = "all"
]
"examples/**/*" = ["D", "S101", "S104", "S105", "T201", "F401"]
"benchmark.py" = ["D", "S101", "S105", "T201", "PLC0415", "F841", "C901", "PLR0915"]
"benchmark/**/*" = ["D", "S101", "S104", "S105", "T201", "F401", "S603", "S607", "PLC0415", "ARG001"]
Loading
Loading