"""Shared constants for Hancock modules.
Centralizes configuration values, limits, and defaults that are referenced
across hancock_agent, orchestration_controller, input_validator, and tests.
"""

from __future__ import annotations

# ── OpenAI dependency guard ───────────────────────────────────────────────────
OPENAI_IMPORT_ERROR_MSG = "OpenAI client not installed. Run: pip install openai"


def require_openai(openai_cls):
    """Raise ImportError when the OpenAI dependency is missing."""
    if openai_cls is None:
        raise ImportError(OPENAI_IMPORT_ERROR_MSG)
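
# Example usage (a sketch of the import-guard pattern a caller such as
# hancock_agent might follow; the local names below are illustrative):
#
#     try:
#         from openai import OpenAI
#     except ImportError:
#         OpenAI = None
#
#     require_openai(OpenAI)  # raises ImportError with an actionable message
#     client = OpenAI()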

# ── Version ───────────────────────────────────────────────────────────────────
VERSION = "0.6.0"

# ── API defaults ──────────────────────────────────────────────────────────────
DEFAULT_PORT = 5000
DEFAULT_RATE_LIMIT = 60          # requests allowed per window
RATE_LIMIT_WINDOW_SECONDS = 60   # window length in seconds
MAX_RATE_LIMIT_ENTRIES = 10_000  # max tracked client IPs before eviction
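
# How these compose (illustrative sketch only; the actual enforcement
# presumably lives in orchestration_controller, and `hits` is a hypothetical
# dict mapping client IP to a list of request timestamps):
#
#     recent = [t for t in hits[ip] if now - t < RATE_LIMIT_WINDOW_SECONDS]
#     if len(recent) >= DEFAULT_RATE_LIMIT:
#         return "rate limit exceeded", 429
#     if len(hits) > MAX_RATE_LIMIT_ENTRIES:
#         hits.pop(next(iter(hits)))  # evict the oldest tracked IP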

# ── LLM defaults per mode ─────────────────────────────────────────────────────
# Temperature and max_tokens are tuned per use case:
#   - lower temperature for deterministic output (code, rules, triage)
#   - higher temperature for creative/conversational responses
MODE_DEFAULTS: dict[str, dict] = {
    "auto":    {"temperature": 0.7, "max_tokens": 1024, "top_p": 0.95},
    "pentest": {"temperature": 0.7, "max_tokens": 1024, "top_p": 0.95},
    "soc":     {"temperature": 0.4, "max_tokens": 1200, "top_p": 0.95},
    "code":    {"temperature": 0.2, "max_tokens": 2048, "top_p": 0.70},
    "ciso":    {"temperature": 0.3, "max_tokens": 2048, "top_p": 0.95},
    "sigma":   {"temperature": 0.2, "max_tokens": 2048, "top_p": 0.70},
    "yara":    {"temperature": 0.2, "max_tokens": 2048, "top_p": 0.70},
    "ioc":     {"temperature": 0.3, "max_tokens": 1000, "top_p": 0.90},
    "osint":   {"temperature": 0.3, "max_tokens": 1200, "top_p": 0.90},
}
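
# Example lookup (sketch; `mode`, `client`, and `messages` are hypothetical
# caller-side names, and the model name is illustrative): unknown modes can
# fall back to the "auto" profile.
#
#     params = MODE_DEFAULTS.get(mode, MODE_DEFAULTS["auto"])
#     reply = client.chat.completions.create(
#         model="gpt-4o", messages=messages, **params
#     )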

# ── Supported modes ───────────────────────────────────────────────────────────
ALL_MODES = tuple(MODE_DEFAULTS.keys())

# ── HTTP response headers ────────────────────────────────────────────────────
HEADER_RATE_LIMIT = "X-RateLimit-Limit"
HEADER_RATE_REMAINING = "X-RateLimit-Remaining"
HEADER_RATE_WINDOW = "X-RateLimit-Window"
HEADER_REQUEST_ID = "X-Request-ID"
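
# Example (sketch, assuming a Flask-style response object named `resp` and a
# caller-tracked `remaining` count; both names are hypothetical):
#
#     resp.headers[HEADER_RATE_LIMIT] = str(DEFAULT_RATE_LIMIT)
#     resp.headers[HEADER_RATE_REMAINING] = str(remaining)
#     resp.headers[HEADER_RATE_WINDOW] = str(RATE_LIMIT_WINDOW_SECONDS)
#     resp.headers[HEADER_REQUEST_ID] = request_id
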
# ── Webhook ───────────────────────────────────────────────────────────────────
WEBHOOK_SIGNATURE_HEADER = "X-Hancock-Signature"
WEBHOOK_SIGNATURE_PREFIX = "sha256="
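
# Example verification (stdlib-only sketch; `secret` and `body` are
# hypothetical bytes supplied by the caller, and hmac.compare_digest
# provides a constant-time comparison):
#
#     import hashlib
#     import hmac
#
#     digest = hmac.new(secret, body, hashlib.sha256).hexdigest()
#     expected = WEBHOOK_SIGNATURE_PREFIX + digest
#     ok = hmac.compare_digest(expected, received_signature_header)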