diff --git a/analysis/generate_html_report.py b/analysis/generate_html_report.py
index 49aff78..527e6c9 100644
--- a/analysis/generate_html_report.py
+++ b/analysis/generate_html_report.py
@@ -28,6 +28,8 @@ def generate_report():
for fpath in csv_files:
fname = os.path.basename(fpath)
+ if fname == 'detector_results.csv':
+ continue
label = KNOWN_LABELS.get(fname, fname)
if label == fname:
# Parse 20260217032620_326am_10h_23m.csv
@@ -251,24 +253,24 @@ def generate_report():
rows.forEach(row => {
let hrs = parseFloat(row.cells[3].dataset.sort) || 0;
totalHrs += hrs;
-
+
let evts = parseFloat(row.cells[12].innerText) || 0;
totalEvents += evts;
-
+
let pcArr = row.cells[13].innerText.split('/');
totalEvents10 += parseInt(pcArr[0] || 0);
totalEvents15 += parseInt(pcArr[1] || 0);
-
+
totalMajorA += parseInt(row.cells[17].innerText) || 0;
totalMajorB += parseInt(row.cells[18].innerText) || 0;
totalMajorC += parseInt(row.cells[19].innerText) || 0;
-
+
sumTab += (parseFloat(row.querySelector('.cell-tab').dataset.value) || 0) * hrs;
sumScore += (parseFloat(row.querySelector('.cell-score').dataset.value) || 0) * hrs;
-
+
sumDelta += (parseFloat(row.querySelector('.cell-delta').dataset.value) || 0) * evts;
sumP90 += (parseFloat(row.querySelector('.cell-p90').dataset.value) || 0) * evts;
-
+
let typeArr = row.cells[11].innerText.split('/');
sumTypeA += (parseFloat(typeArr[0]) || 0) * evts;
sumTypeB += (parseFloat(typeArr[1]) || 0) * evts;
@@ -316,7 +318,7 @@ def generate_report():
tr.innerHTML = `
|
${mergedDate} |
- Merged: ${labels.join(' + ')} |
+ Merged: ${labels.join(' + ')} |
${hrStr} |
${newScore.toFixed(1)} |
${newTab.toFixed(1)} |
@@ -464,7 +466,7 @@ def generate_report():
| Inc |
Date / Time |
- Night Label |
+ Notes |
Length |
Score (0-100) |
TAB |
@@ -516,7 +518,7 @@ def cell(metric_key, val):
dt = datetime.datetime.strptime(m_date.group(1), "%Y%m%d%H%M%S")
prev_dt = dt - datetime.timedelta(days=1)
time_str = dt.strftime("%I:%M%p").lstrip("0").lower()
- date_str = f"{prev_dt.month}/{prev_dt.day}-{dt.month}/{dt.day} {time_str}"
+ date_str = f"{prev_dt.month}/{prev_dt.day}-{dt.month}/{dt.day}/{dt.strftime('%y')} {time_str}"
except:
pass
@@ -530,11 +532,28 @@ def cell(metric_key, val):
type_str = f"{r.get('pct_a', 0):.0f}/{r.get('pct_b', 0):.0f}/{r.get('pct_c', 0):.0f}"
+ # Uncheck daytime sessions by default (start hour in [6, 20), i.e. 6am up to but not including 8pm)
+ is_daytime = False
+ m_hour = re.match(r'^\d{8}(\d{2})', fname)
+ if m_hour:
+ hour = int(m_hour.group(1))
+ if 6 <= hour < 20:
+ is_daytime = True
+ checked_attr = "" if is_daytime else "checked"
+
html.append(f"
")
- html.append(f' | ')
+ html.append(f' | ')
chart_fname = fname.replace('.csv', '_chart.html')
html.append(f'{date_str} | ')
- html.append(f'{r["label"]} | ')
+ # Split label into display label and notes
+ label_text = r["label"]
+ # Strip date prefix and time/duration suffix, whatever remains is notes
+ tmp = label_text
+ tmp = re.sub(r'^\d{4}-\d{2}-\d{2}\s*', '', tmp)
+ tmp = re.sub(r'^\d+/\d+-\d+/\d+\s*', '', tmp)
+ tmp = re.sub(r'\d+[ap]m\s+\d+h\s+\d+m\s*$', '', tmp)
+ notes_text = tmp.strip()
+ html.append(f'{notes_text} | ')
html.append(f'{hr_str} | ')
html.append(cell('score', r['score']))
html.append(cell('tab', r['tab']))
diff --git a/analysis/hr_spike_detector.py b/analysis/hr_spike_detector.py
index 4adab5e..5f7f144 100644
--- a/analysis/hr_spike_detector.py
+++ b/analysis/hr_spike_detector.py
@@ -894,7 +894,7 @@ def load_data(filepath: str, source: str = 'auto') -> np.ndarray:
if hr_cols:
hr_col = hr_cols[0]
-
+
hr = df[hr_col].values.astype(float)
elif df.shape[1] == 1:
hr = df.iloc[:, 0].values.astype(float)
@@ -907,7 +907,21 @@ def load_data(filepath: str, source: str = 'auto') -> np.ndarray:
else:
print(f"Available columns: {list(df.columns)}")
raise ValueError("Could not identify HR column. Please specify.")
-
+
+ # Resample to 1Hz if timestamps indicate non-1s intervals
+ time_cols = [c for c in df.columns if c.lower() in ['time', 'timestamp']]
+ if time_cols and len(hr) >= 2:
+ try:
+ t = pd.to_datetime(df[time_cols[0]])
+ interval_s = (t.iloc[1] - t.iloc[0]).total_seconds()
+ if interval_s > 1.5: # Not already 1Hz
+ # Resample by repeating each value for its interval
+ hr_1hz = np.repeat(hr, int(round(interval_s)))
+ print(f" Resampled from {interval_s:.0f}s intervals: {len(hr)} -> {len(hr_1hz)} samples")
+ return hr_1hz
+ except Exception:
+ pass
+
return hr
except ImportError:
diff --git a/analysis/run_detector_batch.py b/analysis/run_detector_batch.py
index 2879232..972c5f8 100644
--- a/analysis/run_detector_batch.py
+++ b/analysis/run_detector_batch.py
@@ -42,18 +42,21 @@ def generate_session_chart(fpath, hr_smooth, baseline, valid, events, summary, c
os.makedirs(chart_dir, exist_ok=True)
- # Read the timestamp column
+ # Read the timestamp column and generate 1Hz time axis matching resampled data
try:
df = pd.read_csv(fpath)
if 'Time' in df.columns:
- t_axis = pd.to_datetime(df['Time'])
- start_time_str = t_axis.iloc[0].strftime("%Y-%m-%d %H:%M:%S")
- start_ts = t_axis.iloc[0].timestamp()
+ csv_times = pd.to_datetime(df['Time'])
+ start_time = csv_times.iloc[0]
+ start_time_str = start_time.strftime("%Y-%m-%d %H:%M:%S")
+ start_ts = start_time.timestamp()
+ # Generate 1Hz time axis to match resampled hr_smooth length
+ t_axis = pd.date_range(start=start_time, periods=len(hr_smooth), freq='1s')
else:
t_axis = np.arange(len(hr_smooth)) / 3600.0
start_time_str = ""
start_ts = 0
-
+
# Extract CSV content for in-browser export
csv_content = df.to_csv(index=False).replace('\\n', '\\\\n').replace('\\"', '\\\\"')
csv_js_string = f'`{csv_content}`'
@@ -386,7 +389,7 @@ def analyze_night(fpath, label, generate_chart=False, chart_dir=None):
out_lines.append(f"{'='*80}")
try:
- # Load using the detector's own loader
+ # Load using the detector's own loader (resamples to 1Hz)
hr_raw = load_data(fpath, source='auto')
n = len(hr_raw)
hrs = n / 3600
diff --git a/screenshot.png b/screenshot.png
new file mode 100644
index 0000000..eaf7b33
Binary files /dev/null and b/screenshot.png differ