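"""hdc_data_folding_v2.py

Slice refined 1h candle CSVs into (X, Y, O) sliding-window triplets:
X is a WINDOW_SIZE-step feature window, Y the WINDOW_SIZE-step target
window that immediately follows it, and O the last row of original_*
values in the feature window. Triplets are saved per input file, then
optionally merged into single on-disk memmap arrays.
"""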
import polars as pl
import numpy as np
import os
from glob import glob
from numpy.lib.format import open_memmap
# === CONFIGURATION ===
DATASET_DIR = "datasets/refined/1h"
OUTPUT_DIR = "datasets/hdc_pairs_200_shot"
ORIGINAL_KEEP_COLUMNS = [
    "original_close",
    "original_volume",
    "original_quote_asset_volume",
    "original_number_of_trades",
    "original_taker_buy_base_asset_volume",
    "original_taker_buy_quote_asset_volume",
]
TARGET_LABELS = [
    "target_open", "target_high", "target_low", "target_close",
    "target_volume", "target_quote_asset_volume", "target_number_of_trades",
    "target_taker_buy_base_asset_volume", "target_taker_buy_quote_asset_volume",
]
NUM_ORIGINAL_COLUMNS = 9    # leading original_* columns in each CSV
NUM_TARGET_COLUMNS = 9      # target_* columns that follow them
NUM_FEATURE_COLUMNS = None  # auto-detected in load_dataset()
# Sliding-window configuration
WINDOW_SIZE = 200
STRIDE = 1
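# Assumed CSV column layout (inferred from the constants above):
#   cols [0, 9)    original_* values; ORIGINAL_KEEP_COLUMNS keeps 6 of them
#   cols [9, 18)   target_* values (TARGET_LABELS)
#   cols [18, ...) feature columns; their count is auto-detected below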
# === DATA LOADER ===
def load_dataset(path: str):
    df = pl.read_csv(path)
    total_cols = len(df.columns)
    feature_start = NUM_ORIGINAL_COLUMNS + NUM_TARGET_COLUMNS
    global NUM_FEATURE_COLUMNS
    NUM_FEATURE_COLUMNS = total_cols - feature_start
    target_labels = TARGET_LABELS
    all_cols = df.columns
    feature_labels = all_cols[feature_start:]
    features = df[:, feature_start:].to_numpy().astype(np.float32)
    targets = df.select(target_labels).to_numpy().astype(np.float32)
    # Keep every row of the original_* columns; the slicer below picks the
    # final row of each feature window from this array.
    original = df.select(ORIGINAL_KEEP_COLUMNS)
    original_vals = original.to_numpy().astype(np.float32)
    return features, targets, original_vals, feature_labels, target_labels
# === SLIDING-WINDOW PAIR SLICER ===
def slice_dataset_into_triplets(features, targets, originals, window_size=200, stride=1):
    X, Y, O = [], [], []
    # The target window must also fit: i + 2*window_size <= len(features),
    # so the last valid start index is len(features) - 2*window_size
    # (+1 so range() includes it).
    max_i = len(features) - 2 * window_size + 1
    for i in range(0, max_i, stride):
        X.append(features[i : i + window_size])
        Y.append(targets[i + window_size : i + 2 * window_size])
        O.append(originals[i + window_size - 1])  # last row of the feature window
    return np.array(X), np.array(Y), np.array(O)
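# For a file with N rows at STRIDE=1 this yields N - 2*window_size + 1
# triplets with per-triplet shapes (window_size, NUM_FEATURE_COLUMNS),
# (window_size, len(TARGET_LABELS)) and (len(ORIGINAL_KEEP_COLUMNS),).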
# === STEP 1: PROCESS & SAVE PER-FILE PAIRS ===
def process_and_save_all(dataset_dir, output_dir, window_size, stride):
    os.makedirs(output_dir, exist_ok=True)
    csv_paths = sorted(glob(os.path.join(dataset_dir, "*.csv")))
    # Will hold labels from the first file
    saved_feature_labels = None
    saved_target_labels = None
    for path in csv_paths:
        name = os.path.splitext(os.path.basename(path))[0]
        print(f"\nProcessing {name}")
        feats, targs, origs, feat_lbls, targ_lbls = load_dataset(path)
        X, Y, O = slice_dataset_into_triplets(feats, targs, origs, window_size, stride)
        print(f"  Generated {X.shape[0]} triplets")
        # save per-file
        np.save(os.path.join(output_dir, f"X_{name}.npy"), X)
        np.save(os.path.join(output_dir, f"Y_{name}.npy"), Y)
        np.save(os.path.join(output_dir, f"O_{name}.npy"), O)
        # capture labels once
        if saved_feature_labels is None:
            saved_feature_labels = feat_lbls
            saved_target_labels = targ_lbls
    # also save labels
    with open(os.path.join(output_dir, "feature_labels.txt"), "w") as f:
        f.write("\n".join(saved_feature_labels))
    with open(os.path.join(output_dir, "target_labels.txt"), "w") as f:
        f.write("\n".join(saved_target_labels))
    with open(os.path.join(output_dir, "original_labels.txt"), "w") as f:
        f.write("\n".join(ORIGINAL_KEEP_COLUMNS))
    print("\nPer-file slicing complete.")
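# Per-file outputs: X_<name>.npy, Y_<name>.npy, O_<name>.npy, plus the three
# *_labels.txt files -- everything merge_pairs() below expects to find.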
# === STEP 2: MERGE ON-DISK MEMMAPS ===
def merge_pairs(output_dir, window_size):
    # Find all per-file npys; exclude previously merged outputs so a
    # re-run does not fold the combined arrays back into themselves.
    X_paths = sorted(p for p in glob(os.path.join(output_dir, "X_*.npy")) if "combined" not in p)
    Y_paths = sorted(p for p in glob(os.path.join(output_dir, "Y_*.npy")) if "combined" not in p)
    O_paths = sorted(p for p in glob(os.path.join(output_dir, "O_*.npy")) if "combined" not in p)
    assert len(X_paths) == len(Y_paths) == len(O_paths), "Mismatched X/Y/O files"
    total = sum(np.load(xp, mmap_mode="r").shape[0] for xp in X_paths)
    print(f"\nTotal sequences to merge: {total}")
    # get dims from one file
    sample0 = np.load(X_paths[0], mmap_mode="r")
    _, w, f = sample0.shape
    assert w == window_size, "Window size mismatch"
    # similarly, target dims
    t0 = np.load(Y_paths[0], mmap_mode="r")
    _, w2, t = t0.shape
    assert w2 == window_size
    o0 = np.load(O_paths[0], mmap_mode="r")
    _, o_dim = o0.shape
    # Preallocate the combined arrays on disk so nothing is held fully in RAM
    X_comb = open_memmap(os.path.join(output_dir, "X_pairs_combined.npy"),
                         mode="w+", dtype=np.float32, shape=(total, window_size, f))
    Y_comb = open_memmap(os.path.join(output_dir, "Y_pairs_combined.npy"),
                         mode="w+", dtype=np.float32, shape=(total, window_size, t))
    O_comb = open_memmap(os.path.join(output_dir, "O_pairs_combined.npy"),
                         mode="w+", dtype=np.float32, shape=(total, o_dim))
    # second pass: stream data in
    offset = 0
    for xp, yp, op in zip(X_paths, Y_paths, O_paths):
        x_arr = np.load(xp, mmap_mode="r")
        y_arr = np.load(yp, mmap_mode="r")
        o_arr = np.load(op, mmap_mode="r")
        n = x_arr.shape[0]
        X_comb[offset:offset + n, :, :] = x_arr
        Y_comb[offset:offset + n, :, :] = y_arr
        O_comb[offset:offset + n, :] = o_arr
        offset += n
        print(f"  Merged {n} sequences, offset now {offset}")
    # flush to disk
    X_comb.flush()
    Y_comb.flush()
    O_comb.flush()
    print("\nMerge complete:")
    print(f"  X_pairs_combined.npy -> shape {(total, window_size, f)}")
    print(f"  Y_pairs_combined.npy -> shape {(total, window_size, t)}")
    print(f"  O_pairs_combined.npy -> shape {(total, o_dim)}")
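# Downstream consumers can read the merged arrays lazily without loading
# them into RAM, e.g. (sketch):
#   X = np.load(os.path.join(OUTPUT_DIR, "X_pairs_combined.npy"), mmap_mode="r")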
# === MAIN ===
if __name__ == "__main__":
    # 1) per-file slicing
    process_and_save_all(DATASET_DIR, OUTPUT_DIR, WINDOW_SIZE, STRIDE)
    # 2) prompt to merge
    resp = input("\nMerge all per-file pairs into combined memmap? (y/n): ").strip().lower()
    if resp in ("y", "yes"):
        merge_pairs(OUTPUT_DIR, WINDOW_SIZE)
    else:
        print("Skipping merge. .npy files remain per-dataset in", OUTPUT_DIR)