|
16 | 16 | import multiprocessing as mp |
17 | 17 | # from WPI_SCA_LIBRARY.LeakageModels import Sbox |
18 | 18 | from LeakageModels import Sbox |
| 19 | +from WPI_SCA_LIBRARY.MRE import * |
19 | 20 |
|
20 | 21 |
|
21 | 22 | def signal_to_noise_ratio(labels: dict, visualize: bool = False, visualization_path: any = None) -> np.ndarray: |
@@ -396,3 +397,56 @@ def success_rate_guessing_entropy(correct_keys: np.ndarray, experiment_ranks: np |
396 | 397 | guessing_entropy = guessing_entropy / num_experiments |
397 | 398 |
|
398 | 399 | return success_rate, guessing_entropy |
| 400 | + |
| 401 | + |
def DecomposeUncertainty(Variational_models: list, traces: np.ndarray, Predictions: np.ndarray,
                         Labels: np.ndarray) -> (np.ndarray, np.ndarray, np.ndarray):
    """
    Decomposes predictive uncertainty into aleatoric and epistemic components
    using multiple variational model forward passes.

    Column 0 of each returned array holds the uncertainty value for a trace;
    column 1 holds the probability the full model assigned to that trace's
    true label (the same value is stored in all three arrays).

    :param Variational_models: List of variational models (e.g., MC dropout or ensemble models)
    :type Variational_models: list
    :param traces: Input traces used for prediction
    :type traces: np.ndarray
    :param Predictions: Predictive probabilities from the full model (shape: N x 256)
    :type Predictions: np.ndarray
    :param Labels: True class labels for each trace (shape: N)
    :type Labels: np.ndarray
    :return: Aleatoric uncertainty (A), Epistemic uncertainty (E), and Predictive entropy (P)
    :rtype: (np.ndarray, np.ndarray, np.ndarray)
    :Authors: Mohammad N. (WPI)
    """
    # Fix: the loops were hard-coded to range(30) while pred_E/ent_E were sized
    # by len(Variational_models) -> IndexError with fewer than 30 models and
    # silently ignored models with more. Derive the count from the input.
    num_models = len(Variational_models)
    number_of_points = traces.shape[0]

    # One forward pass per variational model over all traces.
    # (traces[:number_of_points] was a redundant full slice.)
    pred_E = np.zeros((num_models, number_of_points, 256))
    for j in range(num_models):
        pred_E[j] = Variational_models[j](traces)

    # Per-model, per-trace entropy of the variational predictions.
    # NOTE(review): mbre is assumed to map a 256-way probability vector to a
    # scalar entropy-like value (comes from the MRE star-import) — confirm.
    ent_E = np.zeros((num_models, number_of_points))
    for m in range(num_models):
        for k in range(number_of_points):
            ent_E[m, k] = mbre(pred_E[m, k])

    # Aleatoric uncertainty = mean entropy across models. Hoisted out of the
    # per-trace loop below; it was previously recomputed for every i (O(N^2)).
    mean_ent = np.mean(ent_E, axis=0)

    P = np.zeros((number_of_points, 2))
    A = np.zeros((number_of_points, 2))
    E = np.zeros((number_of_points, 2))

    for i in range(number_of_points):
        P[i, 0] = mbre(Predictions[i])   # total predictive entropy
        A[i, 0] = mean_ent[i]            # aleatoric component
        E[i, 0] = P[i, 0] - A[i, 0]      # epistemic = total - aleatoric
        # Probability of the correct label, stored alongside each metric.
        P[i, 1] = A[i, 1] = E[i, 1] = Predictions[i, Labels[i]]

    return A, E, P
| 452 | + |
0 commit comments