diff --git a/.gitignore b/.gitignore index 58eb08c..497c1c7 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,4 @@ __pycache__/ MEDfl.egg-info/ fl-env/ dist/ -build/ notebooks/ -docs/ diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 0000000..f5c674f --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,35 @@ +version: 2 + +sphinx: + configuration: docs/source/conf.py + +python: + version: "3.9" + install: + - method: pip + path: . + + + + + + + + + + + + + + + + + + + + + + + + + - requirements: requirements.txt diff --git a/MEDfl.egg-info/PKG-INFO b/MEDfl.egg-info/PKG-INFO index cf5deb1..f9055c3 100644 --- a/MEDfl.egg-info/PKG-INFO +++ b/MEDfl.egg-info/PKG-INFO @@ -1,6 +1,6 @@ -Metadata-Version: 2.1 +Metadata-Version: 2.4 Name: MEDfl -Version: 2.0.5 +Version: 2.0.5.dev4 Summary: Python Open-source package for simulating federated learning and differential privacy Home-page: https://github.com/MEDomics-UdeS/MEDfl Author: MEDomics consortium @@ -17,6 +17,37 @@ Classifier: Programming Language :: Python :: 3.9 Requires-Python: >=3.8,<3.13 Description-Content-Type: text/markdown License-File: LICENSE +Requires-Dist: flwr~=1.18.0 +Requires-Dist: matplotlib~=3.6.3 +Requires-Dist: numpy~=1.26.4 +Requires-Dist: opacus~=1.5.3 +Requires-Dist: pandas~=1.5.2 +Requires-Dist: PyYAML~=6.0 +Requires-Dist: setuptools~=68.0.0 +Requires-Dist: Sphinx~=5.3.0 +Requires-Dist: SQLAlchemy~=1.4.47 +Requires-Dist: torch>=2.7.0 +Requires-Dist: datetime~=5.1 +Requires-Dist: scikit-learn~=1.2.2 +Requires-Dist: sphinx-jsonschema==1.19.1 +Requires-Dist: sphinx-rtd-dark-mode==1.2.4 +Requires-Dist: plotly==5.19.0 +Requires-Dist: optuna==3.5.0 +Requires-Dist: mysql-connector-python~=9.3.0 +Requires-Dist: seaborn~=0.13.2 +Requires-Dist: flwr[simulation] +Dynamic: author +Dynamic: author-email +Dynamic: classifier +Dynamic: description +Dynamic: description-content-type +Dynamic: home-page +Dynamic: keywords +Dynamic: license-file +Dynamic: project-url +Dynamic: requires-dist +Dynamic: requires-python +Dynamic: summary # MEDfl: A Collaborative Framework for Federated Learning in Medicine diff --git a/MEDfl/rw/client.py b/MEDfl/rw/client.py index 7eda953..4fed968 100644 --- a/MEDfl/rw/client.py +++ b/MEDfl/rw/client.py @@ -1,4 +1,4 @@ -# client.py +# File: client.py import argparse import pandas as pd @@ -21,11 +21,6 @@ except ImportError: GPUtil = None -try: - import xgboost as xgb -except Exception: - xgb = None - class DPConfig: """ @@ -40,10 +35,10 @@ class DPConfig: def __init__( self, - noise_multiplier: float = 1.0, - max_grad_norm: float = 1.0, - batch_size: int = 32, - secure_rng: bool = False, + noise_multiplier=1.0, + max_grad_norm=1.0, + batch_size=32, + secure_rng=False, ): self.noise_multiplier = noise_multiplier self.max_grad_norm = max_grad_norm @@ -51,356 +46,346 @@ def __init__( self.secure_rng = secure_rng -def booster_to_parameters(bst): - # Send JSON so the server can merge trees safely - raw = bst.save_raw(raw_format="json") - # NumPyClient expects a list of numpy arrays - return [np.frombuffer(raw, dtype=np.uint8)] - - -def parameters_to_booster(parameters): - # Handle numpy uint8 array or raw bytes/bytearray - buf = parameters[0] - if isinstance(buf, (bytes, bytearray, memoryview)): - raw = bytes(buf) - else: - arr = np.asarray(buf, dtype=np.uint8) - raw = arr.tobytes() - - booster = xgb.Booster() - # xgboost can load both JSON and binary buffers - booster.load_model(bytearray(raw)) - return booster - - class FlowerClient(fl.client.NumPyClient): def __init__( self, - 
server_address: str, - data_path: str = "data/data.csv", - dp_config: DPConfig = None, - model_type: str = "nn", - xgb_params: dict = None, - xgb_rounds: int = 50, + server_address, + data_path="data/data.csv", + dp_config=None, + # NEW (optional client overrides; do NOT remove old args) + val_frac=None, + test_frac=None, + id_col=None, + test_ids=None, + seed=42, ): self.server_address = server_address - self.model_type = model_type.lower() - self.xgb_params = xgb_params or {} - self.xgb_rounds = xgb_rounds - - # Store hostname for datasetConfig host-specific overrides - self.hostname = socket.gethostname() - - # Load once; keep the DataFrame so we can rebuild splits from config later - self.df = pd.read_csv(data_path) - - # Defaults at startup (can be overridden dynamically from config)' - default_target = self.df.columns[-1] - default_test_size = 0.20 + self.dp_config = dp_config + self.client_val_frac = val_frac + self.client_test_frac = test_frac + self.id_col = id_col + self.test_ids = test_ids + self.seed = seed + + # Load the CSV once; actual column selection happens on first fit using server config + self._df = pd.read_csv(data_path) + + # Defaults used only for get_properties BEFORE first fit (last column target) + self.feature_names = self._df.columns[:-1].tolist() + self.target_name = self._df.columns[-1] + self.label_counts = self._df[self.target_name].value_counts().to_dict() + self.classes = sorted(self.label_counts.keys()) + + # Tensors for metrics before first fit (fallback to all-but-last as features) + X_default = self._df.iloc[:, :-1].values + y_default = self._df.iloc[:, -1].values + self.X_tensor = torch.tensor(X_default, dtype=torch.float32) + self.y_tensor = torch.tensor(y_default, dtype=torch.float32) + + # Placeholders; we lazily build loaders/model on the first fit when we see server config + self.train_loader = None + self.val_loader = None + self.test_loader = None + + self.model = None + self.criterion = nn.BCEWithLogitsLoss() + self.optimizer = None + + # Effective settings (filled at first fit) + self.effective_features = self.feature_names[:] + self.effective_target = self.target_name + self.effective_val_frac = float(self.client_val_frac) if self.client_val_frac is not None else 0.0 + self.effective_test_frac = float(self.client_test_frac) if self.client_test_frac is not None else 0.0 + + self._initialized = False + self._dp_attached = False # to avoid wrapping twice + + # ---------- helpers + + def _mk_loader(self, X, y, batch_size, shuffle): + x_t = torch.tensor(X, dtype=torch.float32) + y_t = torch.tensor(y, dtype=torch.float32) + return DataLoader(TensorDataset(x_t, y_t), batch_size=batch_size, shuffle=shuffle) + + def _lazy_init_from_server_config(self, config): + """ + Build model and (train, val, test) loaders once, using: + - Server-enforced schema: config['features'] (comma-separated), config['target'] + - Split fractions: client overrides win; else use server's val_fraction/test_fraction + - NEW: if config['test_ids'] is set (per-client from strategy), use ID-based split + """ + # ---------- schema from server (enforced if provided) + srv_features = (config.get("features") or "").strip() + srv_target = (config.get("target") or "").strip() + print(f"[Client] Initializing with server schema: features='{srv_features}', target='{srv_target}'") + + if srv_target: + if srv_target not in self._df.columns: + raise ValueError(f"Server-specified target '{srv_target}' not in CSV columns {list(self._df.columns)}") + target_col = srv_target + else: + 
target_col = self._df.columns[-1] # fallback (keeps backward compatibility) - # Build initial splits/buffers/metadata - self._prepare_splits( - target_name=default_target, - test_size=default_test_size, - dp_config=dp_config, - ) + if srv_features: + feat_cols = [c.strip() for c in srv_features.split(",") if c.strip()] + missing = [c for c in feat_cols if c not in self._df.columns] + if missing: + raise ValueError(f"Server-specified feature(s) not found in CSV: {missing}") + else: + feat_cols = [c for c in self._df.columns if c != target_col] + + # ---------- fractions: client overrides > server defaults > fallback (0.10/0.10) + srv_val = config.get("val_fraction", None) + srv_test = config.get("test_fraction", None) + val_frac = self.client_val_frac if self.client_val_frac is not None else (float(srv_val) if srv_val is not None else 0.10) + test_frac = self.client_test_frac if self.client_test_frac is not None else (float(srv_test) if srv_test is not None else 0.10) + + if not (0.0 <= val_frac < 1.0): + raise ValueError(f"Invalid val_frac: {val_frac} (must be 0 <= val_frac < 1)") + + # ---------- NEW: adopt test_ids / id_col from server config if provided + if (not self.test_ids or not self.test_ids.strip()) and config.get("test_ids"): + + # strategy (per_client) can inject test_ids as list or CSV; normalize to CSV string + ti = config.get("test_ids") + if isinstance(ti, (list, tuple, set)): + self.test_ids = ",".join(str(x) for x in ti) + else: + self.test_ids = str(ti) + + print(f"[Client] Using server-provided test_ids: {self.test_ids}") + + if (not self.id_col) and config.get("id_col"): + self.id_col = str(config.get("id_col")) + + # ---------- extract arrays with the enforced schema + X_all = self._df[feat_cols].values + y_all = self._df[target_col].values + + # Keep tensors for global metrics logging (same behavior as before) + self.X_tensor = torch.tensor(X_all, dtype=torch.float32) + self.y_tensor = torch.tensor(y_all, dtype=torch.float32) + + # ---------- split + if self.test_ids and self.test_ids.strip(): # ID-based mode (unchanged behavior, now also supports server-provided IDs) + print("[Client] Using ID-based test selection") + test_ids_list = [i.strip() for i in self.test_ids.split(',') if i.strip()] + + if self.id_col and self.id_col in self._df.columns: + id_series = self._df[self.id_col] + # Align types between id_series and test_ids_list + if np.issubdtype(id_series.dtype, np.number): + test_ids_list = [int(i) for i in test_ids_list] + else: + test_ids_list = [str(i) for i in test_ids_list] + + else: + print(f"[Client] Falling back to line numbers (index) as IDs since id_col='{self.id_col}' is invalid or not provided") + id_series = self._df.index + try: + test_ids_list = [int(i) for i in test_ids_list] + except ValueError: + raise ValueError("Test IDs must be integers when using line numbers as IDs") + + test_mask = id_series.isin(test_ids_list) + if not test_mask.any(): + print("[Client] Warning: No matching IDs found for test set; it will be empty") + + X_test = X_all[test_mask] + y_test = y_all[test_mask] + X_trval = X_all[~test_mask] + y_trval = y_all[~test_mask] + + actual_test_frac = len(y_test) / len(y_all) if len(y_all) > 0 else 0.0 + if val_frac + actual_test_frac >= 1.0: + raise ValueError(f"Validation fraction {val_frac} + actual test fraction {actual_test_frac} >= 1.0") + + self.effective_test_frac = actual_test_frac # For logging + + else: # Fraction-based mode (existing) + if not (0.0 <= test_frac < 1.0 and (val_frac + test_frac) < 1.0): + raise 
ValueError(f"Invalid fractions: val={val_frac}, test={test_frac} (require 0 <= val,test < 1 and val+test < 1)") + + strat_all = y_all if len(np.unique(y_all)) > 1 else None + X_trval, X_test, y_trval, y_test = train_test_split( + X_all, y_all, test_size=test_frac, random_state=self.seed, stratify=strat_all + ) - # Apply DP once at startup (NN only) - self.privacy_engine = None - if dp_config and self.model_type == "nn": + # Split val from trval (common to both modes) + if val_frac > 0 and len(y_trval) > 0: + actual_test_frac = len(y_test) / len(y_all) if len(y_all) > 0 else 0.0 + rel_val = val_frac / (1.0 - actual_test_frac) if (1.0 - actual_test_frac) > 0 else 0.0 + strat_tr = y_trval if len(np.unique(y_trval)) > 1 else None + X_train, X_val, y_train, y_val = train_test_split( + X_trval, y_trval, test_size=rel_val, random_state=self.seed, stratify=strat_tr + ) + else: + X_train, y_train = X_trval, y_trval + X_val, y_val = np.empty((0, X_all.shape[1])), np.empty((0,)) + + # ---------- build loaders + batch_size = self.dp_config.batch_size if self.dp_config else 32 + self.train_loader = self._mk_loader(X_train, y_train, batch_size, shuffle=True) + self.val_loader = self._mk_loader(X_val, y_val, batch_size=batch_size, shuffle=False) if len(y_val) else None + self.test_loader = self._mk_loader(X_test, y_test, batch_size, shuffle=False) + + # ---------- model/optimizer + input_dim = X_all.shape[1] + self.model = Net(input_dim) + self.optimizer = optim.SGD(self.model.parameters(), lr=0.01) + + # ---------- attach DP (same behavior as before; only wraps the train loader) + if self.dp_config and not self._dp_attached: try: from opacus import PrivacyEngine - - self.privacy_engine = PrivacyEngine() - ( - self.model, - self.optimizer, - self.train_loader, - ) = self.privacy_engine.make_private( + privacy_engine = PrivacyEngine() + (self.model, self.optimizer, self.train_loader) = privacy_engine.make_private( module=self.model, optimizer=self.optimizer, data_loader=self.train_loader, - noise_multiplier=dp_config.noise_multiplier, - max_grad_norm=dp_config.max_grad_norm, - secure_rng=dp_config.secure_rng, + noise_multiplier=self.dp_config.noise_multiplier, + max_grad_norm=self.dp_config.max_grad_norm, + secure_rng=self.dp_config.secure_rng, ) + self._dp_attached = True except ImportError: print("Opacus non installé : exécution sans DP.") - # -------------------------------------------------------------------------- - # Helpers to (re)prepare data from a target column and test_size - # -------------------------------------------------------------------------- - def _prepare_splits(self, target_name, test_size, dp_config): - """Create train/test split, tensors/DMatrices, and metadata for the chosen target/test_size.""" - df = self.df - - if target_name not in df.columns: - raise ValueError(f"Target '{target_name}' not found in CSV columns: {list(df.columns)}") - - # Clamp and sanitize test_size - try: - ts = float(test_size) - except Exception: - ts = 0.20 - ts = max(1e-6, min(ts, 0.9)) - - # Build X/y from chosen target - X_df = df.drop(columns=[target_name]) - y_series = df[target_name] - X_full = X_df.values - - # If y isn't numeric, factorize to integers (keep class labels for metadata) - if not np.issubdtype(np.asarray(y_series).dtype, np.number): - y_vals, uniques = pd.factorize(y_series) - y_full = y_vals.astype(np.float32, copy=False) - classes = list(map(str, uniques)) - label_counts = y_series.value_counts().to_dict() + # ---------- record effective settings + self.effective_features = 
feat_cols + self.effective_target = target_col + self.effective_val_frac = float(val_frac) + # effective_test_frac already set above if ID mode; otherwise use the input + if self.test_ids and self.test_ids.strip(): + pass # Already set else: - y_full = y_series.values.astype(np.float32, copy=False) - classes = sorted(pd.Series(y_full).unique().tolist()) - label_counts = pd.Series(y_full).value_counts().to_dict() - - # Heuristic for stratification (classification-like) - is_classif = np.unique(y_full).shape[0] <= 50 - strat = y_full if is_classif else None - - X_train, X_test, y_train, y_test = train_test_split( - X_full, - y_full, - test_size=ts, - random_state=42, - stratify=strat if strat is not None else None, - ) + self.effective_test_frac = float(test_frac) - # --- Update metadata used by get_properties --- - self.feature_names = X_df.columns.tolist() - self.target_name = target_name - self.label_counts = label_counts - self.classes = classes - - # --- Build per-model buffers --- - if self.model_type == "nn": - # Train tensors - self.X_tensor = torch.tensor(X_train, dtype=torch.float32) - self.y_tensor = torch.tensor(y_train, dtype=torch.float32) - - # Test tensors - self.X_test_tensor = torch.tensor(X_test, dtype=torch.float32) - self.y_test_tensor = torch.tensor(y_test, dtype=torch.float32) - - # DataLoaders - batch_size = getattr(dp_config, "batch_size", 32) if dp_config else 32 - train_ds = TensorDataset(self.X_tensor, self.y_tensor) - self.train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True) - self.test_loader = DataLoader( - TensorDataset(self.X_test_tensor, self.y_test_tensor), - batch_size=batch_size, - shuffle=False, - ) + self._initialized = True + print(f"[Client] Initialized with features={feat_cols}, target={target_col}, val={val_frac}, test={self.effective_test_frac}") - # Create model/criterion/optimizer if not present - input_dim = X_train.shape[1] - if not hasattr(self, "model"): - self.model = Net(input_dim) - self.criterion = nn.BCEWithLogitsLoss() - self.optimizer = optim.SGD(self.model.parameters(), lr=0.01) + # ---------- FL API (unchanged behavior) ---------- - else: - # XGBoost DMatrices - if xgb is None: - raise ImportError("xgboost is not installed. 
`pip install xgboost`") - - self.X_np = X_train.astype(np.float32, copy=False) # train - self.y_np = y_train.astype(np.float32, copy=False) - self.dtrain = xgb.DMatrix(self.X_np, label=self.y_np) - - self.X_np_test = X_test.astype(np.float32, copy=False) # test - self.y_np_test = y_test.astype(np.float32, copy=False) - self.dtest = xgb.DMatrix(self.X_np_test, label=self.y_np_test) - - # Cold-start booster only if not present yet - if not hasattr(self, "bst"): - self.bst = xgb.train(self.xgb_params, self.dtrain, num_boost_round=1) - - # Remember current prep so we can skip unnecessary rebuilds - self._prepared_key = (self.target_name, float(ts)) - - def _pick_target_and_frac(self, cfg): - """Pick ('target', 'testFrac') from a small dict; support alternative keys.""" - if not isinstance(cfg, dict): - return None, None - tgt = cfg.get("target") or cfg.get("Target") or cfg.get("label") - frac = cfg.get("testFrac", cfg.get("test_size", None)) - try: - frac = float(frac) if frac is not None else None - except Exception: - frac = None - return tgt, frac - - def _resolve_dataset_from_cfg(self, ds_cfg): - """ - Resolve (target, test_size) from datasetConfig: - - If isGlobal=True, take globalConfig.{target,testFrac} - - else take datasetConfig[hostname].{target,testFrac} - - fallbacks: keep current settings or defaults - """ - default_target = getattr(self, "target_name", self.df.columns[-1]) - default_frac = getattr(self, "_prepared_key", (None, 0.2))[1] if hasattr(self, "_prepared_key") else 0.2 - - if not isinstance(ds_cfg, dict): - return default_target, default_frac - - is_global = bool(ds_cfg.get("isGlobal")) - if is_global: - tgt, frac = self._pick_target_and_frac(ds_cfg.get("globalConfig", {})) - else: - host_cfg = ds_cfg.get(self.hostname) - if not isinstance(host_cfg, dict): - lower_map = {str(k).lower(): v for k, v in ds_cfg.items() if isinstance(v, dict)} - host_cfg = lower_map.get(self.hostname.lower()) - tgt, frac = self._pick_target_and_frac(host_cfg or {}) - - target = tgt if tgt else default_target - test_size = frac if frac is not None else default_frac - test_size = max(1e-6, min(float(test_size), 0.9)) - return target, test_size - - def _ensure_prepared_from_config(self, config, dp_config=None): - """Check config['datasetConfig']; rebuild splits if (target,test_size) changed.""" - ds_cfg = config.get("dataset_config") if isinstance(config, dict) else None - if not isinstance(ds_cfg, dict): - return - - target, ts = self._resolve_dataset_from_cfg(ds_cfg) - current = getattr(self, "_prepared_key", None) - desired = (target, float(ts)) - if current != desired: - # Do not re-apply DP dynamically here - self._prepare_splits(target_name=target, test_size=ts, dp_config=None) - - # -------------------------------------------------------------------------- - # Federated API - # -------------------------------------------------------------------------- def get_parameters(self, config): - if self.model_type == "nn": - return [val.cpu().numpy() for val in self.model.state_dict().values()] - else: - return booster_to_parameters(self.bst) + if not self._initialized: + try: + self._lazy_init_from_server_config(config if isinstance(config, dict) else {}) + except Exception as e: + if not self._initialized: + self._lazy_init_from_server_config({}) + return [val.cpu().numpy() for val in self.model.state_dict().values()] def set_parameters(self, parameters): - if self.model_type == "nn": - params_dict = zip(self.model.state_dict().keys(), parameters) - state_dict = {k: torch.tensor(v) for k, v in 
params_dict} - self.model.load_state_dict(state_dict, strict=True) - else: - if parameters and len(parameters) > 0: - self.bst = parameters_to_booster(parameters) + params_dict = zip(self.model.state_dict().keys(), parameters) + state_dict = {k: torch.tensor(v) for k, v in params_dict} + self.model.load_state_dict(state_dict, strict=True) def fit(self, parameters, config): - self.set_parameters(parameters) + if not self._initialized: + self._lazy_init_from_server_config(config) - # Allow server to override target/test_size dynamically - self._ensure_prepared_from_config(config, dp_config=None) - - if self.model_type == "nn": - self.model.train() - local_epochs = config.get("local_epochs", 5) - total_loss = 0.0 - for _ in range(local_epochs): - for X_batch, y_batch in self.train_loader: - self.optimizer.zero_grad() - outputs = self.model(X_batch) - loss = self.criterion(outputs.squeeze(), y_batch) - loss.backward() - self.optimizer.step() - total_loss += loss.item() * X_batch.size(0) - - avg_loss = total_loss / (len(self.train_loader.dataset) * max(local_epochs, 1)) - with torch.no_grad(): - logits = self.model(self.X_tensor).squeeze() # train set - probs = torch.sigmoid(logits).cpu().numpy() - y_true = self.y_tensor.cpu().numpy() - th = config.get("threshold", 0.5) - binary_preds = (probs >= th).astype(int) - acc = accuracy_score(y_true, binary_preds) + self.set_parameters(parameters) + self.model.train() + + local_epochs = config.get("local_epochs", 5) + total_loss = 0.0 + print(f"Training for {local_epochs} epochs...") + + for epoch in range(local_epochs): + print(f"Epoch {epoch + 1}/{local_epochs}") + for X_batch, y_batch in self.train_loader: + self.optimizer.zero_grad() + outputs = self.model(X_batch) + loss = self.criterion(outputs.squeeze(), y_batch) + loss.backward() + self.optimizer.step() + total_loss += loss.item() * X_batch.size(0) + + avg_loss = total_loss / (len(self.train_loader.dataset) * local_epochs) + + with torch.no_grad(): + logits = self.model(self.X_tensor).squeeze() + probs = torch.sigmoid(logits).cpu().numpy() + y_true = self.y_tensor.cpu().numpy() + binary_preds = (probs >= 0.5).astype(int) + try: auc = roc_auc_score(y_true, probs) + except Exception: + auc = float("nan") + acc = accuracy_score(y_true, binary_preds) - metrics = { - "train_loss": avg_loss, - "train_accuracy": acc, - "train_auc": auc, - } - return self.get_parameters(config), len(self.X_tensor), metrics - - else: - local_rounds = int(config.get("num_local_round", config.get("xgb_rounds", self.xgb_rounds))) - self.bst = xgb.train( - self.xgb_params, - self.dtrain, - num_boost_round=local_rounds, - xgb_model=self.bst, # continue from global - ) - preds = self.bst.predict(self.dtrain) - th = config.get("threshold", 0.5) - binary_preds = (preds >= th).astype(int) - acc = float((binary_preds == self.y_np).mean()) - auc = float(roc_auc_score(self.y_np, preds)) if len(np.unique(self.y_np)) > 1 else 0.0 + hostname = socket.gethostname() + os_type = platform.system() + metrics = { + "hostname": hostname, + "os_type": os_type, + "train_loss": avg_loss, + "train_accuracy": acc, + "train_auc": auc, + "features": ",".join(self.effective_features), + "target": self.effective_target, + "val_fraction": self.effective_val_frac, + "test_fraction": self.effective_test_frac, + } - metrics = {"train_accuracy": acc, "train_auc": auc} - return self.get_parameters(config), len(self.y_np), metrics + return self.get_parameters(config), len(self.train_loader.dataset), metrics def evaluate(self, parameters, config): - 
self.set_parameters(parameters) + if not self._initialized: + self._lazy_init_from_server_config(config) - # Keep eval consistent with any overridden target/test_size - self._ensure_prepared_from_config(config, dp_config=None) - - if self.model_type == "nn": - self.model.eval() - total_loss = 0.0 - all_probs, all_true = [], [] - with torch.no_grad(): - for X_batch, y_batch in self.test_loader: - outputs = self.model(X_batch) - loss = self.criterion(outputs.squeeze(), y_batch) - total_loss += loss.item() * X_batch.size(0) - probs = torch.sigmoid(outputs.squeeze()).cpu().numpy() - all_probs.extend(probs.tolist()) - all_true.extend(y_batch.cpu().numpy().tolist()) - - avg_loss = total_loss / len(self.test_loader.dataset) - th = config.get("threshold", 0.5) - binary_preds = [1 if p >= th else 0 for p in all_probs] - acc = accuracy_score(all_true, binary_preds) + self.set_parameters(parameters) + self.model.eval() + + total_loss = 0.0 + all_probs, all_true = [], [] + with torch.no_grad(): + for X_batch, y_batch in self.test_loader: + outputs = self.model(X_batch) + loss = self.criterion(outputs.squeeze(), y_batch) + total_loss += loss.item() * X_batch.size(0) + probs = torch.sigmoid(outputs.squeeze()).cpu().numpy() + all_probs.extend(probs.tolist()) + all_true.extend(y_batch.cpu().numpy().tolist()) + + avg_loss = total_loss / len(self.test_loader.dataset) if len(self.test_loader.dataset) > 0 else 0.0 + binary_preds = [1 if p >= 0.5 else 0 for p in all_probs] + try: auc = roc_auc_score(all_true, all_probs) - metrics = {"eval_loss": avg_loss, "eval_accuracy": acc, "eval_auc": auc} - return float(avg_loss), len(self.test_loader.dataset), metrics + except Exception: + auc = float("nan") + acc = accuracy_score(all_true, binary_preds) - else: - th = config.get("threshold", 0.5) - preds = self.bst.predict(self.dtest) - binary = (preds >= th).astype(int) - y_true = self.y_np_test - acc = float((binary == y_true).mean()) - auc = float(roc_auc_score(y_true, preds)) if len(np.unique(y_true)) > 1 else 0.0 - metrics = {"eval_accuracy": acc, "eval_auc": auc} - # loss optional for XGB; return 0.0 to satisfy Flower - return 0.0, len(y_true), metrics + metrics = { + "eval_loss": avg_loss, + "eval_accuracy": acc, + "eval_auc": auc, + } + print(f"Evaluation metrics: {metrics}") + + return float(avg_loss), len(self.test_loader.dataset), metrics def get_properties(self, config): hostname = socket.gethostname() os_type = platform.system() - if self.model_type == "nn": - num_samples = len(self.X_tensor) # train samples - num_features = self.X_tensor.shape[1] + if self._initialized: + num_samples = int(self.X_tensor.shape[0]) + num_features = int(self.X_tensor.shape[1]) + features_str = ",".join(self.effective_features) + target_name = self.effective_target + label_counts = pd.Series(self.y_tensor.numpy()).value_counts().to_dict() + classes = sorted(label_counts.keys()) else: - num_samples = len(self.y_np) # train samples - num_features = self.X_np.shape[1] + num_samples = len(self.X_tensor) + num_features = self.X_tensor.shape[1] + features_str = ",".join(self.feature_names) + target_name = self.target_name + label_counts = self.label_counts + classes = self.classes - features_str = ",".join(self.feature_names) - classes_str = ",".join(map(str, self.classes)) - dist_str = ",".join(f"{cls}:{cnt}" for cls, cnt in self.label_counts.items()) + classes_str = ",".join(map(str, classes)) + dist_str = ",".join(f"{cls}:{cnt}" for cls, cnt in label_counts.items()) cpu_physical = psutil.cpu_count(logical=False) cpu_logical = 
psutil.cpu_count(logical=True) @@ -419,7 +404,7 @@ def get_properties(self, config): "num_samples": num_samples, "num_features": num_features, "features": features_str, - "target": self.target_name, + "target": target_name, "classes": classes_str, "label_distribution": dist_str, "cpu_physical_cores": cpu_physical, @@ -427,61 +412,10 @@ def get_properties(self, config): "total_memory_gb": total_mem_gb, "gpu_driver_present": str(driver_present), "gpu_count": gpu_count, - "model_type": self.model_type, } - def start(self) -> None: - fl.client.start_numpy_client(server_address=self.server_address, client=self) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Flower client with NN/XGBoost + optional DP for NN") - parser.add_argument("--server_address", type=str, required=True, help="ex: 127.0.0.1:8080") - parser.add_argument("--data_path", type=str, default="data/data.csv", help="CSV path") - - # Mode - parser.add_argument("--model", type=str, default="nn", choices=["nn", "xgb"], help="Client model type") - - # DP (NN only) - parser.add_argument("--dp", action="store_true", help="Activer la confidentialité différentielle (NN uniquement)") - parser.add_argument("--noise_multiplier", type=float, default=1.0) - parser.add_argument("--max_grad_norm", type=float, default=1.0) - parser.add_argument("--batch_size", type=int, default=32) - - # XGBoost params - parser.add_argument("--xgb_eta", type=float, default=0.1) - parser.add_argument("--xgb_max_depth", type=int, default=6) - parser.add_argument("--xgb_subsample", type=float, default=0.8) - parser.add_argument("--xgb_colsample_bytree", type=float, default=0.8) - parser.add_argument("--xgb_rounds", type=int, default=50) - args = parser.parse_args() - - dp_config = None - if args.dp and args.model == "nn": - dp_config = DPConfig( - noise_multiplier=args.noise_multiplier, - max_grad_norm=args.max_grad_norm, - batch_size=args.batch_size, + def start(self): + fl.client.start_numpy_client( + server_address=self.server_address, + client=self, ) - - xgb_params = None - if args.model == "xgb": - xgb_params = { - "objective": "binary:logistic", - "eval_metric": "logloss", - "eta": args.xgb_eta, - "max_depth": args.xgb_max_depth, - "subsample": args.xgb_subsample, - "colsample_bytree": args.xgb_colsample_bytree, - "tree_method": "hist", - } - - client = FlowerClient( - server_address=args.server_address, - data_path=args.data_path, - dp_config=dp_config, - model_type=args.model, - xgb_params=xgb_params, - xgb_rounds=args.xgb_rounds if args.model == "xgb" else 0, - ) - client.start() diff --git a/MEDfl/rw/model.py b/MEDfl/rw/model.py index 2e37ae1..3c50a1f 100644 --- a/MEDfl/rw/model.py +++ b/MEDfl/rw/model.py @@ -1,11 +1,8 @@ import math -from typing import List, Optional, Literal, Callable, Tuple import torch import torch.nn as nn import torch.nn.functional as F -Task = Literal["binary", "multiclass", "multilabel", "regression"] - _ACTS = { "relu": nn.ReLU, "gelu": nn.GELU, @@ -16,7 +13,7 @@ "identity": nn.Identity, } -def _make_activation(name: str) -> nn.Module: +def _make_activation(name): if name not in _ACTS: raise ValueError(f"Unsupported activation '{name}'. 
Choose from {list(_ACTS)}") return _ACTS[name]() @@ -35,16 +32,16 @@ class Net(nn.Module): def __init__( self, - input_dim: int, - hidden_dims: List[int] = [64, 32], - activation: str = "relu", - batchnorm: bool = True, - dropout: Optional[float | List[float]] = 0.0, - task: Task = "binary", - num_classes: Optional[int] = None, - output_bias: bool = True, - return_logits: bool = True, - weight_init: Literal["kaiming", "xavier", "none"] = "kaiming", + input_dim, + hidden_dims=[64, 32], + activation="relu", + batchnorm=True, + dropout=0.0, + task="binary", + num_classes=None, + output_bias=True, + return_logits=True, + weight_init="kaiming", ): super().__init__() self.task = task @@ -78,10 +75,9 @@ def __init__( else: raise ValueError("dropout must be float|list[float]|None") - layers = [] - in_dim = input_dim self.blocks = nn.ModuleList() - for i, (h, p) in enumerate(zip(hidden_dims, dropout)): + in_dim = input_dim + for h, p in zip(hidden_dims, dropout): block = nn.ModuleDict({ "lin": nn.Linear(in_dim, h, bias=True), "bn": nn.BatchNorm1d(h) if batchnorm else nn.Identity(), @@ -98,11 +94,15 @@ def __init__( self.apply(lambda m: self._init_weights(m, scheme=weight_init, activation=activation)) @staticmethod - def _init_weights(m: nn.Module, scheme: str, activation: str): + def _init_weights(m, scheme, activation): if isinstance(m, nn.Linear): if scheme == "kaiming": nonlinearity = "leaky_relu" if activation == "leaky_relu" else "relu" - nn.init.kaiming_uniform_(m.weight, a=math.sqrt(5) if activation == "leaky_relu" else 0, nonlinearity=nonlinearity) + nn.init.kaiming_uniform_( + m.weight, + a=math.sqrt(5) if activation == "leaky_relu" else 0, + nonlinearity=nonlinearity, + ) elif scheme == "xavier": nn.init.xavier_uniform_(m.weight) if m.bias is not None: @@ -110,7 +110,7 @@ def _init_weights(m: nn.Module, scheme: str, activation: str): bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 nn.init.uniform_(m.bias, -bound, bound) - def forward(self, x: torch.Tensor) -> torch.Tensor: + def forward(self, x): for blk in self.blocks: x = blk["lin"](x) x = blk["bn"](x) @@ -120,7 +120,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: return logits if self.return_logits else self._apply_output_activation(logits) # Inference helpers - def _apply_output_activation(self, logits: torch.Tensor) -> torch.Tensor: + def _apply_output_activation(self, logits): if self.task == "binary": return torch.sigmoid(logits) # (B, 1) elif self.task == "multiclass": @@ -128,15 +128,15 @@ def _apply_output_activation(self, logits: torch.Tensor) -> torch.Tensor: elif self.task == "multilabel": return torch.sigmoid(logits) # (B, L) elif self.task == "regression": - return logits # raw regression output + return logits # raw regression output else: raise RuntimeError("Invalid task") @torch.no_grad() - def predict(self, x: torch.Tensor) -> torch.Tensor: - """Apply appropriate post-processing for predictions.""" - logits = self.forward(x) if self.return_logits else x - probs = self._apply_output_activation(logits) if self.return_logits else logits + def predict(self, x): + out = self.forward(x) + probs = self._apply_output_activation(out) if self.return_logits else out + if self.task == "binary": return (probs >= 0.5).long() elif self.task == "multiclass": diff --git a/MEDfl/rw/server.py b/MEDfl/rw/server.py index dc1cd4d..178658a 100644 --- a/MEDfl/rw/server.py +++ b/MEDfl/rw/server.py @@ -1,7 +1,6 @@ import flwr as fl from flwr.server.strategy import FedAvg from flwr.server.server import ServerConfig -from typing import 
Optional, Any from MEDfl.rw.strategy import Strategy import asyncio from flwr.server.client_manager import ClientManager @@ -29,11 +28,11 @@ class FederatedServer: def __init__( self, - host: str = "0.0.0.0", - port: int = 8080, - num_rounds: int = 3, - strategy: Optional[Strategy] = None, - certificates: Optional[Any] = None, + host="0.0.0.0", + port=8080, + num_rounds=3, + strategy=None, + certificates=None, ): """ Initialize the FederatedServer. @@ -60,7 +59,7 @@ def __init__( self.connected_clients = [] # Track connected client IDs - def start(self) -> None: + def start(self): """ Start the Flower server with the configured strategy and track client connections. """ @@ -89,7 +88,7 @@ class TrackingClientManager(fl.server.client_manager.SimpleClientManager): client_properties (dict): Placeholder for storing client-specific properties. """ - def __init__(self, server: FederatedServer): + def __init__(self, server): """ Initialize the TrackingClientManager. @@ -100,7 +99,7 @@ def __init__(self, server: FederatedServer): self.server = server self.client_properties = {} - def register(self, client: ClientProxy) -> bool: + def register(self, client): """ Register a client and log its connection. @@ -116,7 +115,7 @@ def register(self, client: ClientProxy) -> bool: asyncio.run(self._fetch_and_log_hostname(client)) return success - async def _fetch_and_log_hostname(self, client: ClientProxy): + async def _fetch_and_log_hostname(self, client): """ Asynchronously fetch and log the client's hostname or CID. diff --git a/MEDfl/rw/strategy.py b/MEDfl/rw/strategy.py index fc3d02b..ba1ce42 100644 --- a/MEDfl/rw/strategy.py +++ b/MEDfl/rw/strategy.py @@ -1,117 +1,98 @@ -# MEDfl/rw/strategy.py +# File: MEDfl/rw/strategy.py import os -import json import numpy as np import flwr as fl -from typing import Callable, Optional, Dict, Any, List, Tuple from flwr.common import GetPropertiesIns from flwr.server.client_manager import ClientManager from flwr.server.client_proxy import ClientProxy import time -from flwr.common import parameters_to_ndarrays, ndarrays_to_parameters - - -# ===== unchanged aggregate_* for metrics (works for both) ===== -def aggregate_fit_metrics(results: List[Tuple[int, Dict[str, float]]]) -> Dict[str, float]: - total = max(sum(n for n, _ in results), 1) +from MEDfl.rw.model import Net +import torch + +# =================================================== +# Custom metric aggregation functions +# =================================================== +def aggregate_fit_metrics(results): + total = sum(n for n, _ in results) loss = sum(m.get("train_loss", 0.0) * n for n, m in results) / total - acc = sum(m.get("train_accuracy", 0.0) * n for n, m in results) / total - auc = sum(m.get("train_auc", 0.0) * n for n, m in results) / total + acc = sum(m.get("train_accuracy", 0.0) * n for n, m in results) / total + auc = sum(m.get("train_auc", 0.0) * n for n, m in results) / total return {"train_loss": loss, "train_accuracy": acc, "train_auc": auc} -def aggregate_eval_metrics(results: List[Tuple[int, Dict[str, float]]]) -> Dict[str, float]: - total = max(sum(n for n, _ in results), 1) +def aggregate_eval_metrics(results): + total = sum(n for n, _ in results) loss = sum(m.get("eval_loss", 0.0) * n for n, m in results) / total - acc = sum(m.get("eval_accuracy", 0.0) * n for n, m in results) / total - auc = sum(m.get("eval_auc", 0.0) * n for n, m in results) / total + acc = sum(m.get("eval_accuracy", 0.0) * n for n, m in results) / total + auc = sum(m.get("eval_auc", 0.0) * n for n, m in results) / 
total return {"eval_loss": loss, "eval_accuracy": acc, "eval_auc": auc} -# ========== NEW: helper to decode XGB booster parameters ========== -# --- helpers for XGB bytes <-> Booster (updated to JSON format) --- -def _booster_to_json_bytes(bst) -> bytes: - """Return XGBoost Booster as JSON bytes (so we can edit trees).""" - # xgboost>=1.7 supports raw_format='json' - raw = bst.save_raw(raw_format='json') - return bytes(raw) - -def _ensure_json_bytes(raw_bytes: bytes) -> bytes: - """If bytes are not JSON (binary model), try converting via Booster load+save.""" - try: - # Quick sniff: JSON starts with '{' or '[' - b0 = raw_bytes.lstrip()[:1] - if b0 in (b'{', b'['): - return raw_bytes - except Exception: - pass - # Convert binary -> JSON - import xgboost as xgb - bst = xgb.Booster() - bst.load_model(bytearray(raw_bytes)) - return _booster_to_json_bytes(bst) - -def _get_tree_nums(xgb_model_org: bytes) -> Tuple[int, int]: - xgb_model = json.loads(bytearray(xgb_model_org)) - tree_num = int(xgb_model["learner"]["gradient_booster"]["model"]["gbtree_model_param"]["num_trees"]) - paral_tree_num = int(xgb_model["learner"]["gradient_booster"]["model"]["gbtree_model_param"]["num_parallel_tree"]) - return tree_num, paral_tree_num - -def _aggregate_trees(bst_prev_org: Optional[bytes], bst_curr_org: bytes) -> bytes: - """Conduct bagging aggregation by appending trees from current to previous.""" - bst_curr_org = _ensure_json_bytes(bst_curr_org) - - if not bst_prev_org: - # First model in the round becomes the base - return bst_curr_org - - bst_prev_org = _ensure_json_bytes(bst_prev_org) - - tree_num_prev, _ = _get_tree_nums(bst_prev_org) - _, paral_tree_num_curr = _get_tree_nums(bst_curr_org) - - bst_prev = json.loads(bytearray(bst_prev_org)) - bst_curr = json.loads(bytearray(bst_curr_org)) - - # Update counts - bst_prev["learner"]["gradient_booster"]["model"]["gbtree_model_param"]["num_trees"] = str( - tree_num_prev + paral_tree_num_curr - ) - - # Update iteration_indptr - iteration_indptr = bst_prev["learner"]["gradient_booster"]["model"]["iteration_indptr"] - iteration_indptr.append(iteration_indptr[-1] + paral_tree_num_curr) - - # Append current trees, re-id them - trees_curr = bst_curr["learner"]["gradient_booster"]["model"]["trees"] - for t in range(paral_tree_num_curr): - trees_curr[t]["id"] = tree_num_prev + t - bst_prev["learner"]["gradient_booster"]["model"]["trees"].append(trees_curr[t]) - bst_prev["learner"]["gradient_booster"]["model"]["tree_info"].append(0) - - return bytes(json.dumps(bst_prev), "utf-8") - +# =================================================== +# Strategy Wrapper +# =================================================== class Strategy: + """ + Flower Strategy wrapper: + - Dynamic hyperparameters via on_fit_config_fn + - Custom metric aggregation + - Per-client & aggregated metric logging + - Synchronous get_properties() inspection in configure_fit() + - Saving global parameters every saveOnRounds to savingPath + + Extended: + - split_mode: + * "global": use global val_fraction/test_fraction for all clients + * "per_client": use client_fractions[hostname] if present + - client_fractions: + { + "HOSTNAME_1": { + "val_fraction": float (optional), + "test_fraction": float (optional), + "test_ids": [..] or "id1,id2" (optional) + }, + ... 
+ } + - In per_client mode: + * if test_ids is present for a client: + -> send test_ids + -> do NOT use that client's test_fraction + * otherwise: + -> use that client's val_fraction/test_fraction if provided, + else fall back to global val_fraction/test_fraction + - client id in this mapping = hostname from client.get_properties() + - id_col: + * column name used on clients to match test_ids (default "id") + """ + def __init__( self, - name: str = "FedAvg", - fraction_fit: float = 1.0, - fraction_evaluate: float = 1.0, - min_fit_clients: int = 2, - min_evaluate_clients: int = 2, - min_available_clients: int = 2, - initial_parameters: Optional[List[Any]] = None, - evaluate_fn: Optional[Callable] = None, - fit_metrics_aggregation_fn: Optional[Callable] = None, - evaluate_metrics_aggregation_fn: Optional[Callable] = None, - local_epochs: int = 1, - threshold: float = 0.5, - learning_rate: float = 0.01, - optimizer_name: str = "SGD", - savingPath: str = "", - saveOnRounds: int = 3, - total_rounds: int = 3, - datasetConfig: Dict[str, Any] = {}, - ) -> None: + name="FedAvg", + fraction_fit=1.0, + fraction_evaluate=1.0, + min_fit_clients=2, + min_evaluate_clients=2, + min_available_clients=2, + initial_parameters=None, + evaluate_fn=None, + fit_metrics_aggregation_fn=None, + evaluate_metrics_aggregation_fn=None, + local_epochs=1, + threshold=0.5, + learning_rate=0.01, + optimizer_name="SGD", + savingPath="", + saveOnRounds=3, + total_rounds=3, + features="", + target="", + val_fraction=0.10, + test_fraction=0.10, + # NEW: splitting control (added at the end to not break existing calls) + split_mode="global", # "global" or "per_client" + client_fractions=None, + # NEW: id column for test_ids mapping + id_col="id", + ): self.name = name self.fraction_fit = fraction_fit self.fraction_evaluate = fraction_evaluate @@ -124,6 +105,7 @@ def __init__( self.fit_metrics_aggregation_fn = fit_metrics_aggregation_fn or aggregate_fit_metrics self.evaluate_metrics_aggregation_fn = evaluate_metrics_aggregation_fn or aggregate_eval_metrics + # Dynamic hyperparams self.local_epochs = local_epochs self.threshold = threshold self.learning_rate = learning_rate @@ -131,299 +113,190 @@ def __init__( self.savingPath = savingPath self.saveOnRounds = saveOnRounds self.total_rounds = total_rounds - self.datasetConfig = datasetConfig + self._features = features # comma-separated or "" + self._target = target # or "" + self._val_fraction = val_fraction + self._test_fraction = test_fraction - self.strategy_object: Optional[fl.server.strategy.Strategy] = None + # NEW + self.split_mode = split_mode + self.client_fractions = client_fractions or {} + self.id_col = id_col - def create_strategy(self) -> None: - # ======== Branch: custom XGB strategy ======== - if self.name == "XGBoostBagging": - self.strategy_object = self._create_xgb_bagging_strategy() - return + self.strategy_object = None - # ======== Default: use Flower built-in by name (e.g., FedAvg) ======== + def create_strategy(self): + # 1) Pick the Flower Strategy class StrategyClass = getattr(fl.server.strategy, self.name) - # ======== Common params for all built-in strategies ======== - def fit_config_fn(server_round: int) -> Dict[str, Any]: - return {"local_epochs": self.local_epochs, "threshold": self.threshold, "learning_rate": self.learning_rate, "optimizer": self.optimizer_name , "dataset_config": self.datasetConfig} - - params: Dict[str, Any] = { - "fraction_fit": self.fraction_fit, - "fraction_evaluate": self.fraction_evaluate, - "min_fit_clients": 
self.min_fit_clients, - "min_evaluate_clients": self.min_evaluate_clients, - "min_available_clients": self.min_available_clients, - "evaluate_fn": self.evaluate_fn, - "on_fit_config_fn": fit_config_fn, - "fit_metrics_aggregation_fn": self.fit_metrics_aggregation_fn, - "evaluate_metrics_aggregation_fn": self.evaluate_metrics_aggregation_fn, + # 2) Define on_fit_config_fn _before_ instantiation (global defaults) + def fit_config_fn(server_round): + return { + "local_epochs": self.local_epochs, + "threshold": self.threshold, + "learning_rate": self.learning_rate, + "optimizer": self.optimizer_name, + "features": self._features, + "target": self._target, + "val_fraction": float(self._val_fraction), + "test_fraction": float(self._test_fraction), + # NEW: always send id_col so clients know which column to use for test_ids + "id_col": self.id_col, + } + + # 3) Build params including on_fit_config_fn + params = { + "fraction_fit": self.fraction_fit, + "fraction_evaluate": self.fraction_evaluate, + "min_fit_clients": self.min_fit_clients, + "min_evaluate_clients": self.min_evaluate_clients, + "min_available_clients": self.min_available_clients, + "evaluate_fn": self.evaluate_fn, + "on_fit_config_fn": fit_config_fn, + "fit_metrics_aggregation_fn": self.fit_metrics_aggregation_fn, + "evaluate_metrics_aggregation_fn": self.evaluate_metrics_aggregation_fn, } if self.initial_parameters: params["initial_parameters"] = fl.common.ndarrays_to_parameters(self.initial_parameters) + else: + # derive initial params from server-specified features + feat_cols = [c.strip() for c in (self._features or "").split(",") if c.strip()] + if not feat_cols: + raise ValueError( + "No initial_parameters provided and 'features' is empty. " + "Provide Strategy(..., features='col1,col2,...') or pass initial_parameters." 
+ ) + input_dim = len(feat_cols) + _model = Net(input_dim) + _arrays = [t.detach().cpu().numpy() for t in _model.state_dict().values()] + params["initial_parameters"] = fl.common.ndarrays_to_parameters(_arrays) + # 4) Instantiate the real Flower strategy strat = StrategyClass(**params) - # Wrap aggregate_fit for logging + optional saving + # 5) Wrap aggregate_fit for logging (prints unchanged) original_agg_fit = strat.aggregate_fit def logged_agg_fit(server_round, results, failures): print(f"\n[Server] 🔄 Round {server_round} - Client Training Metrics:") - for (client_id, fit_res) in results: + for i, (client_id, fit_res) in enumerate(results): print(f" CTM Round {server_round} Client:{client_id.cid}: {fit_res.metrics}") - agg_params, metrics = original_agg_fit(server_round, results, failures) print(f"[Server] ✅ Round {server_round} - Aggregated Training Metrics: {metrics}\n") - - # ⬇️ Only try to save when we actually have parameters - should_checkpoint = ( - self.savingPath - and ((server_round % self.saveOnRounds == 0) or (self.total_rounds and server_round == self.total_rounds)) - ) - - if should_checkpoint and agg_params is not None: - try: - arrays = fl.common.parameters_to_ndarrays(agg_params) - filename = ( - f"round_{server_round}_final_model.npz" - if (self.total_rounds and server_round == self.total_rounds) - else f"round_{server_round}_model.npz" - ) - os.makedirs(self.savingPath, exist_ok=True) - np.savez(os.path.join(self.savingPath, filename), *arrays) - print(f"[Server] 💾 Saved checkpoint to {os.path.join(self.savingPath, filename)}") - except Exception as e: - print(f"[Server] ⚠️ Skipped saving checkpoint (no parameters or conversion failed): {e}") - elif should_checkpoint and agg_params is None: - print("[Server] ⚠️ Skipped checkpoint: aggregate_fit returned None (no aggregated parameters this round).") - + # save the model parameters if savingPath is set on each saveOnRounds + if self.savingPath and ( + (server_round % self.saveOnRounds == 0) + or (self.total_rounds and server_round == self.total_rounds) + ): + arrays = fl.common.parameters_to_ndarrays(agg_params) + # Determine filename: final_model on last round else round_{n} + filename = ( + f"round_{server_round}_final_model.npz" + if server_round == self.total_rounds + else f"round_{server_round}_model.npz" + ) + filepath = os.path.join(self.savingPath, filename) + np.savez(filepath, *arrays) return agg_params, metrics strat.aggregate_fit = logged_agg_fit - + # 6) Wrap aggregate_evaluate for logging (prints unchanged) original_agg_eval = strat.aggregate_evaluate + def logged_agg_eval(server_round, results, failures): print(f"\n[Server] 📊 Round {server_round} - Client Evaluation Metrics:") - for (client_id, eval_res) in results: + for i, (client_id, eval_res) in enumerate(results): print(f" CEM Round {server_round} Client:{client_id.cid}: {eval_res.metrics}") loss, metrics = original_agg_eval(server_round, results, failures) - print(f"[Server] ✅ Round {server_round} - Aggregated Evaluation Metrics:\n Loss: {loss}, Metrics: {metrics}\n") + print(f"[Server] ✅ Round {server_round} - Aggregated Evaluation Metrics:") + print(f" Loss: {loss}, Metrics: {metrics}\n") return loss, metrics + strat.aggregate_evaluate = logged_agg_eval + # 7) Wrap configure_fit to: + # - log client properties (unchanged) + # - apply split_mode/client_fractions to fit_ins.config (NEW) original_conf_fit = strat.configure_fit - def wrapped_conf_fit(server_round: int, parameters, client_manager: ClientManager): - selected = 
original_conf_fit(server_round=server_round, parameters=parameters, client_manager=client_manager) - ins = GetPropertiesIns(config={}) - for client, _ in selected: - try: - props = client.get_properties(ins=ins, timeout=10.0, group_id=0) - print(f"\n📋 [Round {server_round}] Client {client.cid} Properties: {props.properties}") - except Exception as e: - print(f"⚠️ Failed to get properties from {client.cid}: {e}") - return selected - strat.configure_fit = wrapped_conf_fit - self.strategy_object = strat + def wrapped_conf_fit( + server_round, + parameters, + client_manager + ): + selected = original_conf_fit( + server_round=server_round, + parameters=parameters, + client_manager=client_manager + ) - # ---------- NEW: Custom XGBoost strategy ---------- - def _create_xgb_bagging_strategy_old(self) -> fl.server.strategy.Strategy: - from flwr.common import FitIns, Parameters, ndarrays_to_parameters - from flwr.server.client_manager import ClientManager - - class XGBoostBagging(fl.server.strategy.Strategy): - def __init__(self, outer: "Strategy"): - self.outer = outer - self.global_parameters: Parameters | None = None - - def initialize_parameters(self, client_manager: ClientManager): - return self.global_parameters - - def configure_fit(self, server_round, parameters, client_manager: ClientManager): - # (unchanged) - sample_n = max(self.outer.min_fit_clients, 1) - clients = client_manager.sample(num_clients=sample_n, min_num_clients=self.outer.min_fit_clients) - fit_ins = FitIns(parameters if parameters is not None else self.global_parameters, - {"xgb_rounds": self.outer.local_epochs if self.outer.local_epochs > 0 else 50}) - return [(client, fit_ins) for client in clients] - - def aggregate_fit(self, server_round, results, failures): - # (unchanged: select-best) - best = None - for (client, fit_res) in results: - m = fit_res.metrics or {} - score = (m.get("train_auc", 0.0), m.get("train_accuracy", 0.0)) - if (best is None) or (score > best["score"]): - best = {"score": score, "parameters": fit_res.parameters} - if best is not None: - self.global_parameters = best["parameters"] - - metrics = self.outer.fit_metrics_aggregation_fn([(r.num_examples, r.metrics) for (_, r) in results]) - print(f"[Server-XGB] ✅ Round {server_round} - Selected best booster (AUC,ACC)={best['score'] if best else None}") - - # Optional saving (unchanged) - if self.outer.savingPath and ((server_round % self.outer.saveOnRounds == 0) or - (self.outer.total_rounds and server_round == self.outer.total_rounds)): - os.makedirs(self.outer.savingPath, exist_ok=True) - if self.global_parameters is not None: - raw = bytes(self.global_parameters.tensors[0]) if hasattr(self.global_parameters, "tensors") \ - else bytes(self.global_parameters[0].tolist()) - with open(os.path.join(self.outer.savingPath, f"round_{server_round}_xgb.model"), "wb") as f: - f.write(raw) - - return self.global_parameters, metrics - - def configure_evaluate(self, server_round, parameters, client_manager: ClientManager): - # No centralized eval: return empty list - return [] - - def aggregate_evaluate(self, server_round, results, failures): - if not results: - return 0.0, {} - loss = 0.0 - metrics = self.outer.evaluate_metrics_aggregation_fn([(r.num_examples, r.metrics) for (_, r) in results]) - return loss, metrics - - # 🔧 ADD THIS METHOD to satisfy the abstract interface - def evaluate(self, server_round, parameters): - # No server-side evaluation; Flower expects Optional[Tuple[float, dict]] - return None - - strat = XGBoostBagging(self) - - # Log client props, 
like the NN path - original_conf_fit = strat.configure_fit - def wrapped_conf_fit(server_round, parameters, client_manager): - selected = original_conf_fit(server_round, parameters, client_manager) ins = GetPropertiesIns(config={}) - for client, _ in selected: + + for client, fit_ins in selected: + hostname = None try: props = client.get_properties(ins=ins, timeout=10.0, group_id=0) print(f"\n📋 [Round {server_round}] Client {client.cid} Properties: {props.properties}") + hostname = props.properties.get("hostname", None) except Exception as e: print(f"⚠️ Failed to get properties from {client.cid}: {e}") - return selected - strat.configure_fit = wrapped_conf_fit - - return strat - - def _create_xgb_bagging_strategy(self) -> fl.server.strategy.Strategy: - from flwr.common import FitIns, Parameters, ndarrays_to_parameters - from flwr.common import Scalar - from typing import Union, cast - - class XGBoostBagging(fl.server.strategy.Strategy): - def __init__(self, outer: "Strategy"): - self.outer = outer - self.global_model_bytes: Optional[bytes] = None # JSON bytes - - def initialize_parameters(self, client_manager: ClientManager): - if self.global_model_bytes is None: - return None - # wrap the JSON bytes into a uint8 numpy array for NumPyClient - arr = np.frombuffer(self.global_model_bytes, dtype=np.uint8) - return ndarrays_to_parameters([arr]) - - - def configure_fit(self, server_round: int, parameters, client_manager: ClientManager): - # Sample exactly min_fit_clients (or at least 1) - sample_n = max(self.outer.min_fit_clients, 1) - clients = client_manager.sample( - num_clients=sample_n, min_num_clients=self.outer.min_fit_clients - ) - - # Tell clients how many trees to add and return - cfg = { - "num_local_round": int(self.outer.local_epochs) if self.outer.local_epochs > 0 else 50, - "global_round": int(server_round), - } - fit_ins = FitIns( - parameters if parameters is not None else self.initialize_parameters(client_manager), - cfg, - ) - return [(client, fit_ins) for client in clients] - - def aggregate_fit( - self, - server_round: int, - results: List[Tuple[ClientProxy, "flwr.common.FitRes"]], - failures: List[Union[Tuple[ClientProxy, "flwr.common.FitRes"], BaseException]], - ): - # Accept failures policy like FedAvg (optional) - if not results: - return None, {} - if not getattr(self, "accept_failures", True) and failures: - return None, {} - - # Start from current global; append every client's newly trained trees - global_bytes = self.global_model_bytes - for _, fit_res in results: - arrays = parameters_to_ndarrays(fit_res.parameters) # list of np.ndarrays - for arr in arrays: - raw = arr.tobytes() # recover the exact buffer we sent - raw_json = _ensure_json_bytes(raw) # accept binary/JSON; normalize to JSON - # (Optional but recommended) slice only the last K trees trained locally: - # k = int(self.outer.local_epochs) if self.outer.local_epochs > 0 else 50 - # raw_json = _slice_last_k_trees(raw_json, k) - global_bytes = _aggregate_trees(global_bytes, raw_json) - - - self.global_model_bytes = global_bytes - - # Optional metric aggregation for logging - metrics = self.outer.fit_metrics_aggregation_fn( - [(r.num_examples, r.metrics) for (_, r) in results] - ) - - # Optional checkpointing - if self.outer.savingPath and ( - (server_round % self.outer.saveOnRounds == 0) - or (self.outer.total_rounds and server_round == self.outer.total_rounds) - ): - os.makedirs(self.outer.savingPath, exist_ok=True) - if self.global_model_bytes is not None: - with open(os.path.join(self.outer.savingPath, 
f"round_{server_round}_xgb.model"), "wb") as f: - f.write(self.global_model_bytes) + # Fallback: if no hostname returned, use Flower cid + if not hostname: + hostname = client.cid - arr = np.frombuffer(self.global_model_bytes, dtype=np.uint8) - return ndarrays_to_parameters([arr]), metrics + # Keep same object + cfg = fit_ins.config + if self.split_mode == "per_client": + # Lookup by hostname (preferred) or cid + per_cfg = ( + self.client_fractions.get(hostname) + or self.client_fractions.get(client.cid) + or {} + ) - def configure_evaluate(self, server_round, parameters, client_manager: ClientManager): - # You can keep distributed evaluation if your clients implement it; - # otherwise return [] to disable. - return [] - - def aggregate_evaluate(self, server_round, results, failures): - if not results: - return 0.0, {} - loss = 0.0 - metrics = self.outer.evaluate_metrics_aggregation_fn( - [(r.num_examples, r.metrics) for (_, r) in results] - ) - return loss, metrics - - def evaluate(self, server_round, parameters): - # No server-side eval by default - return None - - strat = XGBoostBagging(self) + # val_fraction: per-client override if present + if "val_fraction" in per_cfg: + try: + cfg["val_fraction"] = float(per_cfg["val_fraction"]) + except Exception: + pass # keep existing if invalid + + # test: prefer test_ids if provided + if "test_ids" in per_cfg and per_cfg["test_ids"]: + test_ids_val = per_cfg["test_ids"] + if isinstance(test_ids_val, (list, tuple, set)): + test_ids_str = ",".join(str(x) for x in test_ids_val) + else: + test_ids_str = str(test_ids_val) + cfg["test_ids"] = test_ids_str + # when using explicit IDs, do not force a test_fraction for this client + if "test_fraction" in cfg: + del cfg["test_fraction"] + # ensure id_col is sent so client can map IDs + cfg["id_col"] = self.id_col + else: + # no test_ids -> use per-client test_fraction if present + if "test_fraction" in per_cfg: + try: + cfg["test_fraction"] = float(per_cfg["test_fraction"]) + except Exception: + pass # keep existing if invalid + # if no test_ids: id_col not strictly required, leave as-is + else: + # split_mode == "global": enforce global fractions, clear any test_ids + if "test_ids" in cfg: + del cfg["test_ids"] + cfg["val_fraction"] = float(self._val_fraction) + cfg["test_fraction"] = float(self._test_fraction) + # also send id_col so clients know column name if needed + cfg["id_col"] = self.id_col - # (Optional) property logging identical to your previous wrapper - original_conf_fit = strat.configure_fit - def wrapped_conf_fit(server_round, parameters, client_manager): - selected = original_conf_fit(server_round, parameters, client_manager) - ins = GetPropertiesIns(config={}) - for client, _ in selected: - try: - props = client.get_properties(ins=ins, timeout=10.0, group_id=0) - print(f"\n📋 [Round {server_round}] Client {client.cid} Properties: {props.properties}") - except Exception as e: - print(f"⚠️ Failed to get properties from {client.cid}: {e}") return selected - strat.configure_fit = wrapped_conf_fit - return strat + strat.configure_fit = wrapped_conf_fit + # 8) Save the ready-to-use strategy + self.strategy_object = strat diff --git a/build/lib/MEDfl/LearningManager/__init__.py b/build/lib/MEDfl/LearningManager/__init__.py new file mode 100644 index 0000000..88a6314 --- /dev/null +++ b/build/lib/MEDfl/LearningManager/__init__.py @@ -0,0 +1,13 @@ +# MEDfl/LearningManager/__init__.py + +# Import modules from this package +# from .client import * +# from .dynamicModal import * +# from .flpipeline 
import * +# from .federated_dataset import * +# from .model import * +# from .params_optimiser import * +# from .plot import * +# from .server import * +# from .strategy import * +# from .utils import * diff --git a/build/lib/MEDfl/LearningManager/client.py b/build/lib/MEDfl/LearningManager/client.py new file mode 100644 index 0000000..4a18ea8 --- /dev/null +++ b/build/lib/MEDfl/LearningManager/client.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python3 +import flwr as fl +from opacus import PrivacyEngine +from torch.utils.data import DataLoader + +from .model import Model +from .utils import params +import torch + +class FlowerClient(fl.client.NumPyClient): + """ + FlowerClient class for creating MEDfl clients. + + Attributes: + cid (str): Client ID. + local_model (Model): Local model of the federated learning network. + trainloader (DataLoader): DataLoader for training data. + valloader (DataLoader): DataLoader for validation data. + diff_priv (bool): Flag indicating whether to use differential privacy. + """ + def __init__(self, cid: str, local_model: Model, trainloader: DataLoader, valloader: DataLoader, diff_priv: bool = params["diff_privacy"]): + """ + Initializes the FlowerClient instance. + + Args: + cid (str): Client ID. + local_model (Model): Local model of the federated learning network. + trainloader (DataLoader): DataLoader for training data. + valloader (DataLoader): DataLoader for validation data. + diff_priv (bool): Flag indicating whether to use differential privacy. + """ + self.cid = cid + self.local_model = local_model + self.trainloader = trainloader + self.valloader = valloader + if torch.cuda.is_available(): + num_cuda_devices = torch.cuda.device_count() + if num_cuda_devices > 0: + device_idx = int(self.cid) % num_cuda_devices + self.device = torch.device(f"cuda:{device_idx}") + self.local_model.model.to(self.device) + else: + # Handle case where CUDA is available but no CUDA devices are found + raise RuntimeError("CUDA is available, but no CUDA devices are found.") + else: + # Handle case where CUDA is not available + self.device = torch.device("cpu") + self.local_model.model.to(self.device) + + self.privacy_engine = PrivacyEngine(secure_mode=False) + self.diff_priv = diff_priv + self.epsilons = [] + self.accuracies = [] + self.losses = [] + if self.diff_priv: + model, optimizer, self.trainloader = self.privacy_engine.make_private_with_epsilon( + module=self.local_model.model.train(), + optimizer=self.local_model.optimizer, + data_loader=self.trainloader, + epochs=params["train_epochs"], + target_epsilon=float(params["EPSILON"]), + target_delta= float(params["DELTA"]), + max_grad_norm=params["MAX_GRAD_NORM"], + ) + setattr(self.local_model, "model", model) + setattr(self.local_model, "optimizer", optimizer) + self.validate() + + def validate(self): + """Validates cid, local_model, trainloader, valloader.""" + if not isinstance(self.cid, str): + raise TypeError("cid argument must be a string") + + if not isinstance(self.local_model, Model): + raise TypeError("local_model argument must be a MEDfl.LearningManager.model.Model") + + if not isinstance(self.trainloader, DataLoader): + raise TypeError("trainloader argument must be a torch.utils.data.dataloader") + + if not isinstance(self.valloader, DataLoader): + raise TypeError("valloader argument must be a torch.utils.data.dataloader") + + if not isinstance(self.diff_priv, bool): + raise TypeError("diff_priv argument must be a bool") + + def get_parameters(self, config): + """ + Returns the current parameters of the local 
model. + + Args: + config: Configuration information. + + Returns: + Numpy array: Parameters of the local model. + """ + print(f"[Client {self.cid}] get_parameters") + return self.local_model.get_parameters() + + def fit(self, parameters, config): + """ + Fits the local model to the received parameters using federated learning. + + Args: + parameters: Parameters received from the server. + config: Configuration information. + + Returns: + Tuple: Parameters of the local model, number of training examples, and privacy information. + """ + print('\n -------------------------------- \n this is the config of the client') + print(f"[Client {self.cid}] fit, config: {config}") + # print(config['epochs']) + print('\n -------------------------------- \n ') + self.local_model.set_parameters(parameters) + for _ in range(params["train_epochs"]): + epsilon = self.local_model.train( + self.trainloader, + epoch=_, + device=self.device, + privacy_engine=self.privacy_engine, + diff_priv=self.diff_priv, + ) + self.epsilons.append(epsilon) + print(f"epsilon of client {self.cid} : eps = {epsilon}") + return ( + self.local_model.get_parameters(), + len(self.trainloader), + {"epsilon": epsilon}, + ) + + def evaluate(self, parameters, config): + """ + Evaluates the local model on the validation data and returns the loss and accuracy. + + Args: + parameters: Parameters received from the server. + config: Configuration information. + + Returns: + Tuple: Loss, number of validation examples, and accuracy information. + """ + print(f"[Client {self.cid}] evaluate, config: {config}") + self.local_model.set_parameters(parameters) + loss, accuracy , auc = self.local_model.evaluate( + self.valloader, device=self.device + ) + self.losses.append(loss) + self.accuracies.append(accuracy) + + return float(loss), len(self.valloader), {"accuracy": float(accuracy)} diff --git a/build/lib/MEDfl/LearningManager/dynamicModal.py b/build/lib/MEDfl/LearningManager/dynamicModal.py new file mode 100644 index 0000000..12db5c4 --- /dev/null +++ b/build/lib/MEDfl/LearningManager/dynamicModal.py @@ -0,0 +1,287 @@ +import torch +import torch.nn as nn +from sklearn.svm import SVC + +class DynamicModel: + """DynamicModel class for creating various types of neural network models.""" + + # Create a binary classifier model + @staticmethod + def create_binary_classifier(input_dim, hidden_dims, output_dim, activation='relu', dropout_rate=0.0, + batch_norm=False, use_gpu=False): + """ + Creates a binary classifier neural network model with customizable architecture. + + Args: + input_dim (int): Dimension of the input data. + hidden_dims (List[int]): List of dimensions for hidden layers. + output_dim (int): Dimension of the output (number of classes). + activation (str, optional): Activation function for hidden layers. Default is 'relu'. + dropout_rate (float, optional): Dropout rate for regularization. Default is 0.0 (no dropout). + batch_norm (bool, optional): Whether to apply batch normalization. Default is False. + use_gpu (bool, optional): Whether to use GPU acceleration. Default is False. + + Returns: + torch.nn.Module: Created PyTorch model. 
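For reference, a minimal sketch of how this binary-classifier factory might be called; the import path follows the MEDfl/LearningManager layout shown in this diff, and the dimensions are invented for illustration:

import torch

from MEDfl.LearningManager.dynamicModal import DynamicModel

# Hypothetical tabular setup: 10 input features, two hidden layers, one sigmoid output
clf = DynamicModel.create_binary_classifier(
    input_dim=10,
    hidden_dims=[32, 16],
    output_dim=1,
    activation='relu',
    dropout_rate=0.2,
)
probs = clf(torch.randn(4, 10))  # dummy batch -> per-sample probabilities in [0, 1]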
+ """ + + layers = [] + + for i in range(len(hidden_dims)): + if i == 0: + layers.append(nn.Linear(input_dim, hidden_dims[0])) + else: + layers.append(nn.Linear(hidden_dims[i - 1], hidden_dims[i])) + + if batch_norm: + layers.append(nn.BatchNorm1d(hidden_dims[i])) + + activation_layer = nn.ReLU() if activation == 'relu' else nn.Sigmoid() + layers.append(activation_layer) + + if dropout_rate > 0.0: + layers.append(nn.Dropout(dropout_rate)) + + layers.append(nn.Linear(hidden_dims[-1], output_dim)) + layers.append(nn.Sigmoid()) + + model = nn.Sequential(*layers) + + if use_gpu: + model = model.cuda() + + return model + + # Create a multi-class classifier model + @staticmethod + def create_multiclass_classifier(input_dim, hidden_dims, output_dim, activation='relu', dropout_rate=0.0, + batch_norm=False, use_gpu=False): + """ + Creates a multiclass classifier neural network model with customizable architecture. + + Args: + input_dim (int): Dimension of the input data. + hidden_dims (List[int]): List of dimensions for hidden layers. + output_dim (int): Dimension of the output (number of classes). + activation (str, optional): Activation function for hidden layers. Default is 'relu'. + dropout_rate (float, optional): Dropout rate for regularization. Default is 0.0 (no dropout). + batch_norm (bool, optional): Whether to apply batch normalization. Default is False. + use_gpu (bool, optional): Whether to use GPU acceleration. Default is False. + + Returns: + torch.nn.Module: Created PyTorch model. + """ + layers = [] + + for i in range(len(hidden_dims)): + if i == 0: + layers.append(nn.Linear(input_dim, hidden_dims[0])) + else: + layers.append(nn.Linear(hidden_dims[i - 1], hidden_dims[i])) + + if batch_norm: + layers.append(nn.BatchNorm1d(hidden_dims[i])) + + activation_layer = nn.ReLU() if activation == 'relu' else nn.Sigmoid() + layers.append(activation_layer) + + if dropout_rate > 0.0: + layers.append(nn.Dropout(dropout_rate)) + + layers.append(nn.Linear(hidden_dims[-1], output_dim)) + layers.append(nn.LogSoftmax(dim=1)) + + model = nn.Sequential(*layers) + + if use_gpu: + model = model.cuda() + + return model + + # Create a linear regressor model + @staticmethod + def create_linear_regressor(input_dim, output_dim, use_gpu=False): + """ + Creates a linear regressor neural network model. + + Args: + input_dim (int): Dimension of the input data. + output_dim (int): Dimension of the output. + + Returns: + torch.nn.Module: Created PyTorch model. + """ + class LinearRegressionModel(nn.Module): + def __init__(self): + super(LinearRegressionModel, self).__init__() + self.linear = nn.Linear(input_dim, output_dim) + + def forward(self, x): + return self.linear(x) + + model = LinearRegressionModel() + + if use_gpu: + model = model.cuda() + + return model + + # Create a logistic regressor model + @staticmethod + def create_logistic_regressor(input_dim, use_gpu=False): + """ + Creates a logistic regressor neural network model. + + Args: + input_dim (int): Dimension of the input data. + + Returns: + torch.nn.Module: Created PyTorch model. 
+ """ + class LogisticRegressionModel(nn.Module): + def __init__(self): + super(LogisticRegressionModel, self).__init__() + self.linear = nn.Linear(input_dim, 1) + + def forward(self, x): + return torch.sigmoid(self.linear(x)) + + model = LogisticRegressionModel() + + if use_gpu: + model = model.cuda() + + return model + + @staticmethod + def create_convolutional_neural_network(input_channels, output_channels, kernel_size, use_gpu=False): + """ + Creates a convolutional neural network (CNN) model. + + Args: + input_channels (int): Number of input channels. + output_channels (int): Number of output channels. + kernel_size (int): Size of the convolutional kernel. + + Returns: + torch.nn.Module: Created PyTorch model. + """ + + model = nn.Sequential( + nn.Conv2d(input_channels, output_channels, kernel_size), + nn.ReLU(), + nn.MaxPool2d(2) + ) + + if use_gpu: + model = model.cuda() + + return model + + @staticmethod + def create_recurrent_neural_network(input_size, hidden_size, use_gpu=False): + """ + Creates a recurrent neural network (RNN) model. + + Args: + input_size (int): Size of the input. + hidden_size (int): Size of the hidden layer. + + Returns: + torch.nn.Module: Created PyTorch model. + """ + + model = nn.RNN(input_size, hidden_size, batch_first=True) + + if use_gpu: + model = model.cuda() + + return model + + @staticmethod + def create_lstm_network(input_size, hidden_size, use_gpu=False): + """ + Creates a Long Short-Term Memory (LSTM) network model. + + Args: + input_size (int): Size of the input layer. + hidden_size (int): Size of the hidden layer. + + Returns: + torch.nn.Module: Created PyTorch model. + """ + + model = nn.LSTM(input_size, hidden_size, batch_first=True) + + if use_gpu: + model = model.cuda() + + return model + + # Create the dynamic model + def create_model(self, model_type: str, params_dict={}) -> torch.nn.Module: + """ + Create a specific type of model dynamically based on the given parameters. + + Args: + model_type (str): Type of the model to create ('Binary Classifier', 'Multiclass Classifier', 'Linear Regressor', 'Logistic Regressor', 'SVM', 'Neural Network Classifier', 'Convolutional Neural Network', 'Recurrent Neural Network', 'LSTM Network', 'Autoencoder'). + params_dict (dict): Dictionary containing parameters for model creation. + + Returns: + torch.nn.Module: Created PyTorch model. 
+ """ + if model_type == 'Binary Classifier': + return self.create_binary_classifier( + params_dict['input_dim'], params_dict['hidden_dims'], + params_dict['output_dim'], params_dict.get('activation', 'relu'), + params_dict.get('dropout_rate', 0.0), params_dict.get('batch_norm', False), + params_dict.get('use_gpu', False) + ) + elif model_type == 'Multiclass Classifier': + return self.create_multiclass_classifier( + params_dict['input_dim'], params_dict['hidden_dims'], + params_dict['output_dim'], params_dict.get('activation', 'relu'), + params_dict.get('dropout_rate', 0.0), params_dict.get('batch_norm', False), + params_dict.get('use_gpu', False) + ) + elif model_type == 'Linear Regressor': + return self.create_linear_regressor( + params_dict['input_dim'], params_dict['output_dim'], + params_dict.get('use_gpu', False) + ) + elif model_type == 'Logistic Regressor': + return self.create_logistic_regressor( + params_dict['input_dim'], params_dict.get('use_gpu', False) + ) + elif model_type == 'Neural Network Classifier': + return self.create_neural_network_classifier( + params_dict['input_dim'], params_dict['output_dim'], + params_dict['hidden_dims'], params_dict.get('activation', 'relu'), + params_dict.get('dropout_rate', 0.0), params_dict.get('batch_norm', False), + params_dict.get('num_layers', 2), params_dict.get('use_gpu', False) + ) + elif model_type == 'Convolutional Neural Network': + return self.create_convolutional_neural_network( + params_dict['input_channels'], params_dict['output_channels'], + params_dict['kernel_size'], params_dict.get('use_gpu', False) + ) + elif model_type == 'Recurrent Neural Network': + return self.create_recurrent_neural_network( + params_dict['input_size'], params_dict['hidden_size'], + params_dict.get('use_gpu', False) + ) + elif model_type == 'LSTM Network': + return self.create_lstm_network( + params_dict['input_size'], params_dict['hidden_size'], + params_dict.get('use_gpu', False) + ) + elif model_type == 'Autoencoder': + return self.create_autoencoder( + params_dict['input_size'], params_dict['encoder_hidden_size'], + params_dict.get('use_gpu', False) + ) + else: + raise ValueError("Invalid model type provided") + + + diff --git a/build/lib/MEDfl/LearningManager/federated_dataset.py b/build/lib/MEDfl/LearningManager/federated_dataset.py new file mode 100644 index 0000000..4a451cc --- /dev/null +++ b/build/lib/MEDfl/LearningManager/federated_dataset.py @@ -0,0 +1,60 @@ +from MEDfl.NetManager.net_helper import * +from MEDfl.NetManager.net_manager_queries import * +from MEDfl.NetManager.database_connector import DatabaseManager + +class FederatedDataset: + def __init__( + self, + name: str, + train_nodes: list, + test_nodes: list, + trainloaders: list, + valloaders: list, + testloaders: list, + ): + """ + Represents a Federated Dataset. + + :param name: Name of the Federated Dataset. + :param train_nodes: List of train nodes. + :param test_nodes: List of test nodes. + :param trainloaders: List of train data loaders. + :param valloaders: List of validation data loaders. + :param testloaders: List of test data loaders. + """ + self.name = name + self.train_nodes = train_nodes + self.test_nodes = test_nodes + self.trainloaders = trainloaders + self.valloaders = valloaders + self.testloaders = testloaders + self.size = len(self.trainloaders[0].dataset[0][0]) + + db_manager = DatabaseManager() + db_manager.connect() + self.eng = db_manager.get_connection() + + def create(self, FLsetupId: int): + """ + Create a new Federated Dataset in the database. 
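To make the constructor arguments above concrete, here is a rough sketch of assembling a FederatedDataset from in-memory loaders; the node names and sample counts are invented, and a configured MEDfl database is assumed because the constructor opens a connection:

import torch
from torch.utils.data import DataLoader, TensorDataset

from MEDfl.LearningManager.federated_dataset import FederatedDataset

def make_loader(n_samples, n_features=8):
    # Synthetic binary-classification data standing in for one site's table
    X = torch.randn(n_samples, n_features)
    y = torch.randint(0, 2, (n_samples,)).float()
    return DataLoader(TensorDataset(X, y), batch_size=32)

fed_ds = FederatedDataset(
    name="demo_fed_dataset",            # hypothetical dataset name
    train_nodes=["site_a", "site_b"],   # hypothetical node names
    test_nodes=["site_b"],
    trainloaders=[make_loader(200), make_loader(180)],
    valloaders=[make_loader(40), make_loader(40)],
    testloaders=[make_loader(50)],
)
# fed_ds.size is inferred from the first training sample (8 features here)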
+ + :param FLsetupId: The FLsetup ID associated with the Federated Dataset. + """ + query_params = {"name": self.name, "FLsetupId": FLsetupId} + fedDataId = get_feddataset_id_from_name(self.name) + if fedDataId : + self.id = fedDataId + else: + self.eng.execute(text(INSERT_FLDATASET_QUERY), query_params) + self.id = get_feddataset_id_from_name(self.name) + + + def update(self, FLpipeId: int, FedId: int): + """ + Update the FLpipe ID associated with the Federated Dataset in the database. + + :param FLpipeId: The new FLpipe ID to be updated. + :param FedId: The Federated Dataset ID. + """ + query_params = {"FLpipeId": FLpipeId, "FedId": FedId} + self.eng.execute(text(UPDATE_FLDATASET_QUERY), **query_params) diff --git a/build/lib/MEDfl/LearningManager/flpipeline.py b/build/lib/MEDfl/LearningManager/flpipeline.py new file mode 100644 index 0000000..67c5bd5 --- /dev/null +++ b/build/lib/MEDfl/LearningManager/flpipeline.py @@ -0,0 +1,192 @@ +import datetime +from typing import List +import json +import pandas as pd + + +# File: create_query.py +from sqlalchemy import text +from torch.utils.data import DataLoader, TensorDataset +import torch + +from MEDfl.LearningManager.server import FlowerServer +from MEDfl.LearningManager.utils import params, test +from MEDfl.NetManager.net_helper import get_flpipeline_from_name +from MEDfl.NetManager.net_manager_queries import (CREATE_FLPIPELINE_QUERY, + DELETE_FLPIPELINE_QUERY , CREATE_TEST_RESULTS_QUERY) +from MEDfl.NetManager.database_connector import DatabaseManager + +def create_query(name, description, creation_date, result): + query = text( + f"INSERT INTO FLpipeline(name, description, creation_date, results) " + f"VALUES ('{name}', '{description}', '{creation_date}', '{result}')" + ) + return query + + + +class FLpipeline: + """ + FLpipeline class for managing Federated Learning pipelines. + + Attributes: + name (str): The name of the FLpipeline. + description (str): A description of the FLpipeline. + server (FlowerServer): The FlowerServer object associated with the FLpipeline. + + Methods: + __init__(self, name: str, description: str, server: FlowerServer) -> None: + Initialize FLpipeline with the specified name, description, and server. + + + """ + + def __init__( + self, name: str, description: str, server: FlowerServer + ) -> None: + self.name = name + self.description = description + self.server = server + self.validate() + + db_manager = DatabaseManager() + db_manager.connect() + self.eng = db_manager.get_connection() + + def validate(self) -> None: + """ + Validate the name, description, and server attributes. + Raises: + TypeError: If the name is not a string, the description is not a string, + or the server is not a FlowerServer object. + """ + if not isinstance(self.name, str): + raise TypeError("name argument must be a string") + + if not isinstance(self.description, str): + raise TypeError("description argument must be a string") + + # if not isinstance(self.server, FlowerServer): + # raise TypeError("server argument must be a FlowerServer") + + def create(self, result: str) -> None: + """ + Create a new FLpipeline entry in the database with the given result. + + Args: + result (str): The result string to store in the database. 
+ + """ + creation_date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + query = CREATE_FLPIPELINE_QUERY.format( + name=self.name, + description=self.description, + creation_date=creation_date, + result=result, + ) + self.eng.execute(text(query)) + self.id = get_flpipeline_from_name(self.name) + try: + self.server.fed_dataset.update( + FLpipeId=self.id, FedId=self.server.fed_dataset.id + ) + except: + pass + + def delete(self) -> None: + """ + Delete the FLpipeline entry from the database based on its name. + + Note: This is a placeholder method and needs to be implemented based on your specific database setup. + + """ + # Placeholder code for deleting the FLpipeline entry from the database based on the name. + # You need to implement the actual deletion based on your database setup. + self.eng.execute(DELETE_FLPIPELINE_QUERY.format(self.name)) + + + def test_by_node(self, node_name: str, test_frac=1) -> dict: + """ + Test the FLpipeline by node with the specified test_frac. + + Args: + node_name (str): The name of the node to test. + test_frac (float, optional): The fraction of the test data to use. Default is 1. + + Returns: + dict: A dictionary containing the node name and the classification report. + + """ + idx = self.server.fed_dataset.test_nodes.index(node_name) + global_model, test_loader = ( + self.server.global_model, + self.server.fed_dataset.testloaders[idx], + ) + + # Move model to GPU if available + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + global_model.model.to(device) + + # Prepare test data + test_data = test_loader.dataset + num_samples = int(test_frac * len(test_data)) + test_data = TensorDataset(test_data[:num_samples][0].to(device), test_data[:num_samples][1].to(device)) + + # Create DataLoader for test data + test_loader = DataLoader(test_data, batch_size=params["test_batch_size"]) + + # Perform testing + classification_report = test(model=global_model.model, test_loader=test_loader, device=device) + + return { + "node_name": node_name, + "classification_report": str(classification_report), + } + + + def auto_test(self, test_frac=1) -> List[dict]: + """ + Automatically test the FLpipeline on all nodes with the specified test_frac. + + Args: + test_frac (float, optional): The fraction of the test data to use. Default is 1. + + Returns: + List[dict]: A list of dictionaries containing the node names and the classification reports. 
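As a rough usage sketch of the two test helpers above (not runnable on its own): it assumes `server` is a FlowerServer that has already been trained, whose fed_dataset lists the test nodes, and that a results database is configured; the names are hypothetical:

from MEDfl.LearningManager.flpipeline import FLpipeline

# `server` is assumed to be an existing, trained FlowerServer
pipeline = FLpipeline(
    name="demo_pipeline",                    # hypothetical pipeline name
    description="evaluate the global model on every test node",
    server=server,
)
reports = pipeline.auto_test(test_frac=0.5)  # one classification report per test node
print(reports[0]["node_name"], reports[0]["classification_report"])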
+ + """ + result = [ + self.test_by_node(node, test_frac) + for node in self.server.fed_dataset.test_nodes + ] + self.create("\n".join(str(res).replace("'", '"') for res in result)) + + # Store the test results in the database + for entry in result: + node_name = entry['node_name'] + classification_report_str = entry['classification_report'] + + # Convert the 'classification_report' string to a dictionary + classification_report_dict = json.loads(classification_report_str.replace("'", "\"")) + try: + # Insert record into the 'testResults' table + query = CREATE_TEST_RESULTS_QUERY.format( + pipelineId = self.id, + nodeName = node_name , + confusion_matrix = json.dumps(classification_report_dict['confusion matrix']), + accuracy =classification_report_dict['Accuracy'] , + sensivity = classification_report_dict['Sensitivity/Recall'] , + ppv = classification_report_dict['PPV/Precision'] , + npv= classification_report_dict['NPV'] , + f1score= classification_report_dict['F1-score'] , + fpr= classification_report_dict['False positive rate'] , + tpr= classification_report_dict['True positive rate'] + ) + self.eng.execute(text(query)) + except Exception as e: + # This block will catch any other exceptions + print(f"An unexpected error occurred: {e}") + + + + return result diff --git a/build/lib/MEDfl/LearningManager/model.py b/build/lib/MEDfl/LearningManager/model.py new file mode 100644 index 0000000..f68dc73 --- /dev/null +++ b/build/lib/MEDfl/LearningManager/model.py @@ -0,0 +1,224 @@ +#!/usr/bin/env python3 +# forked from https://github.com/pythonlessons/mltu/blob/main/mltu/torch/model.py + +import typing +from collections import OrderedDict +from typing import Dict, List, Optional, Tuple + +import numpy as np +import torch +import torch.nn as nn +from sklearn.metrics import accuracy_score,roc_auc_score + +from .utils import params + + +class Model: + """ + Model class for training and testing PyTorch neural networks. + + Attributes: + model (torch.nn.Module): PyTorch neural network. + optimizer (torch.optim.Optimizer): PyTorch optimizer. + criterion (typing.Callable): Loss function. + """ + + def __init__( + self, + model: torch.nn.Module, + optimizer: torch.optim.Optimizer, + criterion: typing.Callable, + ) -> None: + """ + Initialize Model class with the specified model, optimizer, and criterion. + + Args: + model (torch.nn.Module): PyTorch neural network. + optimizer (torch.optim.Optimizer): PyTorch optimizer. + criterion (typing.Callable): Loss function. + """ + self.model = model + self.optimizer = optimizer + self.criterion = criterion + # Get device on which model is running + self.validate() + + def validate(self) -> None: + """ + Validate model and optimizer. + """ + if not isinstance(self.model, torch.nn.Module): + raise TypeError("model argument must be a torch.nn.Module") + + if not isinstance(self.optimizer, torch.optim.Optimizer): + raise TypeError( + "optimizer argument must be a torch.optim.Optimizer" + ) + + def get_parameters(self) -> List[np.ndarray]: + """ + Get the parameters of the model as a list of NumPy arrays. + + Returns: + List[np.ndarray]: The parameters of the model as a list of NumPy arrays. + """ + return [ + val.cpu().numpy() for _, val in self.model.state_dict().items() + ] + + def set_parameters(self, parameters: List[np.ndarray]) -> None: + """ + Set the parameters of the model from a list of NumPy arrays. + + Args: + parameters (List[np.ndarray]): The parameters to be set.
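A minimal sketch of the parameter round trip that the federated loop relies on, using a toy network; the import path assumes the package layout shown in this diff:

import torch
import torch.nn as nn

from MEDfl.LearningManager.model import Model

net = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1))
wrapped = Model(net, torch.optim.SGD(net.parameters(), lr=0.01), nn.BCEWithLogitsLoss())

weights = wrapped.get_parameters()  # list of NumPy arrays, one per state_dict entry
wrapped.set_parameters(weights)     # restores them in state_dict key order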
+ """ + params_dict = zip(self.model.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) + self.model.load_state_dict(state_dict, strict=True) + + def train( + self, train_loader, epoch, device, privacy_engine, diff_priv=False + ) -> float: + """ + Train the model on the given train_loader for one epoch. + + Args: + train_loader: The data loader for training data. + epoch (int): The current epoch number. + device: The device on which to perform the training. + privacy_engine: The privacy engine used for differential privacy (if enabled). + diff_priv (bool, optional): Whether differential privacy is used. Default is False. + + Returns: + float: The value of epsilon used in differential privacy. + """ + self.model.train() + epsilon = 0 + losses = [] + top1_acc = [] + + for i, (X_train, y_train) in enumerate(train_loader): + X_train, y_train = X_train.to(device), y_train.to(device) + + self.optimizer.zero_grad() + + # compute output + y_hat = torch.squeeze(self.model(X_train), 1) + loss = self.criterion(y_hat, y_train) + + preds = np.argmax(y_hat.detach().cpu().numpy(), axis=0) + labels = y_train.detach().cpu().numpy() + + # measure accuracy and record loss + acc = (preds == labels).mean() + + losses.append(loss.item()) + top1_acc.append(acc) + + loss.backward() + self.optimizer.step() + + if diff_priv: + epsilon = privacy_engine.get_epsilon(float(params["DELTA"])) + + if (i + 1) % 10 == 0: + if diff_priv: + epsilon = privacy_engine.get_epsilon(float(params["DELTA"])) + print( + f"\tTrain Epoch: {epoch} \t" + f"Loss: {np.mean(losses):.6f} " + f"Acc@1: {np.mean(top1_acc) * 100:.6f} " + f"(ε = {epsilon:.2f}, δ = {params['DELTA']})" + ) + else: + print( + f"\tTrain Epoch: {epoch} \t" + f"Loss: {np.mean(losses):.6f} " + f"Acc@1: {np.mean(top1_acc) * 100:.6f}" + ) + + return epsilon + + def evaluate(self, val_loader, device=torch.device("cpu")) -> Tuple[float, float]: + """ + Evaluate the model on the given validation data. + + Args: + val_loader: The data loader for validation data. + device: The device on which to perform the evaluation. Default is 'cpu'. + + Returns: + Tuple[float, float]: The evaluation loss and accuracy. + """ + correct, total, loss, accuracy, auc = 0, 0, 0.0, [], [] + self.model.eval() + + with torch.no_grad(): + for X_test, y_test in val_loader: + X_test, y_test = X_test.to(device), y_test.to(device) # Move data to device + + y_hat = torch.squeeze(self.model(X_test), 1) + + + criterion = self.criterion.to(y_hat.device) + loss += criterion(y_hat, y_test).item() + + + # Move y_hat to CPU for accuracy computation + y_hat_cpu = y_hat.cpu().detach().numpy() + accuracy.append(accuracy_score(y_test.cpu().numpy(), y_hat_cpu.round())) + + # Move y_test to CPU for AUC computation + y_test_cpu = y_test.cpu().numpy() + y_prob_cpu = y_hat.cpu().detach().numpy() + if (len(np.unique(y_test_cpu)) != 1): + auc.append(roc_auc_score(y_test_cpu, y_prob_cpu)) + + total += y_test.size(0) + correct += np.sum(y_hat_cpu.round() == y_test_cpu) + + loss /= len(val_loader.dataset) + return loss, np.mean(accuracy), np.mean(auc) + + + @staticmethod + def save_model(model , model_name:str): + """ + Saves a PyTorch model to a file. + + Args: + model (torch.nn.Module): PyTorch model to be saved. + model_name (str): Name of the model file. + + Raises: + Exception: If there is an issue during the saving process. 
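A hedged sketch of driving one local training epoch and an evaluation pass with this wrapper on synthetic data; the sizes are invented, and privacy_engine is only consulted when diff_priv=True, so None suffices here:

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

from MEDfl.LearningManager.model import Model

# Synthetic binary-classification batch standing in for a client's local data
X, y = torch.randn(64, 8), torch.randint(0, 2, (64,)).float()
loader = DataLoader(TensorDataset(X, y), batch_size=16)

net = nn.Sequential(nn.Linear(8, 1))
wrapped = Model(net, torch.optim.SGD(net.parameters(), lr=0.01), nn.BCEWithLogitsLoss())

device = torch.device("cpu")
wrapped.train(loader, epoch=0, device=device, privacy_engine=None, diff_priv=False)
loss, acc, auc = wrapped.evaluate(loader, device=device)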
+ + Returns: + None + """ + try: + torch.save(model, '../../notebooks/.ipynb_checkpoints/trainedModels/' + model_name + ".pth") + except Exception as e: + raise Exception(f"Error saving the model: {str(e)}") + + @staticmethod + def load_model(model_path: str): + """ + Loads a PyTorch model from a file. + + Args: + model_path (str): Path to the model file to be loaded. + + Returns: + torch.nn.Module: Loaded PyTorch model. + """ + # Ensure models are loaded onto the CPU when CUDA is not available + torch_kwargs = {"weights_only": False} + if torch.cuda.is_available(): + loaded_model = torch.load(model_path , **torch_kwargs) + else: + loaded_model = torch.load(model_path, map_location=torch.device('cpu') , **torch_kwargs) + return loaded_model + + diff --git a/build/lib/MEDfl/LearningManager/params.yaml b/build/lib/MEDfl/LearningManager/params.yaml new file mode 100644 index 0000000..83fc6fa --- /dev/null +++ b/build/lib/MEDfl/LearningManager/params.yaml @@ -0,0 +1,14 @@ +DELTA: 1.0e-05 +EPSILON: 5.0 +MAX_GRAD_NORM: 1.0 +diff_privacy: true +lr: 0.001 +min_evalclient: 2 +num_rounds: 20 +optimizer: SGD +path_to_master_csv: /home/local/USHERBROOKE/saho6810/MEDfl/code/MEDfl/notebooks/data/masterDataSet/miniDiabete.csv +path_to_test_csv: /home/local/USHERBROOKE/saho6810/MEDfl/code/MEDfl/notebooks/data/masterDataSet/Mimic_train.csv +task: BinaryClassification +test_batch_size: 1 +train_batch_size: 32 +train_epochs: 20 diff --git a/build/lib/MEDfl/LearningManager/params_optimiser.py b/build/lib/MEDfl/LearningManager/params_optimiser.py new file mode 100644 index 0000000..4d4a1ea --- /dev/null +++ b/build/lib/MEDfl/LearningManager/params_optimiser.py @@ -0,0 +1,442 @@ +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +import seaborn as sns +import torch +import torch.nn as nn +import torch.optim as optim +import torch.nn.functional as F +from torch.utils.data import TensorDataset, DataLoader +from sklearn.model_selection import GridSearchCV, train_test_split +from sklearn.base import BaseEstimator +from sklearn.metrics import make_scorer, precision_score, recall_score, accuracy_score, f1_score,roc_auc_score, balanced_accuracy_score +import optuna + +from MEDfl.LearningManager.model import Model +from MEDfl.LearningManager.strategy import Strategy +from MEDfl.LearningManager.server import FlowerServer +from MEDfl.LearningManager.flpipeline import FLpipeline + +class BinaryClassifier(nn.Module): + def __init__(self, input_size, num_layers, layer_size): + super(BinaryClassifier, self).__init__() + + # Input layer + self.layers = [nn.Linear(input_size, layer_size)] + + # Hidden layers + for _ in range(num_layers - 1): + self.layers.append(nn.Linear(layer_size, layer_size)) + + # Output layer + self.layers.append(nn.Linear(layer_size, 1)) + + # ModuleList to handle dynamic number of layers + self.layers = nn.ModuleList(self.layers) + + def forward(self, x): + for layer in self.layers[:-1]: + x = F.relu(layer(x)) + x = self.layers[-1](x) + return x + +class CustomPyTorchClassifier(BaseEstimator): + def __init__(self, hidden_dim=10, lr=0.001, pos_weight=1, th=0.5, max_epochs=10, batch_size=32): + self.hidden_dim = hidden_dim + self.lr = lr + self.pos_weight = pos_weight + self.max_epochs = max_epochs + self.batch_size = batch_size + self.th = th + self.model = None + + def fit(self, X, y): + if isinstance(X, torch.Tensor): + X = X.numpy() + if isinstance(y, torch.Tensor): + y = y.numpy() + + input_dim = X.shape[1] + self.model = nn.Sequential( + nn.Linear(input_dim, self.hidden_dim), 
+ nn.ReLU(), + nn.Linear(self.hidden_dim, 1), + nn.Sigmoid() + ) + + criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(self.pos_weight)) + optimizer = optim.Adam(self.model.parameters(), lr=self.lr) + + train_data = TensorDataset(torch.from_numpy(X).float(), torch.from_numpy(y).float()) + train_loader = DataLoader(train_data, batch_size=self.batch_size, shuffle=True) + + self.model.train() + for epoch in range(self.max_epochs): + for inputs, labels in train_loader: + optimizer.zero_grad() + outputs = self.model(inputs) + loss = criterion(outputs.squeeze(), labels) + loss.backward() + optimizer.step() + return self + + def predict(self, X): + if isinstance(X, torch.Tensor): + X = X.numpy() + + self.model.eval() + with torch.no_grad(): + outputs = self.model(torch.from_numpy(X).float()) + predictions = (outputs.squeeze() > self.th).float().numpy() + return predictions + + def score(self, X, y): + predictions = self.predict(X) + return accuracy_score(y, predictions) + + +class ParamsOptimiser: + def __init__(self, X_train = None, y_train=None, X_test=None, y_test=None): + if isinstance(X_train, pd.DataFrame): + X_train = X_train.to_numpy() + if isinstance(y_train, pd.Series): + y_train = y_train.to_numpy() + if isinstance(X_test, pd.DataFrame): + X_test = X_test.to_numpy() + if isinstance(y_test, pd.Series): + y_test = y_test.to_numpy() + + self.X_train = X_train + self.y_train = y_train + self.X_test = X_test + self.y_test = y_test + + def perform_grid_search(self, param_grid, scoring_metric='recall', cv=3, verbose=1): + pytorch_model = CustomPyTorchClassifier() + scorer = make_scorer(recall_score, greater_is_better=True) + + if scoring_metric == 'precision': + scorer = make_scorer(precision_score) + elif scoring_metric == 'accuracy': + scorer = make_scorer(accuracy_score) + elif scoring_metric == 'recall': + scorer = make_scorer(recall_score) + elif scoring_metric == 'f1': + scorer = make_scorer(f1_score) + + grid_search = GridSearchCV(pytorch_model, param_grid, scoring=scorer, cv=cv, refit=scoring_metric, verbose=verbose) + grid_search.fit(self.X_train, self.y_train) + + self.grid_search_results = grid_search # Save the grid search results + + return grid_search + + # Inside the CustomModelTrainer class + def plot_results(self, params_to_plot=None): + results = pd.DataFrame(self.grid_search_results.cv_results_) + + if params_to_plot is None: + # Create a column for configuration details + results['config'] = results['params'].apply(lambda x: str(x)) + + # Visualize mean test scores along with configurations + plt.figure(figsize=(15, 8)) + bar_plot = plt.bar(results.index, results['mean_test_score'], color='blue', alpha=0.7) + plt.xticks(results.index, results['config'], rotation='vertical', fontsize=8) + plt.ylabel('Mean Test Score') + plt.title('Mean Test Scores for Each Configuration') + plt.tight_layout() + + # Add values on top of bars + for bar, score in zip(bar_plot, results['mean_test_score']): + plt.text(bar.get_x() + bar.get_width() / 2 - 0.15, bar.get_height() + 0.01, f'{score:.3f}', fontsize=8) + + plt.show() + return + + try: + # Dynamically get the column names for the specified scoring metric + mean_test_col = f'mean_test_{params_to_plot[0]}' + param_cols = [f'param_{param}' for param in params_to_plot] + + if len(params_to_plot) == 1: + # Plotting the heatmap for a single parameter + plt.figure(figsize=(8, 6)) + sns.heatmap(results.pivot_table(index=param_cols[0]), + annot=True, cmap='YlGnBu', fmt=".3f", cbar_kws={'label': mean_test_col}) + 
plt.title(mean_test_col.capitalize()) + plt.show() + elif len(params_to_plot) == 2: + # Create a pair plot for two parameters + plt.figure(figsize=(8, 6)) + scores = results.pivot_table(index=param_cols[0], columns=param_cols[1], values=f'mean_test_score', aggfunc="mean") + sns.heatmap(scores, annot=True, cmap='YlGnBu', fmt=".3f", cbar_kws={'label': mean_test_col}) + plt.title(mean_test_col.capitalize()) + plt.show() + else: + print("Invalid number of parameters to plot. You can provide either one or two parameters.") + except KeyError as e: + print(f"Error: {e}. Make sure the specified scoring metric exists in the results DataFrame.") + + + + def optuna_optimisation(self, direction, params): + # Create the data loaders here + train_data = TensorDataset(torch.from_numpy(self.X_train).float(), torch.from_numpy(self.y_train).float()) + test_data = TensorDataset(torch.from_numpy(self.X_test).float(), torch.from_numpy(self.y_test).float()) + + + + def objective(trial): + + batch_size=trial.suggest_int('batch_size', **params['batch_size']) + + train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True) + test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False) + + # Create the model with the suggested hyperparameters + model = BinaryClassifier(input_size=self.X_train.shape[1], + num_layers=trial.suggest_int('num_layers', **params['num_layers']) , + layer_size=trial.suggest_int('hidden_size', **params['hidden_size'])) + + # Define the loss function and optimizer + criterion = nn.BCEWithLogitsLoss() + optimizer_name = trial.suggest_categorical('optimizer', params['optimizer']) + learning_rate = trial.suggest_float('learning_rate', **params['learning_rate']) + + + if optimizer_name == 'Adam': + optimizer = optim.Adam(model.parameters(), lr=learning_rate) + elif optimizer_name == 'SGD': + optimizer = optim.SGD(model.parameters(), lr=learning_rate) + elif optimizer_name == 'RMSprop': + optimizer = optim.RMSprop(model.parameters(), lr=learning_rate) + + # Training loop + num_epochs = trial.suggest_int('num_epochs', **params['num_epochs']) + for epoch in range(num_epochs): + model.train() + for batch_X, batch_y in train_loader: + optimizer.zero_grad() + outputs = model(batch_X) + loss = criterion(outputs.squeeze(), batch_y) + loss.backward() + optimizer.step() + + # Evaluation + model.eval() + predictions = [] + true_labels = [] + with torch.no_grad(): + for batch_X, batch_y in test_loader: + outputs = model(batch_X) + predictions.extend(torch.sigmoid(outputs).numpy()) + true_labels.extend(batch_y.numpy()) + + # Calculate F1 score + # f1 = f1_score(true_labels, (np.array(predictions) > 0.5).astype(int)) + auc = roc_auc_score(true_labels, predictions) + + trial.report(auc, epoch) + + # Handle pruning based on the intermediate value + if trial.should_prune(): + raise optuna.TrialPruned() + + return auc + + # Create an Optuna study + study = optuna.create_study(direction=direction) + study.optimize(objective, n_trials=params['n_trials']) + + self.study = study + + # Get the best hyperparameters + best_params = study.best_params + print(f"Best Hyperparameters: {best_params}") + + return study + + def train_optimized_model(self ,trial ,th_min , th_max): + + best_params = self.study.best_params + + threshold = trial.suggest_float('threashhold', th_min, th_max, log=True) + + train_data = TensorDataset(torch.from_numpy(self.X_train).float(), torch.from_numpy(self.y_train).float()) + test_data = TensorDataset(torch.from_numpy(self.X_test).float(), 
torch.from_numpy(self.y_test).float()) + + train_loader = DataLoader(train_data, batch_size=best_params['batch_size'], shuffle=True) + test_loader = DataLoader(test_data, batch_size=best_params['batch_size'], shuffle=False) + + + # Use the best hyperparameters to train the final model + final_model = BinaryClassifier(input_size=self.X_train.shape[1], layer_size=best_params['hidden_size'] , num_layers=best_params['num_layers']) + final_optimizer = self.get_optimizer(best_params['optimizer'], final_model.parameters(), best_params['learning_rate']) + final_criterion = nn.BCEWithLogitsLoss() + + num_epochs = best_params['num_epochs'] + for epoch in range(num_epochs): + final_model.train() + for batch_X, batch_y in train_loader: + final_optimizer.zero_grad() + outputs = final_model(batch_X) + loss = final_criterion(outputs.squeeze(), batch_y) + loss.backward() + final_optimizer.step() + + # Evaluate the final model on the test set + final_model.eval() + with torch.no_grad(): + predictions = [] + true_labels = [] + for batch_X, batch_y in test_loader: + outputs = final_model(batch_X) + predictions.extend(torch.sigmoid(outputs).numpy()) + true_labels.extend(batch_y.numpy()) + + final_balanced_acc = balanced_accuracy_score(true_labels, (np.array(predictions) > threshold).astype(int)) + print(f"Model balanced accuracy: {final_balanced_acc}") + + return final_balanced_acc + + def get_optimizer(self, optimizer_name, parameters, learning_rate): + if optimizer_name == 'Adam': + return optim.Adam(parameters, lr=learning_rate) + elif optimizer_name == 'SGD': + return optim.SGD(parameters, lr=learning_rate) + elif optimizer_name == 'RMSprop': + return optim.RMSprop(parameters, lr=learning_rate) + else: + raise ValueError(f"Unknown optimizer: {optimizer_name}") + + def perform_grid_search(self, param_grid, scoring_metric='recall', cv=3, verbose=1): + pytorch_model = CustomPyTorchClassifier() + scorer = make_scorer(recall_score, greater_is_better=True) + + if scoring_metric == 'precision': + scorer = make_scorer(precision_score) + elif scoring_metric == 'accuracy': + scorer = make_scorer(accuracy_score) + elif scoring_metric == 'recall': + scorer = make_scorer(recall_score) + elif scoring_metric == 'f1': + scorer = make_scorer(f1_score) + + grid_search = GridSearchCV(pytorch_model, param_grid, scoring=scorer, cv=cv, refit=scoring_metric, verbose=verbose) + grid_search.fit(self.X_train, self.y_train) + + self.grid_search_results = grid_search # Save the grid search results + + return grid_search + + + def plot_param_importances(self): + return optuna.visualization.plot_param_importances(self.study) + + def plot_slice(self , params): + return optuna.visualization.plot_slice(self.study , params=params) + + def plot_parallel_coordinate(self): + return optuna.visualization.plot_parallel_coordinate(self.study) + + def plot_rank(self , params=None): + return optuna.visualization.plot_rank(self.study , params=params) + + def plot_optimization_history(self): + return optuna.visualization.plot_optimization_history(self.study) + + def optimize_model_threashhold(self , n_trials , th_min , th_max): + additional_params = {'th_min': th_min, 'th_max': th_max} + + th_study = optuna.create_study(direction='maximize') + th_study.optimize(lambda trial: self.train_optimized_model(trial , **additional_params) , n_trials) + + # Get the best hyperparameters + best_params = th_study.best_params + print(f"Best Hyperparameters: {best_params}") + + return optuna.visualization.plot_rank(th_study , params=['threashhold']) + + def 
federated_params_iptim(self , params , direction, model, fl_dataset): + + def objective(trial): + + criterion = nn.BCEWithLogitsLoss() + + optimizer_name = trial.suggest_categorical('optimizer', params['optimizer']) + learning_rate = trial.suggest_float('learning_rate', **params['learning_rate']) + num_rounds = trial.suggest_int('num_rounds', **params['num_rounds']) + diff_privacy = trial.suggest_int('diff_privacy', **params['diff_privacy']) + diff_privacy = True if diff_privacy == 1 else False + + if optimizer_name == 'Adam': + optimizer = optim.Adam(model.parameters(), lr=learning_rate) + elif optimizer_name == 'SGD': + optimizer = optim.SGD(model.parameters(), lr=learning_rate) + elif optimizer_name == 'RMSprop': + optimizer = optim.RMSprop(model.parameters(), lr=learning_rate) + + # Creating a new Model instance using the specific model created by DynamicModel + global_model = Model(model, optimizer, criterion) + + # Get the initial params of the model + init_params = global_model.get_parameters() + + fl_strategy = trial.suggest_categorical('fl_strategy', params['fl_strategy']) + + learning_strategy = Strategy(fl_strategy, + fraction_fit = 1.0 , + fraction_evaluate = 1.0, + min_fit_clients = 2, + min_evaluate_clients = 2, + min_available_clients = 2 , + initial_parameters=init_params) + + learning_strategy.create_strategy() + + # Create The server + server = FlowerServer(global_model, strategy = learning_strategy, num_rounds = num_rounds, + num_clients = len(fl_dataset.trainloaders), + fed_dataset = fl_dataset,diff_privacy = diff_privacy, + # You can change the resources alocated for each client based on your machine + client_resources={'num_cpus': 1.0, 'num_gpus': 0.0} + ) + + ppl_1 = FLpipeline( name ="the first fl_pipeline",description = "this is our first FL pipeline", + server = server) + + # Run the Traning of the model + history = ppl_1.server.run() + + return server.auc[len(server.auc)-1] + + + + study = optuna.create_study(direction=direction) + study.optimize(objective, n_trials=params['n_trials']) + + self.study = study + + # Get the best hyperparameters + best_params = study.best_params + print(f"Best Hyperparameters: {best_params}") + + return study + + + + + + + + + + + + + + + + + diff --git a/build/lib/MEDfl/LearningManager/plot.py b/build/lib/MEDfl/LearningManager/plot.py new file mode 100644 index 0000000..c5f6c6e --- /dev/null +++ b/build/lib/MEDfl/LearningManager/plot.py @@ -0,0 +1,229 @@ +import matplotlib.pyplot as plt +import numpy as np +import seaborn as sns + +from .utils import * + +# Replace this with your actual code for data collection +results_dict = { + ("LR: 0.001, Optimizer: Adam", "accuracy"): [0.85, 0.89, 0.92, 0.94, ...], + ("LR: 0.001, Optimizer: Adam", "loss"): [0.2, 0.15, 0.1, 0.08, ...], + ("LR: 0.01, Optimizer: SGD", "accuracy"): [0.88, 0.91, 0.93, 0.95, ...], + ("LR: 0.01, Optimizer: SGD", "loss"): [0.18, 0.13, 0.09, 0.07, ...], + ("LR: 0.1, Optimizer: Adam", "accuracy"): [0.82, 0.87, 0.91, 0.93, ...], + ("LR: 0.1, Optimizer: Adam", "loss"): [0.25, 0.2, 0.15, 0.12, ...], +} +""" +server should have: + #len = num of rounds + self.accuracies + self.losses + +Client should have + # len = num of epochs + self.accuracies + self.losses + self.epsilons + self.deltas + +#common things : LR,SGD, Aggregation + +""" + + +class AccuracyLossPlotter: + """ + A utility class for plotting accuracy and loss metrics based on experiment results. + + Args: + results_dict (dict): Dictionary containing experiment results organized by parameters and metrics. 
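To clarify the expected shape of results_dict, a small self-contained example; the labels and values are invented, and each list holds one entry per round:

from MEDfl.LearningManager.plot import AccuracyLossPlotter

# Keys are (parameter-label, metric) pairs; values are per-round measurements
results = {
    ("LR: 0.001, Optimizer: Adam", "accuracy"): [0.85, 0.89, 0.92, 0.94],
    ("LR: 0.001, Optimizer: Adam", "loss"):     [0.20, 0.15, 0.10, 0.08],
}
AccuracyLossPlotter(results).plot_accuracy_loss()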
+ + Attributes: + results_dict (dict): Dictionary containing experiment results organized by parameters and metrics. + parameters (list): List of unique parameters in the experiment results. + metrics (list): List of unique metrics in the experiment results. + iterations (range): Range of iterations (rounds or epochs) in the experiment. + """ + + def __init__(self, results_dict): + """ + Initialize the AccuracyLossPlotter with experiment results. + + Args: + results_dict (dict): Dictionary containing experiment results organized by parameters and metrics. + """ + self.results_dict = results_dict + self.parameters = list( + set([param[0] for param in results_dict.keys()]) + ) + self.metrics = list(set([param[1] for param in results_dict.keys()])) + self.iterations = range(1, len(list(results_dict.values())[0]) + 1) + + def plot_accuracy_loss(self): + """ + Plot accuracy and loss metrics for different parameters. + """ + + plt.figure(figsize=(8, 6)) + + for param in self.parameters: + for metric in self.metrics: + key = (param, metric) + values = self.results_dict[key] + plt.plot( + self.iterations, + values, + label=f"{param} ({metric})", + marker="o", + linestyle="-", + ) + + plt.xlabel("Rounds") + plt.ylabel("Accuracy / Loss") + plt.title("Accuracy and Loss by Parameters") + plt.legend() + plt.grid(True) + plt.show() + + @staticmethod + def plot_global_confusion_matrix(pipeline_name: str): + """ + Plot a global confusion matrix based on pipeline results. + + Args: + pipeline_name (str): Name of the pipeline. + + Returns: + None + """ + # Get the id of the pipeline by name + pipeline_id = get_pipeline_from_name(pipeline_name) + # get the confusion matrix pf the pipeline + confusion_matrix = get_pipeline_confusion_matrix(pipeline_id) + + # Extracting confusion matrix values + TP = confusion_matrix['TP'] + FP = confusion_matrix['FP'] + FN = confusion_matrix['FN'] + TN = confusion_matrix['TN'] + + # Creating a matrix for visualization + matrix = [[TN, FP], + [FN, TP]] + + # Plotting the confusion matrix as a heatmap + plt.figure(figsize=(6, 4)) + sns.heatmap(matrix, annot=True, fmt='d', cmap='Blues', + xticklabels=['Predicted Negative', 'Predicted Positive'], + yticklabels=['Actual Negative', 'Actual Positive']) + plt.title('Global Confusion Matrix') + plt.xlabel('Predicted label') + plt.ylabel('True label') + plt.tight_layout() + + # Display the confusion matrix heatmap + plt.show() + + @staticmethod + def plot_confusion_Matrix_by_node(node_name: str, pipeline_name: str): + """ + Plot a confusion matrix for a specific node in the pipeline. + + Args: + node_name (str): Name of the node. + pipeline_name (str): Name of the pipeline. 
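The two confusion-matrix helpers are static methods that look the pipeline up by name in the MEDfl results database, so a populated database is assumed; the pipeline and node names below are hypothetical:

from MEDfl.LearningManager.plot import AccuracyLossPlotter

AccuracyLossPlotter.plot_global_confusion_matrix("demo_pipeline")
AccuracyLossPlotter.plot_confusion_Matrix_by_node("site_a", "demo_pipeline")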
+ + Returns: + None + """ + + # Get the id of the pipeline by name + pipeline_id = get_pipeline_from_name(pipeline_name) + # get the confusion matrix pf the pipeline + confusion_matrix = get_node_confusion_matrix( + pipeline_id, node_name=node_name) + + # Extracting confusion matrix values + TP = confusion_matrix['TP'] + FP = confusion_matrix['FP'] + FN = confusion_matrix['FN'] + TN = confusion_matrix['TN'] + + # Creating a matrix for visualization + matrix = [[TN, FP], + [FN, TP]] + + # Plotting the confusion matrix as a heatmap + plt.figure(figsize=(6, 4)) + sns.heatmap(matrix, annot=True, fmt='d', cmap='Blues', + xticklabels=['Predicted Negative', 'Predicted Positive'], + yticklabels=['Actual Negative', 'Actual Positive']) + plt.title('Confusion Matrix of node: '+node_name) + plt.xlabel('Predicted label') + plt.ylabel('True label') + plt.tight_layout() + + # Display the confusion matrix heatmap + plt.show() + return + + @staticmethod + def plot_classification_report(pipeline_name: str): + """ + Plot a comparison of classification report metrics between nodes. + + Args: + pipeline_name (str): Name of the pipeline. + + Returns: + None + """ + + colors = ['#FF5733', '#6A5ACD', '#3CB371', '#FFD700', '#FFA500', '#8A2BE2', '#00FFFF', '#FF00FF', '#A52A2A', '#00FF00'] + + # Get the id of the pipeline by name + pipeline_id = get_pipeline_from_name(pipeline_name) + + pipeline_results = get_pipeline_result(pipeline_id) + + nodesList = pipeline_results['nodename'] + classificationReports = [] + + for index, node in enumerate(nodesList): + classificationReports.append({ + 'Accuracy': pipeline_results['accuracy'][index], + 'Sensitivity/Recall': pipeline_results['sensivity'][index], + 'PPV/Precision': pipeline_results['ppv'][index], + 'NPV': pipeline_results['npv'][index], + 'F1-score': pipeline_results['f1score'][index], + 'False positive rate': pipeline_results['fpr'][index], + 'True positive rate': pipeline_results['tpr'][index] + }) + + metric_labels = list(classificationReports[0].keys()) # Assuming both reports have the same keys + + # Set the positions of the bars on the x-axis + x = np.arange(len(metric_labels)) + + # Set the width of the bars + width = 0.35 + + plt.figure(figsize=(12, 6)) + + for index, report in enumerate(classificationReports): + metric = list(report.values()) + plt.bar(x + (index - len(nodesList) / 2) * width / len(nodesList), metric, width / len(nodesList), + label=nodesList[index], color=colors[index % len(colors)]) + + # Adding labels, title, and legend + plt.xlabel('Metrics') + plt.ylabel('Values') + plt.title('Comparison of Classification Report Metrics between Nodes') + plt.xticks(ticks=x, labels=metric_labels, rotation=45) + plt.legend() + + # Show plot + plt.tight_layout() + plt.show() + + return diff --git a/build/lib/MEDfl/LearningManager/server.py b/build/lib/MEDfl/LearningManager/server.py new file mode 100644 index 0000000..fa88c10 --- /dev/null +++ b/build/lib/MEDfl/LearningManager/server.py @@ -0,0 +1,181 @@ +#!/usr/bin/env python3 + +import copy +from typing import Dict, Optional, Tuple + +import flwr as fl +import torch + +from .client import FlowerClient +from .federated_dataset import FederatedDataset +from .model import Model +from .strategy import Strategy + + +class FlowerServer: + """ + A class representing the central server for Federated Learning using Flower. + + Attributes: + global_model (Model): The global model that will be federated among clients. 
+ strategy (Strategy): The strategy used for federated learning, specifying communication and aggregation methods. + num_rounds (int): The number of federated learning rounds to perform. + num_clients (int): The number of clients participating in the federated learning process. + fed_dataset (FederatedDataset): The federated dataset used for training and evaluation. + diff_priv (bool): Whether differential privacy is used during the federated learning process. + accuracies (List[float]): A list to store the accuracy of the global model during each round. + losses (List[float]): A list to store the loss of the global model during each round. + flower_clients (List[FlowerClient]): A list to store the FlowerClient objects representing individual clients. + + """ + + def __init__( + self, + global_model: Model, + strategy: Strategy, + num_rounds: int, + num_clients: int, + fed_dataset: FederatedDataset, + diff_privacy: bool = False, + client_resources: Optional[Dict[str, float]] = {'num_cpus': 1, 'num_gpus': 0.0} + ) -> None: + """ + Initialize a FlowerServer object with the specified parameters. + + Args: + global_model (Model): The global model that will be federated among clients. + strategy (Strategy): The strategy used for federated learning, specifying communication and aggregation methods. + num_rounds (int): The number of federated learning rounds to perform. + num_clients (int): The number of clients participating in the federated learning process. + fed_dataset (FederatedDataset): The federated dataset used for training and evaluation. + diff_privacy (bool, optional): Whether differential privacy is used during the federated learning process. + Default is False. + """ + self.device = torch.device( + f"cuda" if torch.cuda.is_available() else "cpu" + ) + self.global_model = global_model + self.params = global_model.get_parameters() + self.global_model.model = global_model.model.to(self.device) + self.num_rounds = num_rounds + self.num_clients = num_clients + self.fed_dataset = fed_dataset + self.strategy = strategy + self.client_resources = client_resources + setattr( + self.strategy.strategy_object, + "min_available_clients", + self.num_clients, + ) + setattr( + self.strategy.strategy_object, + "initial_parameters", + fl.common.ndarrays_to_parameters(self.params), + ) + setattr(self.strategy.strategy_object, "evaluate_fn", self.evaluate) + self.fed_dataset = fed_dataset + self.diff_priv = diff_privacy + self.accuracies = [] + self.losses = [] + self.auc = [] + self.flower_clients = [] + self.validate() + + def validate(self) -> None: + """Validate global_model, strategy, num_clients, num_rounds, fed_dataset, diff_privacy""" + if not isinstance(self.global_model, Model): + raise TypeError("global_model argument must be a Model instance") + + # if not isinstance(self.strategy, Strategy): + # print(self.strategy) + # print(isinstance(self.strategy, Strategy)) + # raise TypeError("strategy argument must be a Strategy instance") + + if not isinstance(self.num_clients, int): + raise TypeError("num_clients argument must be an int") + + if not isinstance(self.num_rounds, int): + raise TypeError("num_rounds argument must be an int") + + if not isinstance(self.diff_priv, bool): + raise TypeError("diff_priv argument must be a bool") + + def client_fn(self, cid) -> FlowerClient: + """ + Return a FlowerClient object for a specific client ID. + + Args: + cid: The client ID. + + Returns: + FlowerClient: A FlowerClient object representing the individual client. 
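A hedged sketch of wiring these pieces together, mirroring the usage shown earlier in params_optimiser.py; fed_dataset stands for an already-built FederatedDataset, and the round and client counts are invented:

import torch.nn as nn
import torch.optim as optim

from MEDfl.LearningManager.model import Model
from MEDfl.LearningManager.strategy import Strategy
from MEDfl.LearningManager.server import FlowerServer

fed_dataset = ...  # a FederatedDataset prepared beforehand (see the earlier sketch)

net = nn.Sequential(nn.Linear(8, 1))
global_model = Model(net, optim.SGD(net.parameters(), lr=0.001), nn.BCEWithLogitsLoss())

strategy = Strategy("FedAvg", initial_parameters=global_model.get_parameters())
strategy.create_strategy()

server = FlowerServer(
    global_model,
    strategy=strategy,
    num_rounds=3,
    num_clients=len(fed_dataset.trainloaders),
    fed_dataset=fed_dataset,
    diff_privacy=False,
)
history = server.run()  # starts the Flower simulation over the configured clients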
+ """ + + device = torch.device( + f"cuda:{int(cid) % 4}" if torch.cuda.is_available() else "cpu" + ) + client_model = copy.deepcopy(self.global_model) + + trainloader = self.fed_dataset.trainloaders[int(cid)] + valloader = self.fed_dataset.valloaders[int(cid)] + # this helps in making plots + + client = FlowerClient( + cid, client_model, trainloader, valloader, self.diff_priv + ) + self.flower_clients.append(client) + return client + + def evaluate( + self, + server_round: int, + parameters: fl.common.NDArrays, + config: Dict[str, fl.common.Scalar], + ) -> Optional[Tuple[float, Dict[str, fl.common.Scalar]]]: + """ + Evaluate the global model on the validation dataset and update the accuracies and losses. + + Args: + server_round (int): The current round of the federated learning process. + parameters (fl.common.NDArrays): The global model parameters. + config (Dict[str, fl.common.Scalar]): Configuration dictionary. + + Returns: + Optional[Tuple[float, Dict[str, fl.common.Scalar]]]: The evaluation loss and accuracy. + """ + testloader = self.fed_dataset.valloaders[0] + + self.global_model.set_parameters( + parameters + ) # Update model with the latest parameters + loss, accuracy ,auc = self.global_model.evaluate(testloader, self.device) + self.auc.append(auc) + self.losses.append(loss) + self.accuracies.append(accuracy) + + return loss, {"accuracy": accuracy} + + def run(self) -> None: + """ + Run the federated learning process using Flower simulation. + + Returns: + History: The history of the accuracies and losses during the training of each node + """ + # Increase the object store memory to the minimum allowed value or higher + ray_init_args = {"include_dashboard": False + , "object_store_memory": 78643200 + } + self.fed_dataset.eng = None + + history = fl.simulation.start_simulation( + client_fn=self.client_fn, + num_clients=self.num_clients, + config=fl.server.ServerConfig(self.num_rounds), + strategy=self.strategy.strategy_object, + ray_init_args=ray_init_args, + client_resources = self.client_resources + ) + + return history + diff --git a/build/lib/MEDfl/LearningManager/strategy.py b/build/lib/MEDfl/LearningManager/strategy.py new file mode 100644 index 0000000..038d8e6 --- /dev/null +++ b/build/lib/MEDfl/LearningManager/strategy.py @@ -0,0 +1,82 @@ + +from collections import OrderedDict +from typing import Dict, List, Optional, Tuple + +import flwr as fl +import numpy as np + +import optuna + + + + +class Strategy: + """ + A class representing a strategy for Federated Learning. + + Attributes: + name (str): The name of the strategy. Default is "FedAvg". + fraction_fit (float): Fraction of clients to use for training during each round. Default is 1.0. + fraction_evaluate (float): Fraction of clients to use for evaluation during each round. Default is 1.0. + min_fit_clients (int): Minimum number of clients to use for training during each round. Default is 2. + min_evaluate_clients (int): Minimum number of clients to use for evaluation during each round. Default is 2. + min_available_clients (int): Minimum number of available clients required to start a round. Default is 2. 
+        initial_parameters (Optional[List[np.ndarray]]): The initial parameters of the server model.
+        evaluation_methode (str): Evaluation mode, either "centralized" or "distributed".
+    """
+
+    def __init__(
+        self,
+        name: str = "FedAvg",
+        fraction_fit: float = 1.0,
+        fraction_evaluate: float = 1.0,
+        min_fit_clients: int = 2,
+        min_evaluate_clients: int = 2,
+        min_available_clients: int = 2,
+        initial_parameters = [],
+        evaluation_methode = "centralized"
+    ) -> None:
+        """
+        Initialize a Strategy object with the specified parameters.
+
+        Args:
+            name (str): The name of the strategy. Default is "FedAvg".
+            fraction_fit (float): Fraction of clients to use for training during each round. Default is 1.0.
+            fraction_evaluate (float): Fraction of clients to use for evaluation during each round. Default is 1.0.
+            min_fit_clients (int): Minimum number of clients to use for training during each round. Default is 2.
+            min_evaluate_clients (int): Minimum number of clients to use for evaluation during each round. Default is 2.
+            min_available_clients (int): Minimum number of available clients required to start a round. Default is 2.
+            initial_parameters (Optional[List[np.ndarray]]): The initial parameters of the server model.
+            evaluation_methode (str): Evaluation mode, either "centralized" or "distributed". Default is "centralized".
+        """
+        self.fraction_fit = fraction_fit
+        self.fraction_evaluate = fraction_evaluate
+        self.min_fit_clients = min_fit_clients
+        self.min_evaluate_clients = min_evaluate_clients
+        self.min_available_clients = min_available_clients
+        self.initial_parameters = initial_parameters
+        self.evaluation_methode = evaluation_methode
+        self.evaluate_fn = None
+        self.name = name
+
+    def optuna_fed_optimization(self, direction: str, hpo_rate: int, params_config):
+        self.study = optuna.create_study(direction=direction)
+        self.hpo_rate = hpo_rate
+        self.params_config = params_config
+
+    def create_strategy(self):
+        self.strategy_object = self.get_strategy_by_name()(
+            fraction_fit=self.fraction_fit,
+            fraction_evaluate=self.fraction_evaluate,
+            min_fit_clients=self.min_fit_clients,
+            min_evaluate_clients=self.min_evaluate_clients,
+            min_available_clients=self.min_available_clients,
+            initial_parameters=fl.common.ndarrays_to_parameters(self.initial_parameters),
+            evaluate_fn=self.evaluate_fn
+        )
+
+    def get_strategy_by_name(self):
+        # Resolve the Flower strategy class by its name (e.g. FedAvg, FedProx)
+        return getattr(fl.server.strategy, self.name)
+
+
diff --git a/build/lib/MEDfl/LearningManager/utils.py b/build/lib/MEDfl/LearningManager/utils.py
new file mode 100644
index 0000000..a5bd4dd
--- /dev/null
+++ b/build/lib/MEDfl/LearningManager/utils.py
@@ -0,0 +1,331 @@
+#!/usr/bin/env python3
+
+import pkg_resources
+import torch
+import yaml
+from sklearn.metrics import *
+from yaml.loader import SafeLoader
+
+from MEDfl.NetManager.database_connector import DatabaseManager
+
+# from scripts.base import *
+import json
+
+import pandas as pd
+import numpy as np
+
+import os
+import configparser
+
+import subprocess
+import ast
+
+from sqlalchemy import text
+
+
+# Get the directory of the current script
+current_directory = os.path.dirname(os.path.abspath(__file__))
+
+# Load configuration from the config file
+yaml_path = os.path.join(current_directory, 'params.yaml')
+
+with open(yaml_path) as g:
+    params = yaml.load(g, Loader=SafeLoader)
+
+# global_yaml_path = pkg_resources.resource_filename(__name__, "../../global_params.yaml")
+# with open(global_yaml_path) as g:
+#     global_params = yaml.load(g, Loader=SafeLoader)
+
+
+# Default path for the config file
+DEFAULT_CONFIG_PATH = 'db_config.ini'
+
+
+def load_db_config_dep():
+    config = os.environ.get('MEDfl_DB_CONFIG')
+
+    if config:
+        return ast.literal_eval(config)
+    else:
+        raise ValueError("MEDfl db 
config not found") + +# Function to allow users to set config path programmatically + + +def set_db_config_dep(config_path): + config = configparser.ConfigParser() + config.read(config_path) + if (config['sqllite']): + os.environ['MEDfl_DB_CONFIG'] = str(dict(config['sqllite'])) + else: + raise ValueError(f"mysql key not found in file '{config_path}'") + + + +def load_db_config(): + """Read a dictionary from an environment variable.""" + obj_str = os.getenv("MEDfl_DB_CONFIG") + if obj_str is not None: + return ast.literal_eval(obj_str) + else: + raise ValueError(f"Environment variable MEDfl_DB_CONFIG not found") + +# Function to allow users to set config path programmatically + + +def set_db_config(config_path): + obj = {"database" : config_path} + + """Store a dictionary as a string in an environment variable.""" + obj_str = str(obj) + os.environ['MEDfl_DB_CONFIG'] = obj_str + + + + + + +# Create databas + + +def create_MEDfl_db(): + script_path = os.path.join(os.path.dirname( + __file__), 'scripts', 'create_db.sh') + subprocess.run(['sh', script_path], check=True) + + +def custom_classification_report(y_true, y_pred_prob): + """ + Compute custom classification report metrics including accuracy, sensitivity, specificity, precision, NPV, + F1-score, false positive rate, and true positive rate. + + Args: + y_true (array-like): True labels. + y_pred (array-like): Predicted labels. + + Returns: + dict: A dictionary containing custom classification report metrics. + """ + y_pred = (y_pred_prob).round( + ) # Round absolute values of predicted probabilities to the nearest integer + + auc = roc_auc_score(y_true, y_pred_prob) # Calculate AUC + + tn, fp, fn, tp = confusion_matrix(y_true, y_pred , labels=[0, 1]).ravel() + + # Accuracy + denominator_acc = tp + tn + fp + fn + acc = (tp + tn) / denominator_acc if denominator_acc != 0 else 0.0 + + # Sensitivity/Recall + denominator_sen = tp + fn + sen = tp / denominator_sen if denominator_sen != 0 else 0.0 + + # Specificity + denominator_sp = tn + fp + sp = tn / denominator_sp if denominator_sp != 0 else 0.0 + + # PPV/Precision + denominator_ppv = tp + fp + ppv = tp / denominator_ppv if denominator_ppv != 0 else 0.0 + +# NPV + denominator_npv = tn + fn + npv = tn / denominator_npv if denominator_npv != 0 else 0.0 + + # F1 Score + denominator_f1 = sen + ppv + f1 = 2 * (sen * ppv) / denominator_f1 if denominator_f1 != 0 else 0.0 + + # False Positive Rate + denominator_fpr = fp + tn + fpr = fp / denominator_fpr if denominator_fpr != 0 else 0.0 + + # True Positive Rate + denominator_tpr = tp + fn + tpr = tp / denominator_tpr if denominator_tpr != 0 else 0.0 + + return { + "confusion matrix": {"TP": tp, "FP": fp, "FN": fn, "TN": tn}, + "Accuracy": round(acc, 3), + "Sensitivity/Recall": round(sen, 3), + "Specificity": round(sp, 3), + "PPV/Precision": round(ppv, 3), + "NPV": round(npv, 3), + "F1-score": round(f1, 3), + "False positive rate": round(fpr, 3), + "True positive rate": round(tpr, 3), + "auc": auc + } + + +def test(model, test_loader, device=torch.device("cpu")): + """ + Evaluate a model using a test loader and return a custom classification report. + + Args: + model (torch.nn.Module): PyTorch model to evaluate. + test_loader (torch.utils.data.DataLoader): DataLoader for the test dataset. + device (torch.device, optional): Device for model evaluation. Default is "cpu". + + Returns: + dict: A dictionary containing custom classification report metrics. 
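+
+    Example (illustrative; assumes a trained binary classifier and a test DataLoader):
+        report = test(model, test_loader)
+        print(report["auc"], report["confusion matrix"])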
+ """ + + model.eval() + with torch.no_grad(): + X_test, y_test = test_loader.dataset[:][0].to( + device), test_loader.dataset[:][1].to(device) + y_hat_prob = torch.squeeze(model(X_test), 1).cpu() + + return custom_classification_report(y_test.cpu().numpy(), y_hat_prob.cpu().numpy()) + + +column_map = {"object": "VARCHAR(255)", "int64": "INT", "float64": "FLOAT"} + + +def empty_db(): + """ + Empty the database by deleting records from multiple tables and resetting auto-increment counters. + + Returns: + None + """ + db_manager = DatabaseManager() + db_manager.connect() + my_eng = db_manager.get_connection() + + # my_eng.execute(text(f"DELETE FROM {'DataSets'}")) + my_eng.execute(text(f"DELETE FROM {'Nodes'}")) + my_eng.execute(text(f"DELETE FROM {'FedDatasets'}")) + my_eng.execute(text(f"DELETE FROM {'Networks'}")) + my_eng.execute(text(f"DELETE FROM {'FLsetup'}")) + + my_eng.execute(text(f"DELETE FROM {'FLpipeline'}")) + my_eng.execute(text(f"ALTER TABLE {'Nodes'} AUTO_INCREMENT = 1")) + my_eng.execute(text(f"ALTER TABLE {'Networks'} AUTO_INCREMENT = 1")) + my_eng.execute(text(f"ALTER TABLE {'FedDatasets'} AUTO_INCREMENT = 1")) + my_eng.execute(text(f"ALTER TABLE {'FLsetup'} AUTO_INCREMENT = 1")) + my_eng.execute(text(f"ALTER TABLE {'FLpipeline'} AUTO_INCREMENT = 1")) + my_eng.execute(text(f"DELETE FROM {'testResults'}")) + my_eng.execute(text(f"DROP TABLE IF EXISTS {'MasterDataset'}")) + my_eng.execute(text(f"DROP TABLE IF EXISTS {'DataSets'}")) + + +def get_pipeline_from_name(name): + """ + Get the pipeline ID from its name in the database. + + Args: + name (str): Name of the pipeline. + + Returns: + int: ID of the pipeline. + """ + db_manager = DatabaseManager() + db_manager.connect() + my_eng = db_manager.get_connection() + + NodeId = int( + pd.read_sql( + text(f"SELECT id FROM FLpipeline WHERE name = '{name}'"), my_eng + ).iloc[0, 0] + ) + return NodeId + + +def get_pipeline_confusion_matrix(pipeline_id): + """ + Get the global confusion matrix for a pipeline based on test results. + + Args: + pipeline_id (int): ID of the pipeline. + + Returns: + dict: A dictionary representing the global confusion matrix. + """ + db_manager = DatabaseManager() + db_manager.connect() + my_eng = db_manager.get_connection() + + data = pd.read_sql( + text( + f"SELECT confusionmatrix FROM testResults WHERE pipelineid = '{pipeline_id}'"), my_eng + ) + + # Convert the column of strings into a list of dictionaries representing confusion matrices + confusion_matrices = [ + json.loads(matrix.replace("'", "\"")) for matrix in data['confusionmatrix'] + ] + + # Initialize variables for global confusion matrix + global_TP = global_FP = global_FN = global_TN = 0 + + # Iterate through each dictionary and sum the corresponding values for each category + for matrix in confusion_matrices: + global_TP += matrix['TP'] + global_FP += matrix['FP'] + global_FN += matrix['FN'] + global_TN += matrix['TN'] + + # Create a global confusion matrix as a dictionary + global_confusion_matrix = { + 'TP': global_TP, + 'FP': global_FP, + 'FN': global_FN, + 'TN': global_TN + } + # Return the list of dictionaries representing confusion matrices + return global_confusion_matrix + + +def get_node_confusion_matrix(pipeline_id, node_name): + """ + Get the confusion matrix for a specific node in a pipeline based on test results. + + Args: + pipeline_id (int): ID of the pipeline. + node_name (str): Name of the node. + + Returns: + dict: A dictionary representing the confusion matrix for the specified node. 
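+
+    Example (illustrative; the pipeline id, node name and counts are placeholders):
+        cm = get_node_confusion_matrix(1, "node_1")
+        # e.g. {'TP': 40, 'FP': 5, 'FN': 7, 'TN': 48}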
+ """ + db_manager = DatabaseManager() + db_manager.connect() + my_eng = db_manager.get_connection() + + data = pd.read_sql( + text( + f"SELECT confusionmatrix FROM testResults WHERE pipelineid = '{pipeline_id}' AND nodename = '{node_name}'"), my_eng + ) + + # Convert the column of strings into a list of dictionaries representing confusion matrices + confusion_matrices = [ + json.loads(matrix.replace("'", "\"")) for matrix in data['confusionmatrix'] + ] + + # Return the list of dictionaries representing confusion matrices + return confusion_matrices[0] + + +def get_pipeline_result(pipeline_id): + """ + Get the test results for a pipeline. + + Args: + pipeline_id (int): ID of the pipeline. + + Returns: + pandas.DataFrame: DataFrame containing test results for the specified pipeline. + """ + db_manager = DatabaseManager() + db_manager.connect() + my_eng = db_manager.get_connection() + + data = pd.read_sql( + text( + f"SELECT * FROM testResults WHERE pipelineid = '{pipeline_id}'"), my_eng + ) + return data diff --git a/build/lib/MEDfl/NetManager/__init__.py b/build/lib/MEDfl/NetManager/__init__.py new file mode 100644 index 0000000..c583635 --- /dev/null +++ b/build/lib/MEDfl/NetManager/__init__.py @@ -0,0 +1,10 @@ +# # MEDfl/NetworkManager/__init__.py + +# # Import modules from this package +# from .dataset import * +# from .flsetup import * +# from .net_helper import * +# from .net_manager_queries import * +# from .network import * +# from .node import * +# from .database_connector import * diff --git a/build/lib/MEDfl/NetManager/database_connector.py b/build/lib/MEDfl/NetManager/database_connector.py new file mode 100644 index 0000000..ad85ab1 --- /dev/null +++ b/build/lib/MEDfl/NetManager/database_connector.py @@ -0,0 +1,45 @@ +import os +import subprocess , sys +from sqlalchemy import create_engine +from configparser import ConfigParser + +class DatabaseManager: + def __init__(self): + from MEDfl.LearningManager.utils import load_db_config + db_config = load_db_config() + if db_config: + self.config = db_config + else: + self.config = None + self.engine = None + + def connect(self): + if not self.config: + raise ValueError("Database configuration not loaded. 
Use load_db_config() or set_config_path() first.") + # Assuming the SQLite database file path is provided in the config with the key 'database' + database_path = self.config['database'] + connection_string = f"sqlite:///{database_path}" + self.engine = create_engine(connection_string, pool_pre_ping=True) + + def get_connection(self): + if not self.engine: + self.connect() + return self.engine.connect() + + def create_MEDfl_db(self, path_to_csv): + # Get the directory of the current script + current_directory = os.path.dirname(__file__) + + # Define the path to the create_db.py script + create_db_script_path = os.path.join(current_directory, '..', 'scripts', 'create_db.py') + + # Execute the create_db.py script + print(sys.executable) + result = subprocess.run([sys.executable, create_db_script_path, path_to_csv], + capture_output=True, text=True) + + return + + def close(self): + if self.engine: + self.engine.dispose() diff --git a/build/lib/MEDfl/NetManager/dataset.py b/build/lib/MEDfl/NetManager/dataset.py new file mode 100644 index 0000000..2023495 --- /dev/null +++ b/build/lib/MEDfl/NetManager/dataset.py @@ -0,0 +1,92 @@ +import pandas as pd +from sqlalchemy import text + +from .net_helper import * +from .net_manager_queries import (DELETE_DATASET, INSERT_DATASET, + SELECT_ALL_DATASET_NAMES) +from MEDfl.NetManager.database_connector import DatabaseManager + +class DataSet: + def __init__(self, name: str, path: str, engine=None): + """ + Initialize a DataSet object. + + :param name: The name of the dataset. + :type name: str + :param path: The file path of the dataset CSV file. + :type path: str + """ + self.name = name + self.path = path + db_manager = DatabaseManager() + db_manager.connect() + self.engine = db_manager.get_connection() + + def validate(self): + """ + Validate name and path attributes. + + :raises TypeError: If name or path is not a string. + """ + if not isinstance(self.name, str): + raise TypeError("name argument must be a string") + + if not isinstance(self.path, str): + raise TypeError("path argument must be a string") + + def upload_dataset(self, NodeId=-1): + """ + Upload the dataset to the database. + + :param NodeId: The NodeId associated with the dataset. + :type NodeId: int + + Notes: + - Assumes the file at self.path is a valid CSV file. + - The dataset is uploaded to the 'DataSets' table in the database. + """ + + data_df = pd.read_csv(self.path) + nodeId = NodeId + columns = data_df.columns.tolist() + + + data_df = process_eicu(data_df) + for index, row in data_df.iterrows(): + query_1 = "INSERT INTO DataSets(DataSetName,nodeId," + "".join( + f"{x}," for x in columns + ) + query_2 = f" VALUES ('{self.name}',{nodeId}, " + "".join( + f"{is_str(data_df, row, x)}," for x in columns + ) + query = query_1[:-1] + ")" + query_2[:-1] + ")" + + self.engine.execute(text(query)) + + def delete_dataset(self): + """ + Delete the dataset from the database. + + Notes: + - Assumes the dataset name is unique in the 'DataSets' table. + """ + self.engine.execute(text(DELETE_DATASET), {"name": self.name}) + + def update_data(self): + """ + Update the data in the dataset. + + Not implemented yet. + """ + pass + + @staticmethod + def list_alldatasets(engine): + """ + List all dataset names from the 'DataSets' table. + + :returns: A DataFrame containing the names of all datasets in the 'DataSets' table. 
+ :rtype: pd.DataFrame + """ + res = pd.read_sql(text(SELECT_ALL_DATASET_NAMES), engine) + return res diff --git a/build/lib/MEDfl/NetManager/flsetup.py b/build/lib/MEDfl/NetManager/flsetup.py new file mode 100644 index 0000000..10567ad --- /dev/null +++ b/build/lib/MEDfl/NetManager/flsetup.py @@ -0,0 +1,320 @@ +from datetime import datetime + + +from torch.utils.data import random_split, DataLoader, Dataset + +from MEDfl.LearningManager.federated_dataset import FederatedDataset +from .net_helper import * +from .net_manager_queries import * # Import the sql_queries module +from .network import Network + +from .node import Node + +from MEDfl.NetManager.database_connector import DatabaseManager + + +class FLsetup: + def __init__(self, name: str, description: str, network: Network): + """Initialize a Federated Learning (FL) setup. + + Args: + name (str): The name of the FL setup. + description (str): A description of the FL setup. + network (Network): An instance of the Network class representing the network architecture. + """ + self.name = name + self.description = description + self.network = network + self.column_name = None + self.auto = 1 if self.column_name is not None else 0 + self.validate() + self.fed_dataset = None + + db_manager = DatabaseManager() + db_manager.connect() + self.eng = db_manager.get_connection() + + + + def validate(self): + """Validate name, description, and network.""" + if not isinstance(self.name, str): + raise TypeError("name argument must be a string") + + if not isinstance(self.description, str): + raise TypeError("description argument must be a string") + + if not isinstance(self.network, Network): + raise TypeError( + "network argument must be a MEDfl.NetManager.Network " + ) + + def create(self): + """Create an FL setup.""" + creation_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + netid = get_netid_from_name(self.network.name) + self.eng.execute( + text(CREATE_FLSETUP_QUERY), + { + "name": self.name, + "description": self.description, + "creation_date": creation_date, + "net_id": netid, + "column_name": self.column_name, + }, + ) + self.id = get_flsetupid_from_name(self.name) + + def delete(self): + """Delete the FL setup.""" + if self.fed_dataset is not None: + self.fed_dataset.delete_Flsetup(FLsetupId=self.id) + self.eng.execute(text(DELETE_FLSETUP_QUERY), {"name": self.name}) + + @classmethod + def read_setup(cls, FLsetupId: int): + """Read the FL setup by FLsetupId. + + Args: + FLsetupId (int): The id of the FL setup to read. + + Returns: + FLsetup: An instance of the FLsetup class with the specified FLsetupId. + """ + db_manager = DatabaseManager() + db_manager.connect() + my_eng = db_manager.get_connection() + + res = pd.read_sql( + text(READ_SETUP_QUERY), my_eng, params={"flsetup_id": FLsetupId} + ).iloc[0] + + network_res = pd.read_sql( + text(READ_NETWORK_BY_ID_QUERY), + my_eng, + params={"net_id": int(res["NetId"])}, + ).iloc[0] + network = Network(network_res["NetName"]) + setattr(network, "id", res["NetId"]) + fl_setup = cls(res["name"], res["description"], network) + if res["column_name"] == str(None): + res["column_name"] = None + setattr(fl_setup, "column_name", res["column_name"]) + setattr(fl_setup, "id", res["FLsetupId"]) + + return fl_setup + + @staticmethod + def list_allsetups(): + """List all the FL setups. + + Returns: + DataFrame: A DataFrame containing information about all the FL setups. 
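+
+        Example (illustrative):
+            setups = FLsetup.list_allsetups()
+            print(setups[["FLsetupId", "name"]])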
+ """ + db_manager = DatabaseManager() + db_manager.connect() + my_eng = db_manager.get_connection() + + Flsetups = pd.read_sql(text(READ_ALL_SETUPS_QUERY), my_eng) + return Flsetups + + def create_nodes_from_master_dataset(self, params_dict: dict): + """Create nodes from the master dataset. + + Args: + params_dict (dict): A dictionary containing parameters for node creation. + - column_name (str): The name of the column in the MasterDataset used to create nodes. + - train_nodes (list): A list of node names that will be used for training. + - test_nodes (list): A list of node names that will be used for testing. + + Returns: + list: A list of Node instances created from the master dataset. + """ + assert "column_name" in params_dict.keys() + column_name, train_nodes, test_nodes = ( + params_dict["column_name"], + params_dict["train_nodes"], + params_dict["test_nodes"], + ) + self.column_name = column_name + self.auto = 1 + + # Update the Column name of the auto flSetup + query = f"UPDATE FLsetup SET column_name = '{column_name}' WHERE name = '{self.name}'" + self.eng.execute(text(query)) + + + # Add Network to DB + # self.network.create_network() + + netid = get_netid_from_name(self.network.name) + + assert self.network.mtable_exists == 1 + node_names = pd.read_sql( + text(READ_DISTINCT_NODES_QUERY.format(column_name)), self.eng + ) + + nodes = [Node(val[0], 1) for val in node_names.values.tolist()] + + used_nodes = [] + + for node in nodes: + if node.name in train_nodes: + node.train = 1 + node.create_node(netid) + used_nodes.append(node) + if node.name in test_nodes: + node.train =0 + node.create_node(netid) + used_nodes.append(node) + return used_nodes + + def create_dataloader_from_node( + self, + node: Node, + output, + fill_strategy="mean", fit_encode=[], to_drop=[], + train_batch_size: int = 32, + test_batch_size: int = 1, + split_frac: float = 0.2, + dataset: Dataset = None, + + ): + """Create DataLoader from a Node. + + Args: + node (Node): The node from which to create DataLoader. + train_batch_size (int): The batch size for training data. + test_batch_size (int): The batch size for test data. + split_frac (float): The fraction of data to be used for training. + dataset (Dataset): The dataset to use. If None, the method will read the dataset from the node. + + Returns: + DataLoader: The DataLoader instances for training and testing. + """ + if dataset is None: + if self.column_name is not None: + dataset = process_data_after_reading( + node.get_dataset(self.column_name), output, fill_strategy=fill_strategy, fit_encode=fit_encode, to_drop=to_drop + ) + else: + dataset = process_data_after_reading( + node.get_dataset(), output, fill_strategy=fill_strategy, fit_encode=fit_encode, to_drop=to_drop) + + dataset_size = len(dataset) + traindata_size = int(dataset_size * (1 - split_frac)) + traindata, testdata = random_split( + dataset, [traindata_size, dataset_size - traindata_size] + ) + trainloader, testloader = DataLoader( + traindata, batch_size=train_batch_size + ), DataLoader(testdata, batch_size=test_batch_size) + return trainloader, testloader + + def create_federated_dataset( + self, output, fill_strategy="mean", fit_encode=[], to_drop=[], val_frac=0.1, test_frac=0.2 + ) -> FederatedDataset: + """Create a federated dataset. + + Args: + output (string): the output feature of the dataset + val_frac (float): The fraction of data to be used for validation. + test_frac (float): The fraction of data to be used for testing. 
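+            fill_strategy (str, optional): Imputation strategy for missing values. Default is "mean".
+            fit_encode (list, optional): Columns to label-encode before building tensors. Default is an empty list.
+            to_drop (list, optional): Columns to drop before building tensors. Default is an empty list.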
+ + Returns: + FederatedDataset: The FederatedDataset instance containing train, validation, and test data. + """ + + if not self.column_name: + to_drop.extend(["DataSetName" , "NodeId" , "DataSetId"]) + else : + to_drop.extend(["PatientId"]) + + netid = self.network.id + train_nodes = pd.read_sql( + text( + f"SELECT Nodes.NodeName FROM Nodes WHERE Nodes.NetId = {netid} AND Nodes.train = 1 " + ), + self.eng, + ) + test_nodes = pd.read_sql( + text( + f"SELECT Nodes.NodeName FROM Nodes WHERE Nodes.NetId = {netid} AND Nodes.train = 0 " + ), + self.eng, + ) + + train_nodes = [ + Node(val[0], 1, test_frac) for val in train_nodes.values.tolist() + ] + test_nodes = [Node(val[0], 0) for val in test_nodes.values.tolist()] + + trainloaders, valloaders, testloaders = [], [], [] + # if len(test_nodes) == 0: + # raise "test node empty" + if test_nodes is None: + _, testloader = self.create_dataloader_from_node( + train_nodes[0], output, fill_strategy=fill_strategy, fit_encode=fit_encode, to_drop=to_drop) + testloaders.append(testloader) + else: + for train_node in train_nodes: + train_valloader, testloader = self.create_dataloader_from_node( + train_node, output, fill_strategy=fill_strategy, + fit_encode=fit_encode, to_drop=to_drop,) + trainloader, valloader = self.create_dataloader_from_node( + train_node, + output, fill_strategy=fill_strategy, fit_encode=fit_encode, to_drop=to_drop, + test_batch_size=32, + split_frac=val_frac, + dataset=train_valloader.dataset, + ) + trainloaders.append(trainloader) + valloaders.append(valloader) + testloaders.append(testloader) + + for test_node in test_nodes: + _, testloader = self.create_dataloader_from_node( + test_node, output, fill_strategy=fill_strategy, fit_encode=fit_encode, to_drop=to_drop, split_frac=1.0 + ) + testloaders.append(testloader) + train_nodes_names = [node.name for node in train_nodes] + test_nodes_names = train_nodes_names + [ + node.name for node in test_nodes + ] + + # test_nodes_names = [ + # node.name for node in test_nodes + # ] + + # Add FlSetup on to the DataBase + # self.create() + + # self.network.update_network(FLsetupId=self.id) + fed_dataset = FederatedDataset( + self.name + "_Feddataset", + train_nodes_names, + test_nodes_names, + trainloaders, + valloaders, + testloaders, + ) + self.fed_dataset = fed_dataset + self.fed_dataset.create(self.id) + return self.fed_dataset + + + + + def get_flDataSet(self): + """ + Retrieve the federated dataset associated with the FL setup using the FL setup's name. + + Returns: + pandas.DataFrame: DataFrame containing the federated dataset information. + """ + return pd.read_sql( + text( + f"SELECT * FROM FedDatasets WHERE FLsetupId = {get_flsetupid_from_name(self.name)}" + ), + self.eng, + ) diff --git a/build/lib/MEDfl/NetManager/net_helper.py b/build/lib/MEDfl/NetManager/net_helper.py new file mode 100644 index 0000000..2d9aff0 --- /dev/null +++ b/build/lib/MEDfl/NetManager/net_helper.py @@ -0,0 +1,254 @@ +from sklearn.preprocessing import LabelEncoder +from sklearn.impute import SimpleImputer + +from sqlalchemy import text + +import torch +import pandas as pd +from torch.utils.data import TensorDataset +import numpy as np + +from MEDfl.NetManager.database_connector import DatabaseManager + + +def is_str(data_df, row, x): + """ + Check if a column in a DataFrame is of type 'object' and convert the value accordingly. + + Args: + data_df (pandas.DataFrame): DataFrame containing the data. + row (pandas.Series): Data row. + x (str): Column name. 
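+
+    Note (added for clarity): values from object-dtype columns are returned wrapped in
+    single quotes so they can be interpolated into the SQL INSERT statements built by
+    DataSet.upload_dataset; numeric values are returned unchanged.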
+
+    Returns:
+        str or float: Processed value based on the column type.
+    """
+    if data_df[x].dtype == "object":
+        x = f"'{row[x]}'"
+    else:
+        x = row[x]
+    return x
+
+
+def process_eicu(data_df):
+    """
+    Process eICU data by filling missing numeric values with the column mean and missing non-numeric values with 'Unknown'.
+
+    Args:
+        data_df (pandas.DataFrame): Input data.
+
+    Returns:
+        pandas.DataFrame: Processed data.
+    """
+    # Identify numeric and non-numeric columns
+    numeric_columns = data_df.select_dtypes(include=[np.number]).columns
+    non_numeric_columns = data_df.select_dtypes(exclude=[np.number]).columns
+
+    # Fill NaN in numeric columns with the column mean
+    data_df[numeric_columns] = data_df[numeric_columns].fillna(
+        data_df[numeric_columns].mean())
+
+    # Fill NaN in non-numeric columns with 'Unknown'
+    data_df[non_numeric_columns] = data_df[non_numeric_columns].fillna(
+        'Unknown')
+
+    try:
+        data_df = data_df.reset_index(drop=True)
+    except Exception:
+        pass
+
+    return data_df
+
+
+# Remove undesired columns after reading from the DB
+def process_data_after_reading(data, output, fill_strategy="mean", fit_encode=[], to_drop=[]):
+    """
+    Process data after reading from the database, including encoding, dropping columns, and creating a PyTorch TensorDataset.
+
+    Args:
+        data (pandas.DataFrame): Input data.
+        output (str): Output column name.
+        fill_strategy (str, optional): Imputation strategy for missing values. Default is "mean".
+        fit_encode (list, optional): List of columns to be label-encoded. Default is an empty list.
+        to_drop (list, optional): List of columns to be dropped from the DataFrame. Default is an empty list.
+
+    Returns:
+        torch.utils.data.TensorDataset: Processed data as a PyTorch TensorDataset.
+    """
+
+    # Check if there is a dataset assigned to the node
+    if len(data) == 0:
+        raise ValueError("Node doesn't have a dataset assigned")
+
+    encoder = LabelEncoder()
+    # Label-encode the requested columns
+    for s in fit_encode:
+        try:
+            data[s] = encoder.fit_transform(data[s])
+        except Exception as e:
+            raise ValueError(f"Failed to label-encode column '{s}'") from e
+
+    # The output (target) column
+    y = data[output]
+
+    X = data
+
+    # Remove undesired columns when reading the dataframe from the DB
+    for column in to_drop:
+        try:
+            X = X.drop(
+                [column], axis=1
+            )
+        except Exception as e:
+            raise e
+
+    # Get the dataset features
+    features = [col for col in X.columns if col != output]
+
+    # Impute missing values using the requested strategy
+    try:
+        imputer = SimpleImputer(strategy=fill_strategy)
+        X[features] = imputer.fit_transform(X[features])
+    except Exception as e:
+        print(f"Imputation skipped: {e}")
+
+    X = torch.tensor(X.values, dtype=torch.float32)
+    y = torch.tensor(y.values, dtype=torch.float32)
+    data = TensorDataset(X, y)
+
+    return data
+
+
+def get_nodeid_from_name(name):
+    """
+    Get the NodeId from the Nodes table based on the NodeName.
+
+    Args:
+        name (str): Node name.
+
+    Returns:
+        int or None: NodeId or None if not found.
+    """
+    db_manager = DatabaseManager()
+    db_manager.connect()
+    my_eng = db_manager.get_connection()
+
+    result_proxy = my_eng.execute(f"SELECT NodeId FROM Nodes WHERE NodeName = '{name}'")
+    NodeId = int(result_proxy.fetchone()[0])
+    return NodeId
+
+
+def get_netid_from_name(name):
+    """
+    Get the Network Id from the Networks table based on the NetName.
+
+    Args:
+        name (str): Network name.
+
+    Returns:
+        int or None: NetId or None if not found. 
+ """ + db_manager = DatabaseManager() + db_manager.connect() + my_eng = db_manager.get_connection() + + try: + result_proxy = my_eng.execute(f"SELECT NetId FROM Networks WHERE NetName = '{name}'") + NetId = int(result_proxy.fetchone()[0]) + except: + NetId = None + return NetId + + +def get_flsetupid_from_name(name): + """ + Get the FLsetupId from the FLsetup table based on the FL setup name. + + Args: + name (str): FL setup name. + + Returns: + int or None: FLsetupId or None if not found. + """ + db_manager = DatabaseManager() + db_manager.connect() + my_eng = db_manager.get_connection() + + try: + + result_proxy = my_eng.execute(f"SELECT FLsetupId FROM FLsetup WHERE name = '{name}'") + id = int(result_proxy.fetchone()[0]) + + except: + id = None + return id + + +def get_flpipeline_from_name(name): + """ + Get the FLpipeline Id from the FLpipeline table based on the FL pipeline name. + + Args: + name (str): FL pipeline name. + + Returns: + int or None: FLpipelineId or None if not found. + """ + db_manager = DatabaseManager() + db_manager.connect() + my_eng = db_manager.get_connection() + + try: + + result_proxy = my_eng.execute(f"SELECT id FROM FLpipeline WHERE name = '{name}'") + id = int(result_proxy.fetchone()[0]) + except: + id = None + return id + + +def get_feddataset_id_from_name(name): + """ + Get the Federated dataset Id from the FedDatasets table based on the federated dataset name. + + Args: + name (str): Federated dataset name. + + Returns: + int or None: FedId or None if not found. + """ + db_manager = DatabaseManager() + db_manager.connect() + my_eng = db_manager.get_connection() + + try: + + result_proxy = my_eng.execute(f"SELECT FedId FROM FedDatasets WHERE name = '{name}'") + id = int(result_proxy.fetchone()[0]) + except: + id = None + return id + + +def master_table_exists(): + """ + Check if the MasterDataset table exists in the database. + + Returns: + bool: True if the table exists, False otherwise. 
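+
+    Example (illustrative):
+        if not master_table_exists():
+            print("MasterDataset has not been created yet")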
+ """ + try: + db_manager = DatabaseManager() + db_manager.connect() + my_eng = db_manager.get_connection() + + # SQLite-specific query to check if table exists + sql_query = text("SELECT name FROM sqlite_master WHERE type='table' AND name='MasterDataset'") + result = my_eng.execute(sql_query) + exists = result.fetchone() is not None + return exists + + except Exception as e: + print(f"Error checking MasterDataset table existence: {e}") + return False + diff --git a/build/lib/MEDfl/NetManager/net_manager_queries.py b/build/lib/MEDfl/NetManager/net_manager_queries.py new file mode 100644 index 0000000..dd83969 --- /dev/null +++ b/build/lib/MEDfl/NetManager/net_manager_queries.py @@ -0,0 +1,142 @@ +from .net_helper import is_str + +INSERT_DATASET = """ + INSERT INTO DataSets(DataSetName, NodeId, {columns}) + VALUES (:name, :NodeId, {values}) +""" +DELETE_DATASET = """ + DELETE FROM DataSets WHERE DataSetName = :name +""" + +SELECT_ALL_DATASET_NAMES = """ + SELECT DISTINCT DataSetName,NodeId FROM DataSets +""" + +SELECT_DATASET_BY_NAME = """ + SELECT * FROM DataSets WHERE DataSetName = :name +""" + +# node queries +# sql_queries.py + +INSERT_NODE_QUERY = ( + "INSERT INTO Nodes(NodeName,NetId,train) VALUES ('{}',{}, {})" +) +DELETE_NODE_QUERY = "DELETE FROM Nodes WHERE NodeName = '{}'" +SELECT_MASTER_COLUMNS_QUERY = "SELECT * FROM MasterDataset LIMIT 1" +SELECT_DATASET_BY_COLUMN_QUERY = "SELECT * FROM MasterDataset WHERE {} = '{}'" +SELECT_DATASET_BY_NODE_ID_QUERY = "SELECT * FROM DataSets WHERE NodeId = {}" + +SELECT_ALL_DATASETS_QUERY = "SELECT DISTINCT DataSetName,NodeName FROM DataSets,Nodes WHERE Nodes.NodeName = '{}' and Nodes.NodeId = DataSets.NodeId" +SELECT_ALL_NODES_QUERY = "SELECT * FROM Nodes" + + +# SQL query to insert a new network +INSERT_NETWORK_QUERY = "INSERT INTO Networks(NetName) VALUES (:name)" + +# SQL query to delete a network +DELETE_NETWORK_QUERY = "DELETE FROM Networks WHERE NetName = '{name}'" + +# SQL query to delete a network +GET_NETWORK_QUERY = "SELECT * FROM Networks WHERE NetName = '{name}'" + + +# SQL query to update a network +UPDATE_NETWORK_QUERY = ( + "UPDATE Networks SET FLsetupId = {FLsetupId} WHERE NetId = {id}" +) + +# SQL query to retrieve all nodes for a network +LIST_ALL_NODES_QUERY = """ +SELECT Nodes.NodeName, Networks.NetName +FROM Nodes +JOIN Networks ON Networks.NetId = Nodes.NetId +WHERE Networks.NetName = :name +""" + +# SQL query to create the MasterDataset table (SQLite-compatible) +CREATE_MASTER_DATASET_TABLE_QUERY = """ +CREATE TABLE IF NOT EXISTS MasterDataset ( + PatientId INTEGER PRIMARY KEY AUTOINCREMENT, + {} +); +""" + + +# SQL query to create the datasets table (SQLite-compatible) +CREATE_DATASETS_TABLE_QUERY = """ +CREATE TABLE IF NOT EXISTS Datasets ( + DataSetId INTEGER PRIMARY KEY AUTOINCREMENT, + DataSetName VARCHAR(255), + NodeId INT, + {} +); +""" + + + +# SQL query to insert dataset values +INSERT_DATASET_VALUES_QUERY = "INSERT INTO MasterDataset({columns}, NodeId) VALUES ('{name}', {nodeId}, {values})" + + +# FL setup_queries +# sql_queries.py + +CREATE_FLSETUP_QUERY = """ + INSERT INTO FLsetup (name, description, creation_date, NetId, column_name) + VALUES (:name, :description, :creation_date, :net_id, :column_name) +""" + +DELETE_FLSETUP_QUERY = """ + DELETE FROM FLsetup + WHERE name = :name +""" + +UPDATE_FLSETUP_QUERY = UPDATE_NETWORK_QUERY = ( + "UPDATE FLsetup SET column_name ='{column_name}' WHERE name ='{FLsetupName}'" +) + + +READ_SETUP_QUERY = """ + SELECT * FROM FLsetup + WHERE FLsetupId = :flsetup_id +""" + 
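+# Illustrative usage of the parameterized queries in this module (assumes an
+# SQLAlchemy engine obtained from DatabaseManager); bound parameters such as
+# :flsetup_id are supplied at execution time rather than interpolated:
+#   pd.read_sql(text(READ_SETUP_QUERY), engine, params={"flsetup_id": 1})
+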
+READ_ALL_SETUPS_QUERY = """ + SELECT * FROM FLsetup +""" + +READ_NETWORK_BY_ID_QUERY = """ + SELECT * FROM Networks + WHERE NetId = :net_id +""" + +READ_DISTINCT_NODES_QUERY = """ +SELECT DISTINCT {} FROM MasterDataset +""" + + +# FederatedDataset Queries +INSERT_FLDATASET_QUERY = ( + "INSERT INTO FedDatasets(name, FLsetupId) VALUES (:name, :FLsetupId)" +) +DELETE_FLDATASET_BY_SETUP_AND_PIPELINE_QUERY = "DELETE FROM FedDatasets WHERE FLsetupId = :FLsetupId AND FLpipeId = :FLpipeId" + + +UPDATE_FLDATASET_QUERY = ( + "UPDATE FedDatasets SET FLpipeId = :FLpipeId WHERE FedId = :FedId" +) +SELECT_FLDATASET_BY_NAME_QUERY = "SELECT * FROM FedDatasets WHERE name = :name" + +CREATE_FLPIPELINE_QUERY = """ +INSERT INTO FLpipeline (name, description, creation_date, results) +VALUES ('{name}', '{description}', '{creation_date}', '{result}') +""" +DELETE_FLPIPELINE_QUERY = "DELETE FROM FLpipeline WHERE name = '{name}'" + +SELECT_FLPIPELINE_QUERY = "SELECT FROM FLpipeline WHERE name = '{name}'" + +CREATE_TEST_RESULTS_QUERY = """ +INSERT INTO testResults (pipelineid, nodename, confusionmatrix, accuracy , sensivity, ppv , npv , f1score , fpr , tpr ) +VALUES ('{pipelineId}', '{nodeName}', '{confusion_matrix}', '{accuracy}' , '{sensivity}' , '{ppv}' , '{npv}' , '{f1score}' , '{fpr}' , '{tpr}') +""" diff --git a/build/lib/MEDfl/NetManager/network.py b/build/lib/MEDfl/NetManager/network.py new file mode 100644 index 0000000..65cac7c --- /dev/null +++ b/build/lib/MEDfl/NetManager/network.py @@ -0,0 +1,194 @@ +# src/MEDfl/NetManager/network.py + +from MEDfl.LearningManager.utils import * +from .net_helper import * +from .net_manager_queries import (CREATE_MASTER_DATASET_TABLE_QUERY, + CREATE_DATASETS_TABLE_QUERY, + DELETE_NETWORK_QUERY, + INSERT_NETWORK_QUERY, LIST_ALL_NODES_QUERY, + UPDATE_NETWORK_QUERY, GET_NETWORK_QUERY) +from .node import Node +import pandas as pd +from MEDfl.LearningManager.utils import params + +from sqlalchemy import text +from sqlalchemy.exc import SQLAlchemyError + +class Network: + """ + A class representing a network. + + Attributes: + name (str): The name of the network. + mtable_exists (int): An integer flag indicating whether the MasterDataset table exists (1) or not (0). + """ + + def __init__(self, name: str = ""): + """ + Initialize a Network instance. + + Parameters: + name (str): The name of the network. + """ + self.name = name + self.mtable_exists = int(master_table_exists()) + self.validate() + + db_manager = DatabaseManager() + db_manager.connect() + self.eng = db_manager.get_connection() + + def validate(self): + """Validate name""" + + if not isinstance(self.name, str): + raise TypeError("name argument must be a string") + + def create_network(self): + """Create a new network in the database.""" + try: + print(self.name) + self.eng.execute(text(INSERT_NETWORK_QUERY), {"name": self.name}) + self.id = self.get_netid_from_name(self.name) + except SQLAlchemyError as e: + print(f"Error creating network: {e}") + + def use_network(self, network_name: str): + """Use a network in the database. + + Parameters: + network_name (str): The name of the network to use. + + Returns: + Network or None: An instance of the Network class if the network exists, else None. 
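+
+        Example (illustrative; the network name is a placeholder):
+            net = Network().use_network("my_network")
+            if net is not None:
+                print(net.id, net.name)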
+ """ + try: + network = pd.read_sql( + text(GET_NETWORK_QUERY), + self.eng, + params={"name": network_name} + ) + if not network.empty: + self.name = network.iloc[0]['NetName'] + self.id = network.iloc[0]['NetId'] + self.mtable_exists = int(master_table_exists()) + self.validate() + return self + else: + return None + except SQLAlchemyError as e: + print(f"Error using network: {e}") + return None + + def delete_network(self): + """Delete the network from the database.""" + try: + self.eng.execute(text(DELETE_NETWORK_QUERY), {"name": self.name}) + except SQLAlchemyError as e: + print(f"Error deleting network: {e}") + + def update_network(self, FLsetupId: int): + """Update the network's FLsetupId in the database. + + Parameters: + FLsetupId (int): The FLsetupId to update. + """ + try: + self.eng.execute( + text(UPDATE_NETWORK_QUERY), + {"FLsetupId": FLsetupId, "id": self.id} + ) + except SQLAlchemyError as e: + print(f"Error updating network: {e}") + + def add_node(self, node: Node): + """Add a node to the network. + + Parameters: + node (Node): The node to add. + """ + node.create_node(self.id) + + def list_allnodes(self): + """List all nodes in the network. + + Returns: + DataFrame: A DataFrame containing information about all nodes in the network. + """ + try: + query = text(LIST_ALL_NODES_QUERY) + result_proxy = self.eng.execute(query, name=self.name) + result_df = pd.DataFrame(result_proxy.fetchall(), columns=result_proxy.keys()) + return result_df + except SQLAlchemyError as e: + print(f"Error listing all nodes: {e}") + return pd.DataFrame() + + def create_master_dataset(self, path_to_csv: str = params['path_to_master_csv']): + """ + Create the MasterDataset table and insert dataset values. + + :param path_to_csv: Path to the CSV file containing the dataset. + """ + try: + print(path_to_csv) + data_df = pd.read_csv(path_to_csv) + + if self.mtable_exists != 1: + columns = data_df.columns.tolist() + columns_str = ",\n".join( + [ + f"{col} {column_map[str(data_df[col].dtype)]}" + for col in columns + ] + ) + self.eng.execute( + text(CREATE_MASTER_DATASET_TABLE_QUERY.format(columns_str)) + ) + self.eng.execute(text(CREATE_DATASETS_TABLE_QUERY.format(columns_str))) + + # Process data + data_df = process_eicu(data_df) + + # Insert data in batches + batch_size = 1000 # Adjust as needed + for start_idx in range(0, len(data_df), batch_size): + batch_data = data_df.iloc[start_idx:start_idx + batch_size] + insert_query = f"INSERT INTO MasterDataset ({', '.join(columns)}) VALUES ({', '.join([':' + col for col in columns])})" + data_to_insert = batch_data.to_dict(orient='records') + self.eng.execute(text(insert_query), data_to_insert) + + self.mtable_exists = 1 + except SQLAlchemyError as e: + print(f"Error creating master dataset: {e}") + + @staticmethod + def list_allnetworks(): + """List all networks in the database. + + Returns: + DataFrame: A DataFrame containing information about all networks in the database. 
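+
+        Example (illustrative):
+            networks = Network.list_allnetworks()
+            print(networks["NetName"].tolist())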
+ """ + try: + db_manager = DatabaseManager() + db_manager.connect() + my_eng = db_manager.get_connection() + + result_proxy = my_eng.execute("SELECT * FROM Networks") + result = result_proxy.fetchall() + return pd.DataFrame(result, columns=result_proxy.keys()) + except SQLAlchemyError as e: + print(f"Error listing all networks: {e}") + return pd.DataFrame() + + def get_netid_from_name(self, name): + """Get network ID from network name.""" + try: + result = self.eng.execute(text("SELECT NetId FROM Networks WHERE NetName = :name"), {"name": name}).fetchone() + if result: + return result[0] + else: + return None + except SQLAlchemyError as e: + print(f"Error fetching network ID: {e}") + return None diff --git a/build/lib/MEDfl/NetManager/node.py b/build/lib/MEDfl/NetManager/node.py new file mode 100644 index 0000000..9029d69 --- /dev/null +++ b/build/lib/MEDfl/NetManager/node.py @@ -0,0 +1,184 @@ +import pandas as pd + +from .net_helper import * +from .net_manager_queries import * +from MEDfl.LearningManager.utils import params +from MEDfl.NetManager.database_connector import DatabaseManager + +from sqlalchemy import text, exc + + +class Node: + """ + A class representing a node in the network. + + Attributes: + name (str): The name of the node. + train (int): An integer flag representing whether the node is used for training (1) or testing (0). + test_fraction (float, optional): The fraction of data used for testing when train=1. Default is 0.2. + """ + + def __init__( + self, name: str, train: int, test_fraction: float = 0.2, engine=None + ): + """ + Initialize a Node instance. + + Parameters: + name (str): The name of the node. + train (int): An integer flag representing whether the node is used for training (1) or testing (0). + test_fraction (float, optional): The fraction of data used for testing when train=1. Default is 0.2. + """ + self.name = name + self.train = train + self.test_fraction = 1.0 if self.train == 0 else test_fraction + + + db_manager = DatabaseManager() ; + db_manager.connect() ; + self.engine = db_manager.get_connection() + + def validate(self): + """Validate name, train, test_fraction""" + if not isinstance(self.name, str): + raise TypeError("name argument must be a string") + + if not isinstance(self.train, int): + raise TypeError("train argument must be an int") + + if not isinstance(self.test_fraction, float): + raise TypeError("test_fraction argument must be a float") + + def create_node(self, NetId: int): + """Create a node in the database. + Parameters: + NetId (int): The ID of the network to which the node belongs. + + Returns: + None + """ + self.engine.execute( + text(INSERT_NODE_QUERY.format(self.name, NetId, self.train)) + ) + + def delete_node(self): + """Delete the node from the database.""" + self.engine.execute(text(DELETE_NODE_QUERY.format(self.name))) + + def check_dataset_compatibility(self, data_df): + """Check if the dataset is compatible with the master dataset. + Parameters: + data_df (DataFrame): The dataset to check. 
+ + Returns: + None + """ + if master_table_exists() != 1: + print("MasterDataset doesn't exist") + else: + columns = data_df.columns.tolist() + + # get master_dataset columns + result_proxy = self.engine.execute(SELECT_MASTER_COLUMNS_QUERY) + master_table_columns = result_proxy.keys() + + + assert [x == y for x, y in zip(master_table_columns, columns)] + + def update_node(self): + """Update the node information (not implemented).""" + pass + + def get_dataset(self, column_name: str = None): + """Get the dataset for the node based on the given column name. + Parameters: + column_name (str, optional): The column name to filter the dataset. Default is None. + + Returns: + DataFrame: The dataset associated with the node. + """ + NodeId = get_nodeid_from_name(self.name) + if column_name is not None: + query = text(SELECT_DATASET_BY_COLUMN_QUERY.format(column_name, self.name)) + else: + query = text(SELECT_DATASET_BY_NODE_ID_QUERY.format(NodeId)) + + result_proxy = self.engine.execute(query) + node_dataset = pd.DataFrame(result_proxy.fetchall(), columns=result_proxy.keys()) + + return node_dataset + + def upload_dataset(self, dataset_name: str, path_to_csv: str = params['path_to_test_csv']): + """Upload the dataset to the database for the node. + + Parameters: + dataset_name (str): The name of the dataset. + path_to_csv (str, optional): Path to the CSV file containing the dataset. Default is the path in params. + + Returns: + None + """ + try: + data_df = pd.read_csv(path_to_csv) + nodeId = get_nodeid_from_name(self.name) + columns = data_df.columns.tolist() + self.check_dataset_compatibility(data_df) + + data_df = process_eicu(data_df) + + # Insert data in batches + batch_size = 1000 # Adjust as needed + for start_idx in range(0, len(data_df), batch_size): + batch_data = data_df.iloc[start_idx:start_idx + batch_size] + insert_query = f"INSERT INTO Datasets (DataSetName, NodeId, {', '.join(columns)}) VALUES (:dataset_name, :nodeId, {', '.join([':' + col for col in columns])})" + data_to_insert = batch_data.to_dict(orient='records') + params = [{"dataset_name": dataset_name, "nodeId": nodeId, **row} for row in data_to_insert] + self.engine.execute(text(insert_query), params) + except exc.SQLAlchemyError as e: + print(f"Error uploading dataset: {e}") + + def assign_dataset(self, dataset_name:str): + """Assigning existing dataSet to node + Parameters: + dataset_name (str): The name of the dataset to assign. + + Returns: + None + """ + + nodeId = get_nodeid_from_name(self.name) + query = f"UPDATE DataSets SET nodeId = {nodeId} WHERE DataSetName = '{dataset_name}'" + self.engine.execute(text(query)) + + def unassign_dataset(self, dataset_name:str): + """unssigning existing dataSet to node + Parameters: + dataset_name (str): The name of the dataset to assign. + + Returns: + None + """ + + query = f"UPDATE DataSets SET nodeId = {-1} WHERE DataSetName = '{dataset_name}'" + self.engine.execute(text(query)) + + def list_alldatasets(self): + """List all datasets associated with the node. + Returns: + DataFrame: A DataFrame containing information about all datasets associated with the node. + + """ + return pd.read_sql( + text(SELECT_ALL_DATASETS_QUERY.format(self.name)), my_eng + ) + + @staticmethod + def list_allnodes(): + """List all nodes in the database. + Returns: + DataFrame: A DataFrame containing information about all nodes in the database. 
+ + """ + query = text(SELECT_ALL_NODES_QUERY) + res = pd.read_sql(query, my_eng) + return res diff --git a/build/lib/MEDfl/__init__.py b/build/lib/MEDfl/__init__.py new file mode 100644 index 0000000..9763e3e --- /dev/null +++ b/build/lib/MEDfl/__init__.py @@ -0,0 +1,4 @@ +from .LearningManager import * +from .NetManager import * +from .scripts import * +from .rw import * \ No newline at end of file diff --git a/build/lib/MEDfl/rw/__init__.py b/build/lib/MEDfl/rw/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/build/lib/MEDfl/rw/client.py b/build/lib/MEDfl/rw/client.py new file mode 100644 index 0000000..4fed968 --- /dev/null +++ b/build/lib/MEDfl/rw/client.py @@ -0,0 +1,421 @@ +# File: client.py + +import argparse +import pandas as pd +import flwr as fl +import torch +import torch.nn as nn +import torch.optim as optim +from torch.utils.data import TensorDataset, DataLoader +from sklearn.metrics import accuracy_score, roc_auc_score +from sklearn.model_selection import train_test_split +from MEDfl.rw.model import Net # votre définition de modèle +import socket +import platform +import psutil +import shutil +import numpy as np + +try: + import GPUtil +except ImportError: + GPUtil = None + + +class DPConfig: + """ + Configuration for differential privacy. + + Attributes: + noise_multiplier (float): Noise multiplier for DP. + max_grad_norm (float): Maximum gradient norm for clipping. + batch_size (int): Batch size for training. + secure_rng (bool): Use a secure random generator. + """ + + def __init__( + self, + noise_multiplier=1.0, + max_grad_norm=1.0, + batch_size=32, + secure_rng=False, + ): + self.noise_multiplier = noise_multiplier + self.max_grad_norm = max_grad_norm + self.batch_size = batch_size + self.secure_rng = secure_rng + + +class FlowerClient(fl.client.NumPyClient): + def __init__( + self, + server_address, + data_path="data/data.csv", + dp_config=None, + # NEW (optional client overrides; do NOT remove old args) + val_frac=None, + test_frac=None, + id_col=None, + test_ids=None, + seed=42, + ): + self.server_address = server_address + self.dp_config = dp_config + self.client_val_frac = val_frac + self.client_test_frac = test_frac + self.id_col = id_col + self.test_ids = test_ids + self.seed = seed + + # Load the CSV once; actual column selection happens on first fit using server config + self._df = pd.read_csv(data_path) + + # Defaults used only for get_properties BEFORE first fit (last column target) + self.feature_names = self._df.columns[:-1].tolist() + self.target_name = self._df.columns[-1] + self.label_counts = self._df[self.target_name].value_counts().to_dict() + self.classes = sorted(self.label_counts.keys()) + + # Tensors for metrics before first fit (fallback to all-but-last as features) + X_default = self._df.iloc[:, :-1].values + y_default = self._df.iloc[:, -1].values + self.X_tensor = torch.tensor(X_default, dtype=torch.float32) + self.y_tensor = torch.tensor(y_default, dtype=torch.float32) + + # Placeholders; we lazily build loaders/model on the first fit when we see server config + self.train_loader = None + self.val_loader = None + self.test_loader = None + + self.model = None + self.criterion = nn.BCEWithLogitsLoss() + self.optimizer = None + + # Effective settings (filled at first fit) + self.effective_features = self.feature_names[:] + self.effective_target = self.target_name + self.effective_val_frac = float(self.client_val_frac) if self.client_val_frac is not None else 0.0 + self.effective_test_frac = float(self.client_test_frac) if 
self.client_test_frac is not None else 0.0 + + self._initialized = False + self._dp_attached = False # to avoid wrapping twice + + # ---------- helpers + + def _mk_loader(self, X, y, batch_size, shuffle): + x_t = torch.tensor(X, dtype=torch.float32) + y_t = torch.tensor(y, dtype=torch.float32) + return DataLoader(TensorDataset(x_t, y_t), batch_size=batch_size, shuffle=shuffle) + + def _lazy_init_from_server_config(self, config): + """ + Build model and (train, val, test) loaders once, using: + - Server-enforced schema: config['features'] (comma-separated), config['target'] + - Split fractions: client overrides win; else use server's val_fraction/test_fraction + - NEW: if config['test_ids'] is set (per-client from strategy), use ID-based split + """ + # ---------- schema from server (enforced if provided) + srv_features = (config.get("features") or "").strip() + srv_target = (config.get("target") or "").strip() + print(f"[Client] Initializing with server schema: features='{srv_features}', target='{srv_target}'") + + if srv_target: + if srv_target not in self._df.columns: + raise ValueError(f"Server-specified target '{srv_target}' not in CSV columns {list(self._df.columns)}") + target_col = srv_target + else: + target_col = self._df.columns[-1] # fallback (keeps backward compatibility) + + if srv_features: + feat_cols = [c.strip() for c in srv_features.split(",") if c.strip()] + missing = [c for c in feat_cols if c not in self._df.columns] + if missing: + raise ValueError(f"Server-specified feature(s) not found in CSV: {missing}") + else: + feat_cols = [c for c in self._df.columns if c != target_col] + + # ---------- fractions: client overrides > server defaults > fallback (0.10/0.10) + srv_val = config.get("val_fraction", None) + srv_test = config.get("test_fraction", None) + val_frac = self.client_val_frac if self.client_val_frac is not None else (float(srv_val) if srv_val is not None else 0.10) + test_frac = self.client_test_frac if self.client_test_frac is not None else (float(srv_test) if srv_test is not None else 0.10) + + if not (0.0 <= val_frac < 1.0): + raise ValueError(f"Invalid val_frac: {val_frac} (must be 0 <= val_frac < 1)") + + # ---------- NEW: adopt test_ids / id_col from server config if provided + if (not self.test_ids or not self.test_ids.strip()) and config.get("test_ids"): + + # strategy (per_client) can inject test_ids as list or CSV; normalize to CSV string + ti = config.get("test_ids") + if isinstance(ti, (list, tuple, set)): + self.test_ids = ",".join(str(x) for x in ti) + else: + self.test_ids = str(ti) + + print(f"[Client] Using server-provided test_ids: {self.test_ids}") + + if (not self.id_col) and config.get("id_col"): + self.id_col = str(config.get("id_col")) + + # ---------- extract arrays with the enforced schema + X_all = self._df[feat_cols].values + y_all = self._df[target_col].values + + # Keep tensors for global metrics logging (same behavior as before) + self.X_tensor = torch.tensor(X_all, dtype=torch.float32) + self.y_tensor = torch.tensor(y_all, dtype=torch.float32) + + # ---------- split + if self.test_ids and self.test_ids.strip(): # ID-based mode (unchanged behavior, now also supports server-provided IDs) + print("[Client] Using ID-based test selection") + test_ids_list = [i.strip() for i in self.test_ids.split(',') if i.strip()] + + if self.id_col and self.id_col in self._df.columns: + id_series = self._df[self.id_col] + # Align types between id_series and test_ids_list + if np.issubdtype(id_series.dtype, np.number): + test_ids_list = [int(i) for 
i in test_ids_list] + else: + test_ids_list = [str(i) for i in test_ids_list] + + else: + print(f"[Client] Falling back to line numbers (index) as IDs since id_col='{self.id_col}' is invalid or not provided") + id_series = self._df.index + try: + test_ids_list = [int(i) for i in test_ids_list] + except ValueError: + raise ValueError("Test IDs must be integers when using line numbers as IDs") + + test_mask = id_series.isin(test_ids_list) + if not test_mask.any(): + print("[Client] Warning: No matching IDs found for test set; it will be empty") + + X_test = X_all[test_mask] + y_test = y_all[test_mask] + X_trval = X_all[~test_mask] + y_trval = y_all[~test_mask] + + actual_test_frac = len(y_test) / len(y_all) if len(y_all) > 0 else 0.0 + if val_frac + actual_test_frac >= 1.0: + raise ValueError(f"Validation fraction {val_frac} + actual test fraction {actual_test_frac} >= 1.0") + + self.effective_test_frac = actual_test_frac # For logging + + else: # Fraction-based mode (existing) + if not (0.0 <= test_frac < 1.0 and (val_frac + test_frac) < 1.0): + raise ValueError(f"Invalid fractions: val={val_frac}, test={test_frac} (require 0 <= val,test < 1 and val+test < 1)") + + strat_all = y_all if len(np.unique(y_all)) > 1 else None + X_trval, X_test, y_trval, y_test = train_test_split( + X_all, y_all, test_size=test_frac, random_state=self.seed, stratify=strat_all + ) + + # Split val from trval (common to both modes) + if val_frac > 0 and len(y_trval) > 0: + actual_test_frac = len(y_test) / len(y_all) if len(y_all) > 0 else 0.0 + rel_val = val_frac / (1.0 - actual_test_frac) if (1.0 - actual_test_frac) > 0 else 0.0 + strat_tr = y_trval if len(np.unique(y_trval)) > 1 else None + X_train, X_val, y_train, y_val = train_test_split( + X_trval, y_trval, test_size=rel_val, random_state=self.seed, stratify=strat_tr + ) + else: + X_train, y_train = X_trval, y_trval + X_val, y_val = np.empty((0, X_all.shape[1])), np.empty((0,)) + + # ---------- build loaders + batch_size = self.dp_config.batch_size if self.dp_config else 32 + self.train_loader = self._mk_loader(X_train, y_train, batch_size, shuffle=True) + self.val_loader = self._mk_loader(X_val, y_val, batch_size=batch_size, shuffle=False) if len(y_val) else None + self.test_loader = self._mk_loader(X_test, y_test, batch_size, shuffle=False) + + # ---------- model/optimizer + input_dim = X_all.shape[1] + self.model = Net(input_dim) + self.optimizer = optim.SGD(self.model.parameters(), lr=0.01) + + # ---------- attach DP (same behavior as before; only wraps the train loader) + if self.dp_config and not self._dp_attached: + try: + from opacus import PrivacyEngine + privacy_engine = PrivacyEngine() + (self.model, self.optimizer, self.train_loader) = privacy_engine.make_private( + module=self.model, + optimizer=self.optimizer, + data_loader=self.train_loader, + noise_multiplier=self.dp_config.noise_multiplier, + max_grad_norm=self.dp_config.max_grad_norm, + secure_rng=self.dp_config.secure_rng, + ) + self._dp_attached = True + except ImportError: + print("Opacus non installé : exécution sans DP.") + + # ---------- record effective settings + self.effective_features = feat_cols + self.effective_target = target_col + self.effective_val_frac = float(val_frac) + # effective_test_frac already set above if ID mode; otherwise use the input + if self.test_ids and self.test_ids.strip(): + pass # Already set + else: + self.effective_test_frac = float(test_frac) + + self._initialized = True + print(f"[Client] Initialized with features={feat_cols}, target={target_col}, 
val={val_frac}, test={self.effective_test_frac}") + + # ---------- FL API (unchanged behavior) ---------- + + def get_parameters(self, config): + if not self._initialized: + try: + self._lazy_init_from_server_config(config if isinstance(config, dict) else {}) + except Exception as e: + if not self._initialized: + self._lazy_init_from_server_config({}) + return [val.cpu().numpy() for val in self.model.state_dict().values()] + + def set_parameters(self, parameters): + params_dict = zip(self.model.state_dict().keys(), parameters) + state_dict = {k: torch.tensor(v) for k, v in params_dict} + self.model.load_state_dict(state_dict, strict=True) + + def fit(self, parameters, config): + if not self._initialized: + self._lazy_init_from_server_config(config) + + self.set_parameters(parameters) + self.model.train() + + local_epochs = config.get("local_epochs", 5) + total_loss = 0.0 + print(f"Training for {local_epochs} epochs...") + + for epoch in range(local_epochs): + print(f"Epoch {epoch + 1}/{local_epochs}") + for X_batch, y_batch in self.train_loader: + self.optimizer.zero_grad() + outputs = self.model(X_batch) + loss = self.criterion(outputs.squeeze(), y_batch) + loss.backward() + self.optimizer.step() + total_loss += loss.item() * X_batch.size(0) + + avg_loss = total_loss / (len(self.train_loader.dataset) * local_epochs) + + with torch.no_grad(): + logits = self.model(self.X_tensor).squeeze() + probs = torch.sigmoid(logits).cpu().numpy() + y_true = self.y_tensor.cpu().numpy() + binary_preds = (probs >= 0.5).astype(int) + try: + auc = roc_auc_score(y_true, probs) + except Exception: + auc = float("nan") + acc = accuracy_score(y_true, binary_preds) + + hostname = socket.gethostname() + os_type = platform.system() + metrics = { + "hostname": hostname, + "os_type": os_type, + "train_loss": avg_loss, + "train_accuracy": acc, + "train_auc": auc, + "features": ",".join(self.effective_features), + "target": self.effective_target, + "val_fraction": self.effective_val_frac, + "test_fraction": self.effective_test_frac, + } + + return self.get_parameters(config), len(self.train_loader.dataset), metrics + + def evaluate(self, parameters, config): + if not self._initialized: + self._lazy_init_from_server_config(config) + + self.set_parameters(parameters) + self.model.eval() + + total_loss = 0.0 + all_probs, all_true = [], [] + with torch.no_grad(): + for X_batch, y_batch in self.test_loader: + outputs = self.model(X_batch) + loss = self.criterion(outputs.squeeze(), y_batch) + total_loss += loss.item() * X_batch.size(0) + probs = torch.sigmoid(outputs.squeeze()).cpu().numpy() + all_probs.extend(probs.tolist()) + all_true.extend(y_batch.cpu().numpy().tolist()) + + avg_loss = total_loss / len(self.test_loader.dataset) if len(self.test_loader.dataset) > 0 else 0.0 + binary_preds = [1 if p >= 0.5 else 0 for p in all_probs] + try: + auc = roc_auc_score(all_true, all_probs) + except Exception: + auc = float("nan") + acc = accuracy_score(all_true, binary_preds) + + metrics = { + "eval_loss": avg_loss, + "eval_accuracy": acc, + "eval_auc": auc, + } + print(f"Evaluation metrics: {metrics}") + + return float(avg_loss), len(self.test_loader.dataset), metrics + + def get_properties(self, config): + hostname = socket.gethostname() + os_type = platform.system() + + if self._initialized: + num_samples = int(self.X_tensor.shape[0]) + num_features = int(self.X_tensor.shape[1]) + features_str = ",".join(self.effective_features) + target_name = self.effective_target + label_counts = 
pd.Series(self.y_tensor.numpy()).value_counts().to_dict() + classes = sorted(label_counts.keys()) + else: + num_samples = len(self.X_tensor) + num_features = self.X_tensor.shape[1] + features_str = ",".join(self.feature_names) + target_name = self.target_name + label_counts = self.label_counts + classes = self.classes + + classes_str = ",".join(map(str, classes)) + dist_str = ",".join(f"{cls}:{cnt}" for cls, cnt in label_counts.items()) + + cpu_physical = psutil.cpu_count(logical=False) + cpu_logical = psutil.cpu_count(logical=True) + total_mem_gb = round(psutil.virtual_memory().total / (1024**3), 2) + driver_present = shutil.which('nvidia-smi') is not None + gpu_count = 0 + if GPUtil and driver_present: + try: + gpu_count = len(GPUtil.getGPUs()) + except Exception: + gpu_count = 0 + + return { + "hostname": hostname, + "os_type": os_type, + "num_samples": num_samples, + "num_features": num_features, + "features": features_str, + "target": target_name, + "classes": classes_str, + "label_distribution": dist_str, + "cpu_physical_cores": cpu_physical, + "cpu_logical_cores": cpu_logical, + "total_memory_gb": total_mem_gb, + "gpu_driver_present": str(driver_present), + "gpu_count": gpu_count, + } + + def start(self): + fl.client.start_numpy_client( + server_address=self.server_address, + client=self, + ) diff --git a/build/lib/MEDfl/rw/model.py b/build/lib/MEDfl/rw/model.py new file mode 100644 index 0000000..3c50a1f --- /dev/null +++ b/build/lib/MEDfl/rw/model.py @@ -0,0 +1,147 @@ +import math +import torch +import torch.nn as nn +import torch.nn.functional as F + +_ACTS = { + "relu": nn.ReLU, + "gelu": nn.GELU, + "leaky_relu": lambda: nn.LeakyReLU(0.01), + "tanh": nn.Tanh, + "elu": nn.ELU, + "silu": nn.SiLU, + "identity": nn.Identity, +} + +def _make_activation(name): + if name not in _ACTS: + raise ValueError(f"Unsupported activation '{name}'. Choose from {list(_ACTS)}") + return _ACTS[name]() + +class Net(nn.Module): + """ + Flexible MLP with task-aware output. + - hidden_dims: e.g., [128, 64, 32] + - activation: 'relu' | 'gelu' | 'leaky_relu' | ... + - batchnorm: apply BatchNorm1d after each linear (except output) + - dropout: float in [0,1] or list per hidden layer + - task: 'binary' | 'multiclass' | 'multilabel' | 'regression' + - num_classes: required for 'multiclass'; for 'binary' ignore; for 'multilabel' set to label count + - return_logits: always True for training (recommended). Use .predict() for post-activation outputs. 
+ """ + + def __init__( + self, + input_dim, + hidden_dims=[64, 32], + activation="relu", + batchnorm=True, + dropout=0.0, + task="binary", + num_classes=None, + output_bias=True, + return_logits=True, + weight_init="kaiming", + ): + super().__init__() + self.task = task + self.num_classes = num_classes + self.return_logits = return_logits + self.act = activation + + if task == "multiclass": + if not num_classes or num_classes < 2: + raise ValueError("For 'multiclass', num_classes >= 2 is required.") + output_dim = num_classes + elif task == "binary": + output_dim = 1 + elif task == "multilabel": + if not num_classes or num_classes < 1: + raise ValueError("For 'multilabel', set num_classes = number of labels.") + output_dim = num_classes + elif task == "regression": + output_dim = 1 + else: + raise ValueError(f"Unknown task: {task}") + + # Normalize dropout to list per hidden layer + if isinstance(dropout, (int, float)): + dropout = [float(dropout)] * len(hidden_dims) + elif dropout is None: + dropout = [0.0] * len(hidden_dims) + elif isinstance(dropout, list): + if len(dropout) != len(hidden_dims): + raise ValueError("Length of dropout list must match hidden_dims.") + else: + raise ValueError("dropout must be float|list[float]|None") + + self.blocks = nn.ModuleList() + in_dim = input_dim + for h, p in zip(hidden_dims, dropout): + block = nn.ModuleDict({ + "lin": nn.Linear(in_dim, h, bias=True), + "bn": nn.BatchNorm1d(h) if batchnorm else nn.Identity(), + "act": _make_activation(activation), + "drop": nn.Dropout(p) if p and p > 0 else nn.Identity(), + }) + self.blocks.append(block) + in_dim = h + + self.out = nn.Linear(in_dim, output_dim, bias=output_bias) + + # Weight init + if weight_init != "none": + self.apply(lambda m: self._init_weights(m, scheme=weight_init, activation=activation)) + + @staticmethod + def _init_weights(m, scheme, activation): + if isinstance(m, nn.Linear): + if scheme == "kaiming": + nonlinearity = "leaky_relu" if activation == "leaky_relu" else "relu" + nn.init.kaiming_uniform_( + m.weight, + a=math.sqrt(5) if activation == "leaky_relu" else 0, + nonlinearity=nonlinearity, + ) + elif scheme == "xavier": + nn.init.xavier_uniform_(m.weight) + if m.bias is not None: + fan_in, _ = nn.init._calculate_fan_in_and_fan_out(m.weight) + bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 + nn.init.uniform_(m.bias, -bound, bound) + + def forward(self, x): + for blk in self.blocks: + x = blk["lin"](x) + x = blk["bn"](x) + x = blk["act"](x) + x = blk["drop"](x) + logits = self.out(x) + return logits if self.return_logits else self._apply_output_activation(logits) + + # Inference helpers + def _apply_output_activation(self, logits): + if self.task == "binary": + return torch.sigmoid(logits) # (B, 1) + elif self.task == "multiclass": + return F.softmax(logits, dim=-1) # (B, C) + elif self.task == "multilabel": + return torch.sigmoid(logits) # (B, L) + elif self.task == "regression": + return logits # raw regression output + else: + raise RuntimeError("Invalid task") + + @torch.no_grad() + def predict(self, x): + out = self.forward(x) + probs = self._apply_output_activation(out) if self.return_logits else out + + if self.task == "binary": + return (probs >= 0.5).long() + elif self.task == "multiclass": + return probs.argmax(dim=-1) + elif self.task == "multilabel": + return (probs >= 0.5).long() + elif self.task == "regression": + return probs diff --git a/build/lib/MEDfl/rw/server.py b/build/lib/MEDfl/rw/server.py new file mode 100644 index 0000000..178658a --- /dev/null +++ 
b/build/lib/MEDfl/rw/server.py @@ -0,0 +1,134 @@ +import flwr as fl +from flwr.server.strategy import FedAvg +from flwr.server.server import ServerConfig +from MEDfl.rw.strategy import Strategy +import asyncio +from flwr.server.client_manager import ClientManager +from flwr.server.client_proxy import ClientProxy +from flwr.common import GetPropertiesIns +from flwr.common import GetPropertiesIns + + +class FederatedServer: + """ + FederatedServer wraps the launch and configuration of a Flower federated learning server. + + Attributes: + server_address (str): Server host and port in the format "host:port". + server_config (ServerConfig): Configuration for the Flower server. + strategy_wrapper (Strategy): Wrapper around the actual Flower strategy. + strategy (flwr.server.Strategy): Actual Flower strategy instance. + certificates (Any): Optional TLS certificates. + connected_clients (list): List of connected client IDs. + + Methods: + start(): + Launch the Flower server with the specified strategy and log client connections. + """ + + def __init__( + self, + host="0.0.0.0", + port=8080, + num_rounds=3, + strategy=None, + certificates=None, + ): + """ + Initialize the FederatedServer. + + Args: + host (str): Hostname or IP to bind the server to. + port (int): Port to listen on. + num_rounds (int): Number of federated learning rounds to execute. + strategy (Optional[Strategy]): Optional custom strategy wrapper. + certificates (Optional[Any]): Optional TLS certificates. + """ + # Server address and configuration + self.server_address = f"{host}:{port}" + self.server_config = ServerConfig(num_rounds=num_rounds) + + # Use custom or default strategy + self.strategy_wrapper = strategy or Strategy() + self.strategy_wrapper.create_strategy() + if self.strategy_wrapper.strategy_object is None: + raise ValueError("Strategy object not initialized. Call create_strategy() first.") + self.strategy = self.strategy_wrapper.strategy_object + + self.certificates = certificates + self.connected_clients = [] # Track connected client IDs + + + def start(self): + """ + Start the Flower server with the configured strategy and track client connections. + """ + print(f"Using strategy: {self.strategy_wrapper.name}") + print(f"Starting Flower server on {self.server_address} with strategy {self.strategy_wrapper.name}") + + # Use a custom client manager that logs client connections + client_manager = TrackingClientManager(self) + + # Launch the Flower server + fl.server.start_server( + server_address=self.server_address, + config=self.server_config, + strategy=self.strategy, + certificates=self.certificates, + client_manager=client_manager, + ) + + +class TrackingClientManager(fl.server.client_manager.SimpleClientManager): + """ + TrackingClientManager extends the default SimpleClientManager to log client connections. + + Attributes: + server (FederatedServer): The FederatedServer instance this manager belongs to. + client_properties (dict): Placeholder for storing client-specific properties. + """ + + def __init__(self, server): + """ + Initialize the TrackingClientManager. + + Args: + server (FederatedServer): Reference to the FederatedServer. + """ + super().__init__() + self.server = server + self.client_properties = {} + + def register(self, client): + """ + Register a client and log its connection. + + Args: + client (ClientProxy): The client proxy being registered. + + Returns: + bool: True if the client was registered successfully. 
+ """ + success = super().register(client) + if success and client.cid not in self.server.connected_clients: + # Run the asynchronous hostname fetch synchronously + asyncio.run(self._fetch_and_log_hostname(client)) + return success + + async def _fetch_and_log_hostname(self, client): + """ + Asynchronously fetch and log the client's hostname or CID. + + Args: + client (ClientProxy): The client proxy. + """ + # Optional: uncomment to fetch hostname from client properties + # try: + # ins = GetPropertiesIns(config={}) + # props = await client.get_properties(ins=ins, timeout=10.0, group_id=0) + # hostname = props.properties.get("hostname", "unknown") + # except Exception as e: + # hostname = f"Error: {e}" + + print(f"✅ Client connected - CID: {client.cid}") + self.server.connected_clients.append(client.cid) diff --git a/build/lib/MEDfl/rw/strategy.py b/build/lib/MEDfl/rw/strategy.py new file mode 100644 index 0000000..ba1ce42 --- /dev/null +++ b/build/lib/MEDfl/rw/strategy.py @@ -0,0 +1,302 @@ +# File: MEDfl/rw/strategy.py + +import os +import numpy as np +import flwr as fl +from flwr.common import GetPropertiesIns +from flwr.server.client_manager import ClientManager +from flwr.server.client_proxy import ClientProxy +import time +from MEDfl.rw.model import Net +import torch + +# =================================================== +# Custom metric aggregation functions +# =================================================== +def aggregate_fit_metrics(results): + total = sum(n for n, _ in results) + loss = sum(m.get("train_loss", 0.0) * n for n, m in results) / total + acc = sum(m.get("train_accuracy", 0.0) * n for n, m in results) / total + auc = sum(m.get("train_auc", 0.0) * n for n, m in results) / total + return {"train_loss": loss, "train_accuracy": acc, "train_auc": auc} + +def aggregate_eval_metrics(results): + total = sum(n for n, _ in results) + loss = sum(m.get("eval_loss", 0.0) * n for n, m in results) / total + acc = sum(m.get("eval_accuracy", 0.0) * n for n, m in results) / total + auc = sum(m.get("eval_auc", 0.0) * n for n, m in results) / total + return {"eval_loss": loss, "eval_accuracy": acc, "eval_auc": auc} + +# =================================================== +# Strategy Wrapper +# =================================================== +class Strategy: + """ + Flower Strategy wrapper: + - Dynamic hyperparameters via on_fit_config_fn + - Custom metric aggregation + - Per-client & aggregated metric logging + - Synchronous get_properties() inspection in configure_fit() + - Saving global parameters every saveOnRounds to savingPath + + Extended: + - split_mode: + * "global": use global val_fraction/test_fraction for all clients + * "per_client": use client_fractions[hostname] if present + - client_fractions: + { + "HOSTNAME_1": { + "val_fraction": float (optional), + "test_fraction": float (optional), + "test_ids": [..] or "id1,id2" (optional) + }, + ... 
+ } + - In per_client mode: + * if test_ids is present for a client: + -> send test_ids + -> do NOT use that client's test_fraction + * otherwise: + -> use that client's val_fraction/test_fraction if provided, + else fall back to global val_fraction/test_fraction + - client id in this mapping = hostname from client.get_properties() + - id_col: + * column name used on clients to match test_ids (default "id") + """ + + def __init__( + self, + name="FedAvg", + fraction_fit=1.0, + fraction_evaluate=1.0, + min_fit_clients=2, + min_evaluate_clients=2, + min_available_clients=2, + initial_parameters=None, + evaluate_fn=None, + fit_metrics_aggregation_fn=None, + evaluate_metrics_aggregation_fn=None, + local_epochs=1, + threshold=0.5, + learning_rate=0.01, + optimizer_name="SGD", + savingPath="", + saveOnRounds=3, + total_rounds=3, + features="", + target="", + val_fraction=0.10, + test_fraction=0.10, + # NEW: splitting control (added at the end to not break existing calls) + split_mode="global", # "global" or "per_client" + client_fractions=None, + # NEW: id column for test_ids mapping + id_col="id", + ): + self.name = name + self.fraction_fit = fraction_fit + self.fraction_evaluate = fraction_evaluate + self.min_fit_clients = min_fit_clients + self.min_evaluate_clients = min_evaluate_clients + self.min_available_clients = min_available_clients + self.initial_parameters = initial_parameters or [] + self.evaluate_fn = evaluate_fn + + self.fit_metrics_aggregation_fn = fit_metrics_aggregation_fn or aggregate_fit_metrics + self.evaluate_metrics_aggregation_fn = evaluate_metrics_aggregation_fn or aggregate_eval_metrics + + # Dynamic hyperparams + self.local_epochs = local_epochs + self.threshold = threshold + self.learning_rate = learning_rate + self.optimizer_name = optimizer_name + self.savingPath = savingPath + self.saveOnRounds = saveOnRounds + self.total_rounds = total_rounds + self._features = features # comma-separated or "" + self._target = target # or "" + self._val_fraction = val_fraction + self._test_fraction = test_fraction + + # NEW + self.split_mode = split_mode + self.client_fractions = client_fractions or {} + self.id_col = id_col + + self.strategy_object = None + + def create_strategy(self): + # 1) Pick the Flower Strategy class + StrategyClass = getattr(fl.server.strategy, self.name) + + # 2) Define on_fit_config_fn _before_ instantiation (global defaults) + def fit_config_fn(server_round): + return { + "local_epochs": self.local_epochs, + "threshold": self.threshold, + "learning_rate": self.learning_rate, + "optimizer": self.optimizer_name, + "features": self._features, + "target": self._target, + "val_fraction": float(self._val_fraction), + "test_fraction": float(self._test_fraction), + # NEW: always send id_col so clients know which column to use for test_ids + "id_col": self.id_col, + } + + # 3) Build params including on_fit_config_fn + params = { + "fraction_fit": self.fraction_fit, + "fraction_evaluate": self.fraction_evaluate, + "min_fit_clients": self.min_fit_clients, + "min_evaluate_clients": self.min_evaluate_clients, + "min_available_clients": self.min_available_clients, + "evaluate_fn": self.evaluate_fn, + "on_fit_config_fn": fit_config_fn, + "fit_metrics_aggregation_fn": self.fit_metrics_aggregation_fn, + "evaluate_metrics_aggregation_fn": self.evaluate_metrics_aggregation_fn, + } + if self.initial_parameters: + params["initial_parameters"] = fl.common.ndarrays_to_parameters(self.initial_parameters) + else: + # derive initial params from server-specified features + 
feat_cols = [c.strip() for c in (self._features or "").split(",") if c.strip()] + if not feat_cols: + raise ValueError( + "No initial_parameters provided and 'features' is empty. " + "Provide Strategy(..., features='col1,col2,...') or pass initial_parameters." + ) + input_dim = len(feat_cols) + _model = Net(input_dim) + _arrays = [t.detach().cpu().numpy() for t in _model.state_dict().values()] + params["initial_parameters"] = fl.common.ndarrays_to_parameters(_arrays) + + # 4) Instantiate the real Flower strategy + strat = StrategyClass(**params) + + # 5) Wrap aggregate_fit for logging (prints unchanged) + original_agg_fit = strat.aggregate_fit + + def logged_agg_fit(server_round, results, failures): + print(f"\n[Server] 🔄 Round {server_round} - Client Training Metrics:") + for i, (client_id, fit_res) in enumerate(results): + print(f" CTM Round {server_round} Client:{client_id.cid}: {fit_res.metrics}") + agg_params, metrics = original_agg_fit(server_round, results, failures) + print(f"[Server] ✅ Round {server_round} - Aggregated Training Metrics: {metrics}\n") + # save the model parameters if savingPath is set on each saveOnRounds + if self.savingPath and ( + (server_round % self.saveOnRounds == 0) + or (self.total_rounds and server_round == self.total_rounds) + ): + arrays = fl.common.parameters_to_ndarrays(agg_params) + # Determine filename: final_model on last round else round_{n} + filename = ( + f"round_{server_round}_final_model.npz" + if server_round == self.total_rounds + else f"round_{server_round}_model.npz" + ) + filepath = os.path.join(self.savingPath, filename) + np.savez(filepath, *arrays) + return agg_params, metrics + + strat.aggregate_fit = logged_agg_fit + + # 6) Wrap aggregate_evaluate for logging (prints unchanged) + original_agg_eval = strat.aggregate_evaluate + + def logged_agg_eval(server_round, results, failures): + print(f"\n[Server] 📊 Round {server_round} - Client Evaluation Metrics:") + for i, (client_id, eval_res) in enumerate(results): + print(f" CEM Round {server_round} Client:{client_id.cid}: {eval_res.metrics}") + loss, metrics = original_agg_eval(server_round, results, failures) + print(f"[Server] ✅ Round {server_round} - Aggregated Evaluation Metrics:") + print(f" Loss: {loss}, Metrics: {metrics}\n") + return loss, metrics + + strat.aggregate_evaluate = logged_agg_eval + + # 7) Wrap configure_fit to: + # - log client properties (unchanged) + # - apply split_mode/client_fractions to fit_ins.config (NEW) + original_conf_fit = strat.configure_fit + + def wrapped_conf_fit( + server_round, + parameters, + client_manager + ): + selected = original_conf_fit( + server_round=server_round, + parameters=parameters, + client_manager=client_manager + ) + + ins = GetPropertiesIns(config={}) + + for client, fit_ins in selected: + hostname = None + try: + props = client.get_properties(ins=ins, timeout=10.0, group_id=0) + print(f"\n📋 [Round {server_round}] Client {client.cid} Properties: {props.properties}") + hostname = props.properties.get("hostname", None) + except Exception as e: + print(f"⚠️ Failed to get properties from {client.cid}: {e}") + + # Fallback: if no hostname returned, use Flower cid + if not hostname: + hostname = client.cid + + # Keep same object + cfg = fit_ins.config + + if self.split_mode == "per_client": + # Lookup by hostname (preferred) or cid + per_cfg = ( + self.client_fractions.get(hostname) + or self.client_fractions.get(client.cid) + or {} + ) + + # val_fraction: per-client override if present + if "val_fraction" in per_cfg: + try: + 
cfg["val_fraction"] = float(per_cfg["val_fraction"]) + except Exception: + pass # keep existing if invalid + + # test: prefer test_ids if provided + if "test_ids" in per_cfg and per_cfg["test_ids"]: + test_ids_val = per_cfg["test_ids"] + if isinstance(test_ids_val, (list, tuple, set)): + test_ids_str = ",".join(str(x) for x in test_ids_val) + else: + test_ids_str = str(test_ids_val) + cfg["test_ids"] = test_ids_str + # when using explicit IDs, do not force a test_fraction for this client + if "test_fraction" in cfg: + del cfg["test_fraction"] + # ensure id_col is sent so client can map IDs + cfg["id_col"] = self.id_col + else: + # no test_ids -> use per-client test_fraction if present + if "test_fraction" in per_cfg: + try: + cfg["test_fraction"] = float(per_cfg["test_fraction"]) + except Exception: + pass # keep existing if invalid + # if no test_ids: id_col not strictly required, leave as-is + else: + # split_mode == "global": enforce global fractions, clear any test_ids + if "test_ids" in cfg: + del cfg["test_ids"] + cfg["val_fraction"] = float(self._val_fraction) + cfg["test_fraction"] = float(self._test_fraction) + # also send id_col so clients know column name if needed + cfg["id_col"] = self.id_col + + return selected + + strat.configure_fit = wrapped_conf_fit + + # 8) Save the ready-to-use strategy + self.strategy_object = strat diff --git a/build/lib/MEDfl/scripts/__init__.py b/build/lib/MEDfl/scripts/__init__.py new file mode 100644 index 0000000..0e0ec23 --- /dev/null +++ b/build/lib/MEDfl/scripts/__init__.py @@ -0,0 +1,2 @@ +# from .base import * +# from .create_db import * \ No newline at end of file diff --git a/build/lib/MEDfl/scripts/base.py b/build/lib/MEDfl/scripts/base.py new file mode 100644 index 0000000..1f5030a --- /dev/null +++ b/build/lib/MEDfl/scripts/base.py @@ -0,0 +1,30 @@ +import mysql.connector +from sqlalchemy import create_engine, text +from configparser import ConfigParser +import yaml +import pkg_resources +import os + +# Get the directory of the current script +current_directory = os.path.dirname(os.path.abspath(__file__)) + +# Load configuration from the config file +config_file_path = os.path.join(current_directory, 'db_config.ini') + +config = ConfigParser() +config.read(config_file_path) +mysql_config = config['mysql'] + + + +connection_string = ( + f"mysql+mysqlconnector://{mysql_config['user']}:{mysql_config['password']}@" + f"{mysql_config['host']}:{mysql_config['port']}/{mysql_config['database']}" +) + +eng = create_engine( + connection_string, + execution_options={"autocommit": True}, +) + +my_eng = eng.connect() \ No newline at end of file diff --git a/build/lib/MEDfl/scripts/create_db.py b/build/lib/MEDfl/scripts/create_db.py new file mode 100644 index 0000000..a98cafb --- /dev/null +++ b/build/lib/MEDfl/scripts/create_db.py @@ -0,0 +1,126 @@ +import sys +import sqlite3 +import pandas as pd +from configparser import ConfigParser +import os +import ast + +from MEDfl.LearningManager.utils import * + + +def main(csv_file_path): + try: + # Get the directory of the current script + current_directory = os.path.dirname(os.path.abspath(__file__)) + + # Load configuration from the config file + # config_file_path = os.path.join(current_directory, 'sqllite_config.ini')* + + config_file_path = load_db_config() + + # config = ConfigParser() + # config.read(config_file_path) + # sqlite_config = config['sqllite'] + + sqlite_config = config_file_path + + + print('Im here !') + + # Connect to SQLite database (it will be created if it doesn't exist) + 
database_path = sqlite_config['database'] + conn = sqlite3.connect(database_path) + cursor = conn.cursor() + + # Drop each table if it exists + tables = ['Networks', 'FLsetup', 'Nodes', 'DataSets', 'FLpipeline', 'testResults', 'FedDatasets'] + for table in tables: + cursor.execute(f"DROP TABLE IF EXISTS {table}") + + # Create Networks table + cursor.execute( + "CREATE TABLE Networks( \ + NetId INTEGER PRIMARY KEY AUTOINCREMENT, \ + NetName TEXT \ + );" + ) + + # Create FLsetup table + cursor.execute("CREATE TABLE FLsetup (\ + FLsetupId INTEGER PRIMARY KEY AUTOINCREMENT,\ + name TEXT NOT NULL, \ + description TEXT NOT NULL,\ + creation_date TEXT NOT NULL,\ + NetId INTEGER NOT NULL,\ + column_name TEXT\ + )") + + # Create Nodes table + cursor.execute("CREATE TABLE Nodes ( \ + NodeId INTEGER PRIMARY KEY AUTOINCREMENT,\ + NodeName TEXT,\ + train BOOLEAN DEFAULT 1,\ + NetId INTEGER\ + )") + + data_df = pd.read_csv(csv_file_path) + columns = data_df.columns.tolist() + column_map = {"object": "TEXT", "int64": "INTEGER", "float64": "REAL"} + sub_query = ", ".join(f"{col} {column_map[str(data_df[col].dtype)]}" for col in columns) + + # Create Datasets table by getting columns from the master csv file + cursor.execute( + f"CREATE TABLE DataSets( \ + DataSetId INTEGER PRIMARY KEY AUTOINCREMENT, \ + DataSetName TEXT, \ + NodeId INTEGER,\ + {sub_query}\ + )" + ) + + # Create FLpipeline table + cursor.execute("CREATE TABLE FLpipeline(\ + id INTEGER PRIMARY KEY AUTOINCREMENT,\ + name TEXT NOT NULL, \ + description TEXT NOT NULL,\ + creation_date TEXT NOT NULL,\ + results TEXT NOT NULL\ + ) ") + + # Create test results table + cursor.execute("CREATE TABLE testResults(\ + pipelineId INTEGER,\ + nodename TEXT NOT NULL, \ + confusionmatrix TEXT,\ + accuracy REAL,\ + sensivity REAL,\ + ppv REAL,\ + npv REAL,\ + f1score REAL,\ + fpr REAL,\ + tpr REAL, \ + PRIMARY KEY (pipelineId, nodename)\ + ) ") + + # Create FederatedDataset table + cursor.execute("CREATE TABLE FedDatasets (\ + FedId INTEGER PRIMARY KEY AUTOINCREMENT,\ + FLsetupId INTEGER,\ + FLpipeId INTEGER,\ + name TEXT NOT NULL\ + )") + + # Commit and close the cursor + conn.commit() + cursor.close() + conn.close() + + except sqlite3.Error as e: + print(f"Error: {e}") + +if __name__ == "__main__": + if len(sys.argv) != 2: + print("Usage: python script.py ") + sys.exit(1) + csv_file_path = sys.argv[1] + main(csv_file_path) diff --git a/build/lib/alembic/__init__.py b/build/lib/alembic/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/build/lib/alembic/env.py b/build/lib/alembic/env.py new file mode 100644 index 0000000..14593a4 --- /dev/null +++ b/build/lib/alembic/env.py @@ -0,0 +1,61 @@ +from logging.config import fileConfig +import logging +from sqlalchemy import engine_from_config, create_engine +from sqlalchemy import pool +import sys +import os +from alembic import context + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +from scripts.base import my_eng + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. 
+fileConfig(config.config_file_name) + +# add your model's MetaData object here +# for 'autogenerate' support +# from myapp import mymodel +# target_metadata = mymodel.Base.metadata +target_metadata = None + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. +def configure_logger(name): + # This is the standard logging configuration + logging.config.fileConfig( + 'alembic_logging.ini', # Path to your logging configuration file + defaults={ + 'logfilename': 'alembic.log', # Log file name + }, + disable_existing_loggers=False, + ) + + return logging.getLogger(name) + + + +def run_migrations_offline(): + """Run migrations in 'offline' mode.""" + pass + +def run_migrations_online(): + """Run migrations in 'online' mode.""" + pass + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..d0c3cbf --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/_build/.buildinfo b/docs/_build/.buildinfo new file mode 100644 index 0000000..2188cfb --- /dev/null +++ b/docs/_build/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 
+config: c1d55650b2098a4309dc51d204bc0f92 +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/_build/.doctrees/Medfl.LearningManager.doctree b/docs/_build/.doctrees/Medfl.LearningManager.doctree new file mode 100644 index 0000000..b8b0b3f Binary files /dev/null and b/docs/_build/.doctrees/Medfl.LearningManager.doctree differ diff --git a/docs/_build/.doctrees/Medfl.NetManager.doctree b/docs/_build/.doctrees/Medfl.NetManager.doctree new file mode 100644 index 0000000..e537119 Binary files /dev/null and b/docs/_build/.doctrees/Medfl.NetManager.doctree differ diff --git a/docs/_build/.doctrees/Medfl.doctree b/docs/_build/.doctrees/Medfl.doctree new file mode 100644 index 0000000..090d088 Binary files /dev/null and b/docs/_build/.doctrees/Medfl.doctree differ diff --git a/docs/_build/.doctrees/environment.pickle b/docs/_build/.doctrees/environment.pickle new file mode 100644 index 0000000..176b903 Binary files /dev/null and b/docs/_build/.doctrees/environment.pickle differ diff --git a/docs/_build/.doctrees/index.doctree b/docs/_build/.doctrees/index.doctree new file mode 100644 index 0000000..0112e82 Binary files /dev/null and b/docs/_build/.doctrees/index.doctree differ diff --git a/docs/_build/.doctrees/modules.doctree b/docs/_build/.doctrees/modules.doctree new file mode 100644 index 0000000..a7cf6a7 Binary files /dev/null and b/docs/_build/.doctrees/modules.doctree differ diff --git a/docs/_build/.nojekyll b/docs/_build/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/docs/_build/Medfl.LearningManager.html b/docs/_build/Medfl.LearningManager.html new file mode 100644 index 0000000..95c7897 --- /dev/null +++ b/docs/_build/Medfl.LearningManager.html @@ -0,0 +1,1366 @@ + + + + + + + MEDfl.LearningManager package — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + + + +

MEDfl.LearningManager package

+
+

Submodules

+
+
+

MEDfl.LearningManager.client module

+
+
+class MEDfl.LearningManager.client.FlowerClient(cid: str, local_model: Model, trainloader: DataLoader, valloader: DataLoader, diff_priv: bool = True)[source]
+

Bases: NumPyClient

+

FlowerClient class for creating MEDfl clients.

+
+
+cid
+

Client ID.

+
+
Type:
+

str

+
+
+
+ +
+
+local_model
+

Local model of the federated learning network.

+
+
Type:
+

Model

+
+
+
+ +
+
+trainloader
+

DataLoader for training data.

+
+
Type:
+

DataLoader

+
+
+
+ +
+
+valloader
+

DataLoader for validation data.

+
+
Type:
+

DataLoader

+
+
+
+ +
+
+diff_priv
+

Flag indicating whether to use differential privacy.

+
+
Type:
+

bool

+
+
+
+ +
+
+__init__(cid: str, local_model: Model, trainloader: DataLoader, valloader: DataLoader, diff_priv: bool = True)[source]
+

Initializes the FlowerClient instance.

+
+
Parameters:
+
    +
  • cid (str) – Client ID.

  • +
  • local_model (Model) – Local model of the federated learning network.

  • +
  • trainloader (DataLoader) – DataLoader for training data.

  • +
  • valloader (DataLoader) – DataLoader for validation data.

  • +
  • diff_priv (bool) – Flag indicating whether to use differential privacy.

  • +
+
+
+
+ +
+
+context: Context
+
+ +
+
+evaluate(parameters, config)[source]
+

Evaluates the local model on the validation data and returns the loss and accuracy.

+
+
Parameters:
+
    +
  • parameters – Parameters received from the server.

  • +
  • config – Configuration information.

  • +
+
+
Returns:
+

Loss, number of validation examples, and accuracy information.

+
+
Return type:
+

Tuple

+
+
+
+ +
+
+fit(parameters, config)[source]
+

Fits the local model to the received parameters using federated learning.

+
+
Parameters:
+
    +
  • parameters – Parameters received from the server.

  • +
  • config – Configuration information.

  • +
+
+
Returns:
+

Parameters of the local model, number of training examples, and privacy information.

+
+
Return type:
+

Tuple

+
+
+
+ +
+
+get_parameters(config)[source]
+

Returns the current parameters of the local model.

+
+
Parameters:
+

config – Configuration information.

+
+
Returns:
+

Parameters of the local model.

+
+
Return type:
+

Numpy array

+
+
+
+ +
+
+validate()[source]
+

Validates cid, local_model, trainloader, valloader.

+
+ +
+ +
+
+
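For orientation, here is a minimal construction sketch based on the signature above. The network, optimizer, loaders, and cid are placeholders, and in a normal MEDfl run FlowerClient instances are produced by FlowerServer.client_fn rather than built by hand:

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

from MEDfl.LearningManager.model import Model
from MEDfl.LearningManager.client import FlowerClient

# Toy binary-classification data (placeholder values)
X = torch.randn(64, 8)
y = torch.randint(0, 2, (64,)).float()
loader = DataLoader(TensorDataset(X, y), batch_size=16, shuffle=True)

# Wrap a plain PyTorch network in the MEDfl Model helper
net = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1))
local_model = Model(net, optim.SGD(net.parameters(), lr=0.01), nn.BCEWithLogitsLoss())

client = FlowerClient(cid="0", local_model=local_model, trainloader=loader,
                      valloader=loader, diff_priv=False)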

MEDfl.LearningManager.dynamicModal module

+
+
+class MEDfl.LearningManager.dynamicModal.DynamicModel[source]
+

Bases: object

+

DynamicModel class for creating various types of neural network models.

+
+
+static create_binary_classifier(input_dim, hidden_dims, output_dim, activation='relu', dropout_rate=0.0, batch_norm=False, use_gpu=False)[source]
+

Creates a binary classifier neural network model with customizable architecture.

+
+
Parameters:
+
    +
  • input_dim (int) – Dimension of the input data.

  • +
  • hidden_dims (List[int]) – List of dimensions for hidden layers.

  • +
  • output_dim (int) – Dimension of the output (number of classes).

  • +
  • activation (str, optional) – Activation function for hidden layers. Default is ‘relu’.

  • +
  • dropout_rate (float, optional) – Dropout rate for regularization. Default is 0.0 (no dropout).

  • +
  • batch_norm (bool, optional) – Whether to apply batch normalization. Default is False.

  • +
  • use_gpu (bool, optional) – Whether to use GPU acceleration. Default is False.

  • +
+
+
Returns:
+

Created PyTorch model.

+
+
Return type:
+

torch.nn.Module

+
+
+
+ +
+
+static create_convolutional_neural_network(input_channels, output_channels, kernel_size, use_gpu=False)[source]
+

Creates a convolutional neural network (CNN) model.

+
+
Parameters:
+
    +
  • input_channels (int) – Number of input channels.

  • +
  • output_channels (int) – Number of output channels.

  • +
  • kernel_size (int) – Size of the convolutional kernel.

  • +
+
+
Returns:
+

Created PyTorch model.

+
+
Return type:
+

torch.nn.Module

+
+
+
+ +
+
+static create_linear_regressor(input_dim, output_dim, use_gpu=False)[source]
+

Creates a linear regressor neural network model.

+
+
Parameters:
+
    +
  • input_dim (int) – Dimension of the input data.

  • +
  • output_dim (int) – Dimension of the output.

  • +
+
+
Returns:
+

Created PyTorch model.

+
+
Return type:
+

torch.nn.Module

+
+
+
+ +
+
+static create_logistic_regressor(input_dim, use_gpu=False)[source]
+

Creates a logistic regressor neural network model.

+
+
Parameters:
+

input_dim (int) – Dimension of the input data.

+
+
Returns:
+

Created PyTorch model.

+
+
Return type:
+

torch.nn.Module

+
+
+
+ +
+
+static create_lstm_network(input_size, hidden_size, use_gpu=False)[source]
+

Creates a Long Short-Term Memory (LSTM) network model.

+
+
Parameters:
+
    +
  • input_size (int) – Size of the input layer.

  • +
  • hidden_size (int) – Size of the hidden layer.

  • +
+
+
Returns:
+

Created PyTorch model.

+
+
Return type:
+

torch.nn.Module

+
+
+
+ +
+
+create_model(model_type: str, params_dict={}) Module[source]
+

Create a specific type of model dynamically based on the given parameters.

+
+
Parameters:
+
    +
  • model_type (str) – Type of the model to create (‘Binary Classifier’, ‘Multiclass Classifier’, ‘Linear Regressor’, ‘Logistic Regressor’, ‘SVM’, ‘Neural Network Classifier’, ‘Convolutional Neural Network’, ‘Recurrent Neural Network’, ‘LSTM Network’, ‘Autoencoder’).

  • +
  • params_dict (dict) – Dictionary containing parameters for model creation.

  • +
+
+
Returns:
+

Created PyTorch model.

+
+
Return type:
+

torch.nn.Module

+
+
+
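As an illustration, a hedged sketch of create_model; the exact params_dict keys expected for each model_type are an assumption, mirrored from the per-type factory methods documented in this module:

from MEDfl.LearningManager.dynamicModal import DynamicModel

dm = DynamicModel()
model = dm.create_model(
    "Binary Classifier",
    params_dict={
        "input_dim": 8,          # keys assumed to mirror create_binary_classifier
        "hidden_dims": [32, 16],
        "output_dim": 1,
        "activation": "relu",
        "dropout_rate": 0.2,
        "batch_norm": True,
        "use_gpu": False,
    },
)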
+ +
+
+static create_multiclass_classifier(input_dim, hidden_dims, output_dim, activation='relu', dropout_rate=0.0, batch_norm=False, use_gpu=False)[source]
+

Creates a multiclass classifier neural network model with customizable architecture.

+
+
Parameters:
+
    +
  • input_dim (int) – Dimension of the input data.

  • +
  • hidden_dims (List[int]) – List of dimensions for hidden layers.

  • +
  • output_dim (int) – Dimension of the output (number of classes).

  • +
  • activation (str, optional) – Activation function for hidden layers. Default is ‘relu’.

  • +
  • dropout_rate (float, optional) – Dropout rate for regularization. Default is 0.0 (no dropout).

  • +
  • batch_norm (bool, optional) – Whether to apply batch normalization. Default is False.

  • +
  • use_gpu (bool, optional) – Whether to use GPU acceleration. Default is False.

  • +
+
+
Returns:
+

Created PyTorch model.

+
+
Return type:
+

torch.nn.Module

+
+
+
+ +
+
+static create_recurrent_neural_network(input_size, hidden_size, use_gpu=False)[source]
+

Creates a recurrent neural network (RNN) model.

+
+
Parameters:
+
    +
  • input_size (int) – Size of the input.

  • +
  • hidden_size (int) – Size of the hidden layer.

  • +
+
+
Returns:
+

Created PyTorch model.

+
+
Return type:
+

torch.nn.Module

+
+
+
+ +
+ +
+
+

MEDfl.LearningManager.federated_dataset module

+
+
+class MEDfl.LearningManager.federated_dataset.FederatedDataset(name: str, train_nodes: list, test_nodes: list, trainloaders: list, valloaders: list, testloaders: list)[source]
+

Bases: object

+
+
+__init__(name: str, train_nodes: list, test_nodes: list, trainloaders: list, valloaders: list, testloaders: list)[source]
+

Represents a Federated Dataset.

+
+
Parameters:
+
    +
  • name – Name of the Federated Dataset.

  • +
  • train_nodes – List of train nodes.

  • +
  • test_nodes – List of test nodes.

  • +
  • trainloaders – List of train data loaders.

  • +
  • valloaders – List of validation data loaders.

  • +
  • testloaders – List of test data loaders.

  • +
+
+
+
+ +
+
+create(FLsetupId: int)[source]
+

Create a new Federated Dataset in the database.

+
+
Parameters:
+

FLsetupId – The FLsetup ID associated with the Federated Dataset.

+
+
+
+ +
+
+update(FLpipeId: int, FedId: int)[source]
+

Update the FLpipe ID associated with the Federated Dataset in the database.

+
+
Parameters:
+
    +
  • FLpipeId – The new FLpipe ID to be updated.

  • +
  • FedId – The Federated Dataset ID.

  • +
+
+
+
+ +
+ +
+
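A minimal assembly sketch; node names and loaders are placeholders, and the FLsetupId passed to create() is assumed to reference an existing FL setup:

import torch
from torch.utils.data import DataLoader, TensorDataset

from MEDfl.LearningManager.federated_dataset import FederatedDataset

def toy_loader(n=32, d=8):
    X, y = torch.randn(n, d), torch.randint(0, 2, (n,)).float()
    return DataLoader(TensorDataset(X, y), batch_size=8)

fed_dataset = FederatedDataset(
    name="demo_fed_dataset",
    train_nodes=["hospital_1", "hospital_2"],
    test_nodes=["hospital_3"],
    trainloaders=[toy_loader(), toy_loader()],
    valloaders=[toy_loader(), toy_loader()],
    testloaders=[toy_loader()],
)
# fed_dataset.create(FLsetupId=1)  # persists the federated dataset for FL setup 1 (assumed to exist)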
+

MEDfl.LearningManager.flpipeline module

+
+
+class MEDfl.LearningManager.flpipeline.FLpipeline(name: str, description: str, server: FlowerServer)[source]
+

Bases: object

+

FLpipeline class for managing Federated Learning pipelines.

+
+
+name
+

The name of the FLpipeline.

+
+
Type:
+

str

+
+
+
+ +
+
+description
+

A description of the FLpipeline.

+
+
Type:
+

str

+
+
+
+ +
+
+server
+

The FlowerServer object associated with the FLpipeline.

+
+
Type:
+

FlowerServer

+
+
+
+ +
+
+__init__(name: str, description: str, server: FlowerServer) None[source]
+

Initialize FLpipeline with the specified name, description, and server.

+
+ +
+
+auto_test(test_frac=1) List[dict][source]
+

Automatically test the FLpipeline on all nodes with the specified test_frac.

+
+
Parameters:
+

test_frac (float, optional) – The fraction of the test data to use. Default is 1.

+
+
Returns:
+

A list of dictionaries containing the node names and the classification reports.

+
+
Return type:
+

List[dict]

+
+
+
+ +
+
+create(result: str) None[source]
+

Create a new FLpipeline entry in the database with the given result.

+
+
Parameters:
+

result (str) – The result string to store in the database.

+
+
+
+ +
+
+delete() None[source]
+

Delete the FLpipeline entry from the database based on its name.

+

Note: This is a placeholder method and needs to be implemented based on your specific database setup.

+
+ +
+
+test_by_node(node_name: str, test_frac=1) dict[source]
+

Test the FLpipeline by node with the specified test_frac.

+
+
Parameters:
+
    +
  • node_name (str) – The name of the node to test.

  • +
  • test_frac (float, optional) – The fraction of the test data to use. Default is 1.

  • +
+
+
Returns:
+

A dictionary containing the node name and the classification report.

+
+
Return type:
+

dict

+
+
+
+ +
+
+validate() None[source]
+

Validate the name, description, and server attributes.

Raises:

TypeError – If the name is not a string, the description is not a string, or the server is not a FlowerServer object.

+
+ +
+ +
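A hedged end-to-end sketch; it assumes a FlowerServer instance named server has already been configured and run as described in the server module further below:

from MEDfl.LearningManager.flpipeline import FLpipeline

pipeline = FLpipeline(
    name="demo_pipeline",
    description="FedAvg run on the toy federated dataset",
    server=server,  # an already-trained FlowerServer (assumption)
)
reports = pipeline.auto_test(test_frac=0.5)                   # one classification report per test node
report_h3 = pipeline.test_by_node("hospital_3", test_frac=1)  # single-node report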
+
+MEDfl.LearningManager.flpipeline.create_query(name, description, creation_date, result)[source]
+
+ +
+
+

MEDfl.LearningManager.model module

+
+
+class MEDfl.LearningManager.model.Model(model: Module, optimizer: Optimizer, criterion: Callable)[source]
+

Bases: object

+

Model class for training and testing PyTorch neural networks.

+
+
+model
+

PyTorch neural network.

+
+
Type:
+

torch.nn.Module

+
+
+
+ +
+
+optimizer
+

PyTorch optimizer.

+
+
Type:
+

torch.optim.Optimizer

+
+
+
+ +
+
+criterion
+

Loss function.

+
+
Type:
+

Callable

+
+
+
+ +
+
+__init__(model: Module, optimizer: Optimizer, criterion: Callable) None[source]
+

Initialize Model class with the specified model, optimizer, and criterion.

+
+
Parameters:
+
    +
  • model (torch.nn.Module) – PyTorch neural network.

  • +
  • optimizer (torch.optim.Optimizer) – PyTorch optimizer.

  • +
  • criterion (Callable) – Loss function.

  • +
+
+
+
+ +
+
+evaluate(val_loader, device=device(type='cpu')) Tuple[float, float][source]
+

Evaluate the model on the given validation data.

+
+
Parameters:
+
    +
  • val_loader – The data loader for validation data.

  • +
  • device – The device on which to perform the evaluation. Default is ‘cpu’.

  • +
+
+
Returns:
+

The evaluation loss and accuracy.

+
+
Return type:
+

Tuple[float, float]

+
+
+
+ +
+
+get_parameters() List[ndarray][source]
+

Get the parameters of the model as a list of NumPy arrays.

+
+
Returns:
+

The parameters of the model as a list of NumPy arrays.

+
+
Return type:
+

List[np.ndarray]

+
+
+
+ +
+
+static load_model(model_name: str)[source]
+

Loads a PyTorch model from a file.

+
+
Parameters:
+

model_name (str) – Name of the model file to be loaded.

+
+
Returns:
+

Loaded PyTorch model.

+
+
Return type:
+

torch.nn.Module

+
+
+
+ +
+
+static save_model(model, model_name: str)[source]
+

Saves a PyTorch model to a file.

+
+
Parameters:
+
    +
  • model (torch.nn.Module) – PyTorch model to be saved.

  • +
  • model_name (str) – Name of the model file.

  • +
+
+
Raises:
+

Exception – If there is an issue during the saving process.

+
+
Returns:
+

None

+
+
+
+ +
+
+set_parameters(parameters: List[ndarray]) None[source]
+

Set the parameters of the model from a list of NumPy arrays.

+
+
Parameters:
+

parameters (List[np.ndarray]) – The parameters to be set.

+
+
+
+ +
+
+train(train_loader, epoch, device, privacy_engine, diff_priv=False) float[source]
+

Train the model on the given train_loader for one epoch.

+
+
Parameters:
+
    +
  • train_loader – The data loader for training data.

  • +
  • epoch (int) – The current epoch number.

  • +
  • device – The device on which to perform the training.

  • +
  • privacy_engine – The privacy engine used for differential privacy (if enabled).

  • +
  • diff_priv (bool, optional) – Whether differential privacy is used. Default is False.

  • +
+
+
Returns:
+

The value of epsilon used in differential privacy.

+
+
Return type:
+

float

+
+
+
+ +
+
+validate() None[source]
+

Validate model and optimizer.

+
+ +
+ +
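A short usage sketch of the Model wrapper around a plain PyTorch network; the architecture, hyperparameters, and file name are placeholders:

import torch.nn as nn
import torch.optim as optim

from MEDfl.LearningManager.model import Model

net = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1))
wrapper = Model(net, optim.SGD(net.parameters(), lr=0.01), nn.BCEWithLogitsLoss())

params = wrapper.get_parameters()    # list of NumPy arrays
wrapper.set_parameters(params)       # round-trip the weights
# loss, acc = wrapper.evaluate(val_loader)   # val_loader: any DataLoader of (X, y) batches
Model.save_model(net, "demo_model")  # file name is a placeholder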
+
+

MEDfl.LearningManager.plot module

+
+
+class MEDfl.LearningManager.plot.AccuracyLossPlotter(results_dict)[source]
+

Bases: object

+

A utility class for plotting accuracy and loss metrics based on experiment results.

+
+
Parameters:
+

results_dict (dict) – Dictionary containing experiment results organized by parameters and metrics.

+
+
+
+
+results_dict
+

Dictionary containing experiment results organized by parameters and metrics.

+
+
Type:
+

dict

+
+
+
+ +
+
+parameters
+

List of unique parameters in the experiment results.

+
+
Type:
+

list

+
+
+
+ +
+
+metrics
+

List of unique metrics in the experiment results.

+
+
Type:
+

list

+
+
+
+ +
+
+iterations
+

Range of iterations (rounds or epochs) in the experiment.

+
+
Type:
+

range

+
+
+
+ +
+
+__init__(results_dict)[source]
+

Initialize the AccuracyLossPlotter with experiment results.

+
+
Parameters:
+

results_dict (dict) – Dictionary containing experiment results organized by parameters and metrics.

+
+
+
+ +
+
+plot_accuracy_loss()[source]
+

Plot accuracy and loss metrics for different parameters.

+
+ +
+
+static plot_classification_report(pipeline_name: str)[source]
+

Plot a comparison of classification report metrics between nodes.

+
+
Parameters:
+

pipeline_name (str) – Name of the pipeline.

+
+
Returns:
+

None

+
+
+
+ +
+
+static plot_confusion_Matrix_by_node(node_name: str, pipeline_name: str)[source]
+

Plot a confusion matrix for a specific node in the pipeline.

+
+
Parameters:
+
    +
  • node_name (str) – Name of the node.

  • +
  • pipeline_name (str) – Name of the pipeline.

  • +
+
+
Returns:
+

None

+
+
+
+ +
+
+static plot_global_confusion_matrix(pipeline_name: str)[source]
+

Plot a global confusion matrix based on pipeline results.

+
+
Parameters:
+

pipeline_name (str) – Name of the pipeline.

+
+
Returns:
+

None

+
+
+
+ +
+ +
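A small usage sketch mirroring the sample results_dict shown below; keys are (parameter-label, metric) tuples and values are per-round (or per-epoch) series:

from MEDfl.LearningManager.plot import AccuracyLossPlotter

results = {
    ("LR: 0.001, Optimizer: Adam", "accuracy"): [0.85, 0.89, 0.92, 0.94],
    ("LR: 0.001, Optimizer: Adam", "loss"):     [0.20, 0.15, 0.10, 0.08],
}
AccuracyLossPlotter(results).plot_accuracy_loss()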
+
+MEDfl.LearningManager.plot.results_dict = {('LR: 0.001, Optimizer: Adam', 'accuracy'): [0.85, 0.89, 0.92, 0.94, Ellipsis], ('LR: 0.001, Optimizer: Adam', 'loss'): [0.2, 0.15, 0.1, 0.08, Ellipsis], ('LR: 0.01, Optimizer: SGD', 'accuracy'): [0.88, 0.91, 0.93, 0.95, Ellipsis], ('LR: 0.01, Optimizer: SGD', 'loss'): [0.18, 0.13, 0.09, 0.07, Ellipsis], ('LR: 0.1, Optimizer: Adam', 'accuracy'): [0.82, 0.87, 0.91, 0.93, Ellipsis], ('LR: 0.1, Optimizer: Adam', 'loss'): [0.25, 0.2, 0.15, 0.12, Ellipsis]}
+
+
Notes on tracked metrics:

The server keeps self.accuracies and self.losses, one entry per round.

Each client keeps self.accuracies, self.losses, self.epsilons, and self.deltas, one entry per epoch.

Common settings across both: learning rate, optimizer (e.g. SGD), and the aggregation method.

+
+ +
+
+

MEDfl.LearningManager.server module

+
+
+class MEDfl.LearningManager.server.FlowerServer(global_model: Model, strategy: Strategy, num_rounds: int, num_clients: int, fed_dataset: FederatedDataset, diff_privacy: bool = False, client_resources: Dict[str, float] | None = {'num_cpus': 1, 'num_gpus': 0.0})[source]
+

Bases: object

+

A class representing the central server for Federated Learning using Flower.

+
+
+global_model
+

The global model that will be federated among clients.

+
+
Type:
+

Model

+
+
+
+ +
+
+strategy
+

The strategy used for federated learning, specifying communication and aggregation methods.

+
+
Type:
+

Strategy

+
+
+
+ +
+
+num_rounds
+

The number of federated learning rounds to perform.

+
+
Type:
+

int

+
+
+
+ +
+
+num_clients
+

The number of clients participating in the federated learning process.

+
+
Type:
+

int

+
+
+
+ +
+
+fed_dataset
+

The federated dataset used for training and evaluation.

+
+
Type:
+

FederatedDataset

+
+
+
+ +
+
+diff_priv
+

Whether differential privacy is used during the federated learning process.

+
+
Type:
+

bool

+
+
+
+ +
+
+accuracies
+

A list to store the accuracy of the global model during each round.

+
+
Type:
+

List[float]

+
+
+
+ +
+
+losses
+

A list to store the loss of the global model during each round.

+
+
Type:
+

List[float]

+
+
+
+ +
+
+flower_clients
+

A list to store the FlowerClient objects representing individual clients.

+
+
Type:
+

List[FlowerClient]

+
+
+
+ +
+
+__init__(global_model: Model, strategy: Strategy, num_rounds: int, num_clients: int, fed_dataset: FederatedDataset, diff_privacy: bool = False, client_resources: Dict[str, float] | None = {'num_cpus': 1, 'num_gpus': 0.0}) None[source]
+

Initialize a FlowerServer object with the specified parameters.

+
+
Parameters:
+
    +
  • global_model (Model) – The global model that will be federated among clients.

  • +
  • strategy (Strategy) – The strategy used for federated learning, specifying communication and aggregation methods.

  • +
  • num_rounds (int) – The number of federated learning rounds to perform.

  • +
  • num_clients (int) – The number of clients participating in the federated learning process.

  • +
  • fed_dataset (FederatedDataset) – The federated dataset used for training and evaluation.

  • +
  • diff_privacy (bool, optional) – Whether differential privacy is used during the federated learning process. Default is False.

  • +
+
+
+
+ +
+
+client_fn(cid) FlowerClient[source]
+

Return a FlowerClient object for a specific client ID.

+
+
Parameters:
+

cid – The client ID.

+
+
Returns:
+

A FlowerClient object representing the individual client.

+
+
Return type:
+

FlowerClient

+
+
+
+ +
+
+evaluate(server_round: int, parameters: List[ndarray[Any, dtype[Any]]], config: Dict[str, bool | bytes | float | int | str]) Tuple[float, Dict[str, bool | bytes | float | int | str]] | None[source]
+

Evaluate the global model on the validation dataset and update the accuracies and losses.

+
+
Parameters:
+
    +
  • server_round (int) – The current round of the federated learning process.

  • +
  • parameters (fl.common.NDArrays) – The global model parameters.

  • +
  • config (Dict[str, fl.common.Scalar]) – Configuration dictionary.

  • +
+
+
Returns:
+

The evaluation loss and accuracy.

+
+
Return type:
+

Optional[Tuple[float, Dict[str, fl.common.Scalar]]]

+
+
+
+ +
+
+run() None[source]
+

Run the federated learning process using Flower simulation.

+
+
Returns:
+

The history of the accuracies and losses during the training of each node

+
+
Return type:
+

History

+
+
+
+ +
+
+validate() None[source]
+

Validate global_model, strategy, num_clients, num_rounds, fed_dataset, diff_privacy

+
+ +
+ +
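A condensed launch sketch; global_model, strategy, and fed_dataset are assumed to have been built with the other modules of this package:

from MEDfl.LearningManager.server import FlowerServer

server = FlowerServer(
    global_model=global_model,   # Model wrapper (model module)
    strategy=strategy,           # Strategy instance (strategy module)
    num_rounds=5,
    num_clients=3,
    fed_dataset=fed_dataset,     # FederatedDataset (federated_dataset module)
    diff_privacy=False,
    client_resources={"num_cpus": 1, "num_gpus": 0.0},
)
history = server.run()                     # runs the Flower simulation
print(server.accuracies, server.losses)    # one entry per round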
+
+

MEDfl.LearningManager.strategy module

+
+
+class MEDfl.LearningManager.strategy.Strategy(name: str = 'FedAvg', fraction_fit: float = 1.0, fraction_evaluate: float = 1.0, min_fit_clients: int = 2, min_evaluate_clients: int = 2, min_available_clients: int = 2, initial_parameters=[], evaluation_methode='centralized')[source]
+

Bases: object

+

A class representing a strategy for Federated Learning.

+
+
+name
+

The name of the strategy. Default is “FedAvg”.

+
+
Type:
+

str

+
+
+
+ +
+
+fraction_fit
+

Fraction of clients to use for training during each round. Default is 1.0.

+
+
Type:
+

float

+
+
+
+ +
+
+fraction_evaluate
+

Fraction of clients to use for evaluation during each round. Default is 1.0.

+
+
Type:
+

float

+
+
+
+ +
+
+min_fit_clients
+

Minimum number of clients to use for training during each round. Default is 2.

+
+
Type:
+

int

+
+
+
+ +
+
+min_evaluate_clients
+

Minimum number of clients to use for evaluation during each round. Default is 2.

+
+
Type:
+

int

+
+
+
+ +
+
+min_available_clients
+

Minimum number of available clients required to start a round. Default is 2.

+
+
Type:
+

int

+
+
+
+ +
+
+initial_parameters
+

The initial parameters of the server model

+
+
Type:
+

Optional[]

+
+
+
+ +

Methods:

+
+
+__init__(name: str = 'FedAvg', fraction_fit: float = 1.0, fraction_evaluate: float = 1.0, min_fit_clients: int = 2, min_evaluate_clients: int = 2, min_available_clients: int = 2, initial_parameters=[], evaluation_methode='centralized') None[source]
+

Initialize a Strategy object with the specified parameters.

+
+
Parameters:
+
    +
  • name (str) – The name of the strategy. Default is “FedAvg”.

  • +
  • fraction_fit (float) – Fraction of clients to use for training during each round. Default is 1.0.

  • +
  • fraction_evaluate (float) – Fraction of clients to use for evaluation during each round. Default is 1.0.

  • +
  • min_fit_clients (int) – Minimum number of clients to use for training during each round. Default is 2.

  • +
  • min_evaluate_clients (int) – Minimum number of clients to use for evaluation during each round. Default is 2.

  • +
  • min_available_clients (int) – Minimum number of available clients required to start a round. Default is 2.

  • +
  • initial_parameters (Optional[]) – The initial parameters of the server model

  • +
  • evaluation_methode ("centralized" | "distributed") –

  • +
+
+
+
+ +
+ +
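A minimal configuration sketch using only the documented arguments:

from MEDfl.LearningManager.strategy import Strategy

strategy = Strategy(
    name="FedAvg",
    fraction_fit=1.0,
    fraction_evaluate=1.0,
    min_fit_clients=2,
    min_evaluate_clients=2,
    min_available_clients=2,
    evaluation_methode="centralized",
)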
+
+

MEDfl.LearningManager.utils module

+
+
+MEDfl.LearningManager.utils.custom_classification_report(y_true, y_pred)[source]
+

Compute custom classification report metrics including accuracy, sensitivity, specificity, precision, NPV, F1-score, false positive rate, and true positive rate.

+
+
Parameters:
+
    +
  • y_true (array-like) – True labels.

  • +
  • y_pred (array-like) – Predicted labels.

  • +
+
+
Returns:
+

A dictionary containing custom classification report metrics.

+
+
Return type:
+

dict

+
+
+
+ +
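For example, on toy label vectors (values are illustrative):

from MEDfl.LearningManager.utils import custom_classification_report

y_true = [1, 0, 1, 1, 0, 0, 1, 0]
y_pred = [1, 0, 1, 0, 0, 1, 1, 0]
report = custom_classification_report(y_true, y_pred)
print(report)  # dict with accuracy, sensitivity, specificity, precision, NPV, F1-score, FPR, TPR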
+
+MEDfl.LearningManager.utils.empty_db()[source]
+

Empty the database by deleting records from multiple tables and resetting auto-increment counters.

+
+
Returns:
+

None

+
+
+
+ +
+
+MEDfl.LearningManager.utils.get_node_confusion_matrix(pipeline_id, node_name)[source]
+

Get the confusion matrix for a specific node in a pipeline based on test results.

+
+
Parameters:
+
    +
  • pipeline_id (int) – ID of the pipeline.

  • +
  • node_name (str) – Name of the node.

  • +
+
+
Returns:
+

A dictionary representing the confusion matrix for the specified node.

+
+
Return type:
+

dict

+
+
+
+ +
+
+MEDfl.LearningManager.utils.get_pipeline_confusion_matrix(pipeline_id)[source]
+

Get the global confusion matrix for a pipeline based on test results.

+
+
Parameters:
+

pipeline_id (int) – ID of the pipeline.

+
+
Returns:
+

A dictionary representing the global confusion matrix.

+
+
Return type:
+

dict

+
+
+
+ +
+
+MEDfl.LearningManager.utils.get_pipeline_from_name(name)[source]
+

Get the pipeline ID from its name in the database.

+
+
Parameters:
+

name (str) – Name of the pipeline.

+
+
Returns:
+

ID of the pipeline.

+
+
Return type:
+

int

+
+
+
+ +
+
+MEDfl.LearningManager.utils.get_pipeline_result(pipeline_id)[source]
+

Get the test results for a pipeline.

+
+
Parameters:
+

pipeline_id (int) – ID of the pipeline.

+
+
Returns:
+

DataFrame containing test results for the specified pipeline.

+
+
Return type:
+

pandas.DataFrame

+
+
+
+ +
+
+MEDfl.LearningManager.utils.test(model, test_loader, device=device(type='cpu'))[source]
+

Evaluate a model using a test loader and return a custom classification report.

+
+
Parameters:
+
    +
  • model (torch.nn.Module) – PyTorch model to evaluate.

  • +
  • test_loader (torch.utils.data.DataLoader) – DataLoader for the test dataset.

  • +
  • device (torch.device, optional) – Device for model evaluation. Default is “cpu”.

  • +
+
+
Returns:
+

A dictionary containing custom classification report metrics.

+
+
Return type:
+

dict

+
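An illustrative sketch with a toy model and random data (all names are placeholders); it assumes the model outputs one value per sample, as the binary classifiers built elsewhere in MEDfl do:

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from MEDfl.LearningManager.utils import test

# Toy binary classifier and random test data, just to show the call shape.
model = nn.Sequential(nn.Linear(4, 1), nn.Sigmoid())
X = torch.rand(32, 4)
y = torch.randint(0, 2, (32,)).float()
test_loader = DataLoader(TensorDataset(X, y), batch_size=8)

metrics = test(model, test_loader, device=torch.device("cpu"))
print(metrics)   # dict of custom classification report metrics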
+
+
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/Medfl.NetManager.html b/docs/_build/Medfl.NetManager.html new file mode 100644 index 0000000..976d6be --- /dev/null +++ b/docs/_build/Medfl.NetManager.html @@ -0,0 +1,840 @@ + + + + + + + MEDfl.NetManager package — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

MEDfl.NetManager package

+
+

Submodules

+
+
+

MEDfl.NetManager.dataset module

+
+
+class MEDfl.NetManager.dataset.DataSet(name: str, path: str, engine=None)[source]
+

Bases: object

+
+
+__init__(name: str, path: str, engine=None)[source]
+

Initialize a DataSet object.

+
+
Parameters:
+
    +
  • name (str) – The name of the dataset.

  • +
  • path (str) – The file path of the dataset CSV file.

  • +
+
+
+
+ +
+
+delete_dataset()[source]
+

Delete the dataset from the database.

+

Notes:
- Assumes the dataset name is unique in the ‘DataSets’ table.

+
+ +
+
+static list_alldatasets(engine)[source]
+

List all dataset names from the ‘DataSets’ table.

+
+
Returns:
+

A DataFrame containing the names of all datasets in the ‘DataSets’ table.

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+update_data()[source]
+

Update the data in the dataset.

+

Not implemented yet.

+
+ +
+
+upload_dataset(NodeId=-1)[source]
+

Upload the dataset to the database.

+
+
Parameters:
+

NodeId (int) – The NodeId associated with the dataset.

+
+
+

Notes:
- Assumes the file at self.path is a valid CSV file.
- The dataset is uploaded to the ‘DataSets’ table in the database.

+
+ +
+
+validate()[source]
+

Validate name and path attributes.

+
+
Raises:
+

TypeError – If name or path is not a string.

+
+
+
+ +
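A usage sketch (dataset name and CSV path are hypothetical; a configured MEDfl database is assumed):

from MEDfl.NetManager.dataset import DataSet

ds = DataSet(name="hospital_a_data", path="/data/hospital_a.csv")
ds.validate()                     # raises TypeError if name/path are not strings
ds.upload_dataset(NodeId=1)       # inserts the CSV rows into the 'DataSets' table
# ds.delete_dataset()             # removes it again (dataset name assumed unique)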
+ +
+
+

MEDfl.NetManager.flsetup module

+
+
+class MEDfl.NetManager.flsetup.FLsetup(name: str, description: str, network: Network)[source]
+

Bases: object

+
+
+__init__(name: str, description: str, network: Network)[source]
+

Initialize a Federated Learning (FL) setup.

+
+
Parameters:
+
    +
  • name (str) – The name of the FL setup.

  • +
  • description (str) – A description of the FL setup.

  • +
  • network (Network) – An instance of the Network class representing the network architecture.

  • +
+
+
+
+ +
+
+create()[source]
+

Create an FL setup.

+
+ +
+
+create_dataloader_from_node(node: Node, output, fill_strategy='mean', fit_encode=[], to_drop=[], train_batch_size: int = 32, test_batch_size: int = 1, split_frac: float = 0.2, dataset: Dataset | None = None)[source]
+

Create DataLoader from a Node.

+
+
Parameters:
+
    +
  • node (Node) – The node from which to create DataLoader.

  • +
  • train_batch_size (int) – The batch size for training data.

  • +
  • test_batch_size (int) – The batch size for test data.

  • +
  • split_frac (float) – The fraction of data to be used for training.

  • +
  • dataset (Dataset) – The dataset to use. If None, the method will read the dataset from the node.

  • +
+
+
Returns:
+

The DataLoader instances for training and testing.

+
+
Return type:
+

DataLoader

+
+
+
+ +
+
+create_federated_dataset(output, fill_strategy='mean', fit_encode=[], to_drop=[], val_frac=0.1, test_frac=0.2) FederatedDataset[source]
+

Create a federated dataset.

+
+
Parameters:
+
    +
  • output (str) – The output (target) feature of the dataset.

  • +
  • val_frac (float) – The fraction of data to be used for validation.

  • +
  • test_frac (float) – The fraction of data to be used for testing.

  • +
+
+
Returns:
+

The FederatedDataset instance containing train, validation, and test data.

+
+
Return type:
+

FederatedDataset

+
+
+
+ +
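A usage sketch (all names and the target column are hypothetical; it assumes a network with train/test nodes and their datasets has already been registered in the MEDfl database):

from MEDfl.NetManager.network import Network
from MEDfl.NetManager.flsetup import FLsetup

net = Network(name="icu_network")
net.create_network()

fl_setup = FLsetup(
    name="mortality_setup",
    description="Binary mortality prediction across ICU sites",
    network=net,
)
fl_setup.create()

fed_dataset = fl_setup.create_federated_dataset(
    output="mortality",        # target column of the node datasets
    fill_strategy="mean",
    val_frac=0.1,
    test_frac=0.2,
)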
+
+create_nodes_from_master_dataset(params_dict: dict)[source]
+

Create nodes from the master dataset.

+
+
Parameters:
+

params_dict (dict) – A dictionary containing parameters for node creation. +- column_name (str): The name of the column in the MasterDataset used to create nodes. +- train_nodes (list): A list of node names that will be used for training. +- test_nodes (list): A list of node names that will be used for testing.

+
+
Returns:
+

A list of Node instances created from the master dataset.

+
+
Return type:
+

list

+
+
+
+ +
+
+delete()[source]
+

Delete the FL setup.

+
+ +
+
+get_flDataSet()[source]
+

Retrieve the federated dataset associated with the FL setup using the FL setup’s name.

+
+
Returns:
+

DataFrame containing the federated dataset information.

+
+
Return type:
+

pandas.DataFrame

+
+
+
+ +
+
+static list_allsetups()[source]
+

List all the FL setups.

+
+
Returns:
+

A DataFrame containing information about all the FL setups.

+
+
Return type:
+

DataFrame

+
+
+
+ +
+
+classmethod read_setup(FLsetupId: int)[source]
+

Read the FL setup by FLsetupId.

+
+
Parameters:
+

FLsetupId (int) – The id of the FL setup to read.

+
+
Returns:
+

An instance of the FLsetup class with the specified FLsetupId.

+
+
Return type:
+

FLsetup

+
+
+
+ +
+
+validate()[source]
+

Validate name, description, and network.

+
+ +
+ +
+
+

MEDfl.NetManager.net_helper module

+
+
+MEDfl.NetManager.net_helper.get_feddataset_id_from_name(name)[source]
+

Get the Federated dataset Id from the FedDatasets table based on the federated dataset name.

+
+
Parameters:
+

name (str) – Federated dataset name.

+
+
Returns:
+

FedId or None if not found.

+
+
Return type:
+

int or None

+
+
+
+ +
+
+MEDfl.NetManager.net_helper.get_flpipeline_from_name(name)[source]
+

Get the FLpipeline Id from the FLpipeline table based on the FL pipeline name.

+
+
Parameters:
+

name (str) – FL pipeline name.

+
+
Returns:
+

FLpipelineId or None if not found.

+
+
Return type:
+

int or None

+
+
+
+ +
+
+MEDfl.NetManager.net_helper.get_flsetupid_from_name(name)[source]
+

Get the FLsetupId from the FLsetup table based on the FL setup name.

+
+
Parameters:
+

name (str) – FL setup name.

+
+
Returns:
+

FLsetupId or None if not found.

+
+
Return type:
+

int or None

+
+
+
+ +
+
+MEDfl.NetManager.net_helper.get_netid_from_name(name)[source]
+

Get the Network Id from the Networks table based on the NetName.

+
+
Parameters:
+

name (str) – Network name.

+
+
Returns:
+

NetId or None if not found.

+
+
Return type:
+

int or None

+
+
+
+ +
+
+MEDfl.NetManager.net_helper.get_nodeid_from_name(name)[source]
+

Get the NodeId from the Nodes table based on the NodeName.

+
+
Parameters:
+

name (str) – Node name.

+
+
Returns:
+

NodeId or None if not found.

+
+
Return type:
+

int or None

+
+
+
+ +
+
+MEDfl.NetManager.net_helper.is_str(data_df, row, x)[source]
+

Check if a column in a DataFrame is of type ‘object’ and convert the value accordingly.

+
+
Parameters:
+
    +
  • data_df (pandas.DataFrame) – DataFrame containing the data.

  • +
  • row (pandas.Series) – Data row.

  • +
  • x (str) – Column name.

  • +
+
+
Returns:
+

Processed value based on the column type.

+
+
Return type:
+

str or float

+
+
+
+ +
+
+MEDfl.NetManager.net_helper.master_table_exists()[source]
+

Check if the MasterDataset table exists in the database.

+
+
Returns:
+

True if the table exists, False otherwise.

+
+
Return type:
+

bool

+
+
+
+ +
+
+MEDfl.NetManager.net_helper.process_data_after_reading(data, output, fill_strategy='mean', fit_encode=[], to_drop=[])[source]
+

Process data after reading from the database, including encoding, dropping columns, and creating a PyTorch TensorDataset.

+
+
Parameters:
+
    +
  • data (pandas.DataFrame) – Input data.

  • +
  • output (str) – Output column name.

  • +
  • fill_strategy (str, optional) – Imputation strategy for missing values. Default is “mean”.

  • +
  • fit_encode (list, optional) – List of columns to be label-encoded. Default is an empty list.

  • +
  • to_drop (list, optional) – List of columns to be dropped from the DataFrame. Default is an empty list.

  • +
+
+
Returns:
+

Processed data as a PyTorch TensorDataset.

+
+
Return type:
+

torch.utils.data.TensorDataset

+
+
+
+ +
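A toy example of the expected call shape (column names are hypothetical):

import pandas as pd
from MEDfl.NetManager.net_helper import process_data_after_reading

df = pd.DataFrame({
    "patient_id": [1, 2, 3, 4],
    "age": [54.0, 61.0, None, 47.0],
    "sex": ["M", "F", "F", "M"],
    "outcome": [0, 1, 0, 1],
})

dataset = process_data_after_reading(
    df,
    output="outcome",
    fill_strategy="mean",       # impute missing 'age' values
    fit_encode=["sex"],         # label-encode the categorical column
    to_drop=["patient_id"],     # identifier column is not a feature
)
print(len(dataset))             # one TensorDataset entry per row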
+
+MEDfl.NetManager.net_helper.process_eicu(data_df)[source]
+

Process eICU data by filling missing values with mean and replacing NaNs with ‘Unknown’.

+
+
Parameters:
+

data_df (pandas.DataFrame) – Input data.

+
+
Returns:
+

Processed data.

+
+
Return type:
+

pandas.DataFrame

+
+
+
+ +
+
+

MEDfl.NetManager.net_manager_queries module

+
+
+

MEDfl.NetManager.network module

+
+
+class MEDfl.NetManager.network.Network(name: str = '')[source]
+

Bases: object

+

A class representing a network.

+
+
+name
+

The name of the network.

+
+
Type:
+

str

+
+
+
+ +
+
+mtable_exists
+

An integer flag indicating whether the MasterDataset table exists (1) or not (0).

+
+
Type:
+

int

+
+
+
+ +
+
+__init__(name: str = '')[source]
+

Initialize a Network instance.

+
+
Parameters:
+

name (str) – The name of the network.

+
+
+
+ +
+
+add_node(node: Node)[source]
+

Add a node to the network.

+
+
Parameters:
+

node (Node) – The node to add.

+
+
+
+ +
+
+create_master_dataset(path_to_csv: str = 'D:\\ESI\\3CS\\PFE\\last_year\\Code\\MEDfl\\notebooks\\eicu_test.csv')[source]
+

Create the MasterDataset table and insert dataset values.

+
+
Parameters:
+

path_to_csv – Path to the CSV file containing the dataset.

+
+
+
+ +
+
+create_network()[source]
+

Create a new network in the database.

+
+ +
+
+delete_network()[source]
+

Delete the network from the database.

+
+ +
+
+static list_allnetworks()[source]
+

List all networks in the database.

Returns:
A DataFrame containing information about all networks in the database.

Return type:
DataFrame

+
+ +
+
+list_allnodes()[source]
+

List all nodes in the network.

+
+
Parameters:
+

None

+
+
Returns:
+

A DataFrame containing information about all nodes in the network.

+
+
Return type:
+

DataFrame

+
+
+
+ +
+
+update_network(FLsetupId: int)[source]
+

Update the network’s FLsetupId in the database.

+
+
Parameters:
+

FLsetupId (int) – The FLsetupId to update.

+
+
+
+ +
+
+use_network(network_name: str)[source]
+

Use a network in the database.

+
+
Parameters:
+

network_name (str) – The name of the network to use.

+
+
Returns:
+

An instance of the Network class if the network exists, else None.

+
+
Return type:
+

Network or None

+
+
+
+ +
+
+validate()[source]
+

Validate name

+
+ +
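A usage sketch (network and node names are hypothetical; a configured MEDfl database is assumed):

from MEDfl.NetManager.network import Network
from MEDfl.NetManager.node import Node

net = Network(name="icu_network")
net.validate()
net.create_network()

net.add_node(Node(name="hospital_a", train=1, test_fraction=0.2))
net.add_node(Node(name="hospital_b", train=0))
print(net.list_allnodes())      # DataFrame of the nodes attached to this network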
+ +
+
+

MEDfl.NetManager.node module

+
+
+class MEDfl.NetManager.node.Node(name: str, train: int, test_fraction: float = 0.2, engine=<sqlalchemy.engine.base.Connection object>)[source]
+

Bases: object

+

A class representing a node in the network.

+
+
+name
+

The name of the node.

+
+
Type:
+

str

+
+
+
+ +
+
+train
+

An integer flag representing whether the node is used for training (1) or testing (0).

+
+
Type:
+

int

+
+
+
+ +
+
+test_fraction
+

The fraction of data used for testing when train=1. Default is 0.2.

+
+
Type:
+

float, optional

+
+
+
+ +
+
+__init__(name: str, train: int, test_fraction: float = 0.2, engine=<sqlalchemy.engine.base.Connection object>)[source]
+

Initialize a Node instance.

+
+
Parameters:
+
    +
  • name (str) – The name of the node.

  • +
  • train (int) – An integer flag representing whether the node is used for training (1) or testing (0).

  • +
  • test_fraction (float, optional) – The fraction of data used for testing when train=1. Default is 0.2.

  • +
+
+
+
+ +
+
+assign_dataset(dataset_name: str)[source]
+

Assign an existing dataset to the node.

Parameters:
dataset_name (str) – The name of the dataset to assign.

+
+
Returns:
+

None

+
+
+
+ +
+
+check_dataset_compatibility(data_df)[source]
+

Check if the dataset is compatible with the master dataset.

Parameters:
data_df (DataFrame) – The dataset to check.

+
+
Returns:
+

None

+
+
+
+ +
+
+create_node(NetId: int)[source]
+

Create a node in the database.

Parameters:
NetId (int) – The ID of the network to which the node belongs.

+
+
Returns:
+

None

+
+
+
+ +
+
+delete_node()[source]
+

Delete the node from the database.

+
+ +
+
+get_dataset(column_name: str | None = None)[source]
+

Get the dataset for the node based on the given column name.

Parameters:
column_name (str, optional) – The column name to filter the dataset. Default is None.

+
+
Returns:
+

The dataset associated with the node.

+
+
Return type:
+

DataFrame

+
+
+
+ +
+
+list_alldatasets()[source]
+

List all datasets associated with the node.

Returns:
A DataFrame containing information about all datasets associated with the node.

Return type:
DataFrame

+
+ +
+
+static list_allnodes()[source]
+

List all nodes in the database.

Returns:
A DataFrame containing information about all nodes in the database.

Return type:
DataFrame

+
+ +
+
+unassign_dataset(dataset_name: str)[source]
+

Unassign an existing dataset from the node.

Parameters:
dataset_name (str) – The name of the dataset to unassign.

+
+
Returns:
+

None

+
+
+
+ +
+
+update_node()[source]
+

Update the node information (not implemented).

+
+ +
+
+upload_dataset(dataset_name: str, path_to_csv: str = 'D:\\ESI\\3CS\\PFE\\last_year\\Code\\MEDfl\\notebooks\\eicu_test.csv')[source]
+

Upload the dataset to the database for the node.

Parameters:
  • dataset_name (str) – The name of the dataset.
  • path_to_csv (str, optional) – Path to the CSV file containing the dataset. Default is the path in params.

+
+
Returns:
+

None

+
+
+
+ +
+
+validate()[source]
+

Validate name, train, test_fraction

+
+ +
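A usage sketch for registering a node and attaching data to it (names, IDs, and paths are hypothetical; a configured MEDfl database is assumed):

from MEDfl.NetManager.node import Node
from MEDfl.NetManager.net_helper import get_netid_from_name

node = Node(name="hospital_c", train=1, test_fraction=0.2)
node.create_node(NetId=get_netid_from_name("icu_network"))

node.upload_dataset("hospital_c_data", path_to_csv="/data/hospital_c.csv")
node.assign_dataset("hospital_c_data")
print(node.list_alldatasets())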
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/Medfl.html b/docs/_build/Medfl.html new file mode 100644 index 0000000..52c3f77 --- /dev/null +++ b/docs/_build/Medfl.html @@ -0,0 +1,362 @@ + + + + + + + MEDfl package — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

MEDfl package

+
+

Subpackages

+
+ +
+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/_modules/Medfl/LearningManager/client.html b/docs/_build/_modules/Medfl/LearningManager/client.html new file mode 100644 index 0000000..4286687 --- /dev/null +++ b/docs/_build/_modules/Medfl/LearningManager/client.html @@ -0,0 +1,262 @@ + + + + + + MEDfl.LearningManager.client — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for MEDfl.LearningManager.client

+#!/usr/bin/env python3
+import flwr as fl
+from opacus import PrivacyEngine
+from torch.utils.data import DataLoader
+
+from .model import Model
+from .utils import params
+import torch
+
+
+[docs] +class FlowerClient(fl.client.NumPyClient): + """ + FlowerClient class for creating MEDfl clients. + + Attributes: + cid (str): Client ID. + local_model (Model): Local model of the federated learning network. + trainloader (DataLoader): DataLoader for training data. + valloader (DataLoader): DataLoader for validation data. + diff_priv (bool): Flag indicating whether to use differential privacy. + """ +
+[docs] + def __init__(self, cid: str, local_model: Model, trainloader: DataLoader, valloader: DataLoader, diff_priv: bool = params["diff_privacy"]): + """ + Initializes the FlowerClient instance. + + Args: + cid (str): Client ID. + local_model (Model): Local model of the federated learning network. + trainloader (DataLoader): DataLoader for training data. + valloader (DataLoader): DataLoader for validation data. + diff_priv (bool): Flag indicating whether to use differential privacy. + """ + self.cid = cid + self.local_model = local_model + self.trainloader = trainloader + self.valloader = valloader + self.device = torch.device(f"cuda:{int(self.cid) % 4}" if torch.cuda.is_available() else "cpu") + self.local_model.model.to(self.device) + self.privacy_engine = PrivacyEngine(secure_mode=False) + self.diff_priv = diff_priv + self.epsilons = [] + self.accuracies = [] + self.losses = [] + if self.diff_priv: + model, optimizer, self.trainloader = self.privacy_engine.make_private_with_epsilon( + module=self.local_model.model.train(), + optimizer=self.local_model.optimizer, + data_loader=self.trainloader, + epochs=params["train_epochs"], + target_epsilon=params["EPSILON"], + target_delta=params["DELTA"], + max_grad_norm=params["MAX_GRAD_NORM"], + ) + setattr(self.local_model, "model", model) + setattr(self.local_model, "optimizer", optimizer) + self.validate()
+ + +
+[docs] + def validate(self): + """Validates cid, local_model, trainloader, valloader.""" + if not isinstance(self.cid, str): + raise TypeError("cid argument must be a string") + + if not isinstance(self.local_model, Model): + raise TypeError("local_model argument must be a MEDfl.LearningManager.model.Model") + + if not isinstance(self.trainloader, DataLoader): + raise TypeError("trainloader argument must be a torch.utils.data.dataloader") + + if not isinstance(self.valloader, DataLoader): + raise TypeError("valloader argument must be a torch.utils.data.dataloader") + + if not isinstance(self.diff_priv, bool): + raise TypeError("diff_priv argument must be a bool")
+ + +
+[docs] + def get_parameters(self, config): + """ + Returns the current parameters of the local model. + + Args: + config: Configuration information. + + Returns: + Numpy array: Parameters of the local model. + """ + print(f"[Client {self.cid}] get_parameters") + return self.local_model.get_parameters()
+ + +
+[docs] + def fit(self, parameters, config): + """ + Fits the local model to the received parameters using federated learning. + + Args: + parameters: Parameters received from the server. + config: Configuration information. + + Returns: + Tuple: Parameters of the local model, number of training examples, and privacy information. + """ + print(f"[Client {self.cid}] fit, config: {config}") + self.local_model.set_parameters(parameters) + for _ in range(params["train_epochs"]): + epsilon = self.local_model.train( + self.trainloader, + epoch=_, + device=self.device, + privacy_engine=self.privacy_engine, + diff_priv=self.diff_priv, + ) + self.epsilons.append(epsilon) + print(f"epsilon of client {self.cid} : eps = {epsilon}") + return ( + self.local_model.get_parameters(), + len(self.trainloader), + {"epsilon": epsilon}, + )
+ + +
+[docs] + def evaluate(self, parameters, config): + """ + Evaluates the local model on the validation data and returns the loss and accuracy. + + Args: + parameters: Parameters received from the server. + config: Configuration information. + + Returns: + Tuple: Loss, number of validation examples, and accuracy information. + """ + print(f"[Client {self.cid}] evaluate, config: {config}") + self.local_model.set_parameters(parameters) + loss, accuracy = self.local_model.evaluate( + self.valloader, device=self.device + ) + self.losses.append(loss) + self.accuracies.append(accuracy) + return float(loss), len(self.valloader), {"accuracy": float(accuracy)}
+
+ +
+ +
+
+
+ +
+ +
+

+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/_modules/Medfl/LearningManager/dynamicModal.html b/docs/_build/_modules/Medfl/LearningManager/dynamicModal.html new file mode 100644 index 0000000..89b94e6 --- /dev/null +++ b/docs/_build/_modules/Medfl/LearningManager/dynamicModal.html @@ -0,0 +1,424 @@ + + + + + + MEDfl.LearningManager.dynamicModal — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for MEDfl.LearningManager.dynamicModal

+import torch
+import torch.nn as nn
+from sklearn.svm import SVC
+
+
+[docs] +class DynamicModel: + """DynamicModel class for creating various types of neural network models.""" + + # Create a binary classifier model +
+[docs] + @staticmethod + def create_binary_classifier(input_dim, hidden_dims, output_dim, activation='relu', dropout_rate=0.0, + batch_norm=False, use_gpu=False): + """ + Creates a binary classifier neural network model with customizable architecture. + + Args: + input_dim (int): Dimension of the input data. + hidden_dims (List[int]): List of dimensions for hidden layers. + output_dim (int): Dimension of the output (number of classes). + activation (str, optional): Activation function for hidden layers. Default is 'relu'. + dropout_rate (float, optional): Dropout rate for regularization. Default is 0.0 (no dropout). + batch_norm (bool, optional): Whether to apply batch normalization. Default is False. + use_gpu (bool, optional): Whether to use GPU acceleration. Default is False. + + Returns: + torch.nn.Module: Created PyTorch model. + """ + + layers = [] + + for i in range(len(hidden_dims)): + if i == 0: + layers.append(nn.Linear(input_dim, hidden_dims[0])) + else: + layers.append(nn.Linear(hidden_dims[i - 1], hidden_dims[i])) + + if batch_norm: + layers.append(nn.BatchNorm1d(hidden_dims[i])) + + activation_layer = nn.ReLU() if activation == 'relu' else nn.Sigmoid() + layers.append(activation_layer) + + if dropout_rate > 0.0: + layers.append(nn.Dropout(dropout_rate)) + + layers.append(nn.Linear(hidden_dims[-1], output_dim)) + layers.append(nn.Sigmoid()) + + model = nn.Sequential(*layers) + + if use_gpu: + model = model.cuda() + + return model
+ + + # Create a multi-class classifier model +
+[docs] + @staticmethod + def create_multiclass_classifier(input_dim, hidden_dims, output_dim, activation='relu', dropout_rate=0.0, + batch_norm=False, use_gpu=False): + """ + Creates a multiclass classifier neural network model with customizable architecture. + + Args: + input_dim (int): Dimension of the input data. + hidden_dims (List[int]): List of dimensions for hidden layers. + output_dim (int): Dimension of the output (number of classes). + activation (str, optional): Activation function for hidden layers. Default is 'relu'. + dropout_rate (float, optional): Dropout rate for regularization. Default is 0.0 (no dropout). + batch_norm (bool, optional): Whether to apply batch normalization. Default is False. + use_gpu (bool, optional): Whether to use GPU acceleration. Default is False. + + Returns: + torch.nn.Module: Created PyTorch model. + """ + layers = [] + + for i in range(len(hidden_dims)): + if i == 0: + layers.append(nn.Linear(input_dim, hidden_dims[0])) + else: + layers.append(nn.Linear(hidden_dims[i - 1], hidden_dims[i])) + + if batch_norm: + layers.append(nn.BatchNorm1d(hidden_dims[i])) + + activation_layer = nn.ReLU() if activation == 'relu' else nn.Sigmoid() + layers.append(activation_layer) + + if dropout_rate > 0.0: + layers.append(nn.Dropout(dropout_rate)) + + layers.append(nn.Linear(hidden_dims[-1], output_dim)) + layers.append(nn.LogSoftmax(dim=1)) + + model = nn.Sequential(*layers) + + if use_gpu: + model = model.cuda() + + return model
+ + + # Create a linear regressor model +
+[docs] + @staticmethod + def create_linear_regressor(input_dim, output_dim, use_gpu=False): + """ + Creates a linear regressor neural network model. + + Args: + input_dim (int): Dimension of the input data. + output_dim (int): Dimension of the output. + + Returns: + torch.nn.Module: Created PyTorch model. + """ + class LinearRegressionModel(nn.Module): + def __init__(self): + super(LinearRegressionModel, self).__init__() + self.linear = nn.Linear(input_dim, output_dim) + + def forward(self, x): + return self.linear(x) + + model = LinearRegressionModel() + + if use_gpu: + model = model.cuda() + + return model
+ + + # Create a logistic regressor model +
+[docs] + @staticmethod + def create_logistic_regressor(input_dim, use_gpu=False): + """ + Creates a logistic regressor neural network model. + + Args: + input_dim (int): Dimension of the input data. + + Returns: + torch.nn.Module: Created PyTorch model. + """ + class LogisticRegressionModel(nn.Module): + def __init__(self): + super(LogisticRegressionModel, self).__init__() + self.linear = nn.Linear(input_dim, 1) + + def forward(self, x): + return torch.sigmoid(self.linear(x)) + + model = LogisticRegressionModel() + + if use_gpu: + model = model.cuda() + + return model
+ + +
+[docs] + @staticmethod + def create_convolutional_neural_network(input_channels, output_channels, kernel_size, use_gpu=False): + """ + Creates a convolutional neural network (CNN) model. + + Args: + input_channels (int): Number of input channels. + output_channels (int): Number of output channels. + kernel_size (int): Size of the convolutional kernel. + + Returns: + torch.nn.Module: Created PyTorch model. + """ + + model = nn.Sequential( + nn.Conv2d(input_channels, output_channels, kernel_size), + nn.ReLU(), + nn.MaxPool2d(2) + ) + + if use_gpu: + model = model.cuda() + + return model
+ + +
+[docs] + @staticmethod + def create_recurrent_neural_network(input_size, hidden_size, use_gpu=False): + """ + Creates a recurrent neural network (RNN) model. + + Args: + input_size (int): Size of the input. + hidden_size (int): Size of the hidden layer. + + Returns: + torch.nn.Module: Created PyTorch model. + """ + + model = nn.RNN(input_size, hidden_size, batch_first=True) + + if use_gpu: + model = model.cuda() + + return model
+ + +
+[docs] + @staticmethod + def create_lstm_network(input_size, hidden_size, use_gpu=False): + """ + Creates a Long Short-Term Memory (LSTM) network model. + + Args: + input_size (int): Size of the input layer. + hidden_size (int): Size of the hidden layer. + + Returns: + torch.nn.Module: Created PyTorch model. + """ + + model = nn.LSTM(input_size, hidden_size, batch_first=True) + + if use_gpu: + model = model.cuda() + + return model
+ + + # Create the dynamic model +
+[docs] + def create_model(self, model_type: str, params_dict={}) -> torch.nn.Module: + """ + Create a specific type of model dynamically based on the given parameters. + + Args: + model_type (str): Type of the model to create ('Binary Classifier', 'Multiclass Classifier', 'Linear Regressor', 'Logistic Regressor', 'SVM', 'Neural Network Classifier', 'Convolutional Neural Network', 'Recurrent Neural Network', 'LSTM Network', 'Autoencoder'). + params_dict (dict): Dictionary containing parameters for model creation. + + Returns: + torch.nn.Module: Created PyTorch model. + """ + if model_type == 'Binary Classifier': + return self.create_binary_classifier( + params_dict['input_dim'], params_dict['hidden_dims'], + params_dict['output_dim'], params_dict.get('activation', 'relu'), + params_dict.get('dropout_rate', 0.0), params_dict.get('batch_norm', False), + params_dict.get('use_gpu', False) + ) + elif model_type == 'Multiclass Classifier': + return self.create_multiclass_classifier( + params_dict['input_dim'], params_dict['hidden_dims'], + params_dict['output_dim'], params_dict.get('activation', 'relu'), + params_dict.get('dropout_rate', 0.0), params_dict.get('batch_norm', False), + params_dict.get('use_gpu', False) + ) + elif model_type == 'Linear Regressor': + return self.create_linear_regressor( + params_dict['input_dim'], params_dict['output_dim'], + params_dict.get('use_gpu', False) + ) + elif model_type == 'Logistic Regressor': + return self.create_logistic_regressor( + params_dict['input_dim'], params_dict.get('use_gpu', False) + ) + elif model_type == 'Neural Network Classifier': + return self.create_neural_network_classifier( + params_dict['input_dim'], params_dict['output_dim'], + params_dict['hidden_dims'], params_dict.get('activation', 'relu'), + params_dict.get('dropout_rate', 0.0), params_dict.get('batch_norm', False), + params_dict.get('num_layers', 2), params_dict.get('use_gpu', False) + ) + elif model_type == 'Convolutional Neural Network': + return self.create_convolutional_neural_network( + params_dict['input_channels'], params_dict['output_channels'], + params_dict['kernel_size'], params_dict.get('use_gpu', False) + ) + elif model_type == 'Recurrent Neural Network': + return self.create_recurrent_neural_network( + params_dict['input_size'], params_dict['hidden_size'], + params_dict.get('use_gpu', False) + ) + elif model_type == 'LSTM Network': + return self.create_lstm_network( + params_dict['input_size'], params_dict['hidden_size'], + params_dict.get('use_gpu', False) + ) + elif model_type == 'Autoencoder': + return self.create_autoencoder( + params_dict['input_size'], params_dict['encoder_hidden_size'], + params_dict.get('use_gpu', False) + ) + else: + raise ValueError("Invalid model type provided")
+
+ + + + +
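A usage sketch of the factory above (the dimensions and hyperparameters are arbitrary placeholders):

from MEDfl.LearningManager.dynamicModal import DynamicModel

model = DynamicModel().create_model(
    "Binary Classifier",
    {
        "input_dim": 10,
        "hidden_dims": [32, 16],
        "output_dim": 1,
        "activation": "relu",
        "dropout_rate": 0.2,
        "batch_norm": True,
        "use_gpu": False,
    },
)
print(model)   # nn.Sequential ending in a Sigmoid head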
+ +
+
+
+ +
+ +
+

+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/_modules/Medfl/LearningManager/federated_dataset.html b/docs/_build/_modules/Medfl/LearningManager/federated_dataset.html new file mode 100644 index 0000000..37059cf --- /dev/null +++ b/docs/_build/_modules/Medfl/LearningManager/federated_dataset.html @@ -0,0 +1,179 @@ + + + + + + MEDfl.LearningManager.federated_dataset — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for MEDfl.LearningManager.federated_dataset

+from scripts.base import *
+from MEDfl.NetManager.net_helper import *
+from MEDfl.NetManager.net_manager_queries import *
+
+
+
+[docs] +class FederatedDataset: +
+[docs] + def __init__( + self, + name: str, + train_nodes: list, + test_nodes: list, + trainloaders: list, + valloaders: list, + testloaders: list, + ): + """ + Represents a Federated Dataset. + + :param name: Name of the Federated Dataset. + :param train_nodes: List of train nodes. + :param test_nodes: List of test nodes. + :param trainloaders: List of train data loaders. + :param valloaders: List of validation data loaders. + :param testloaders: List of test data loaders. + """ + self.name = name + self.train_nodes = train_nodes + self.test_nodes = test_nodes + self.trainloaders = trainloaders + self.valloaders = valloaders + self.testloaders = testloaders + self.size = len(self.trainloaders[0].dataset[0][0])
+ + +
+[docs] + def create(self, FLsetupId: int): + """ + Create a new Federated Dataset in the database. + + :param FLsetupId: The FLsetup ID associated with the Federated Dataset. + """ + query_params = {"name": self.name, "FLsetupId": FLsetupId} + fedDataId = get_feddataset_id_from_name(self.name) + if fedDataId : + self.id = fedDataId + else: + my_eng.execute(text(INSERT_FLDATASET_QUERY), query_params) + self.id = get_feddataset_id_from_name(self.name)
+ + + +
+[docs] + def update(self, FLpipeId: int, FedId: int): + """ + Update the FLpipe ID associated with the Federated Dataset in the database. + + :param FLpipeId: The new FLpipe ID to be updated. + :param FedId: The Federated Dataset ID. + """ + query_params = {"FLpipeId": FLpipeId, "FedId": FedId} + my_eng.execute(text(UPDATE_FLDATASET_QUERY), **query_params)
+
+ +
+ +
+
+
+ +
+ +
+

+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/_modules/Medfl/LearningManager/flpipeline.html b/docs/_build/_modules/Medfl/LearningManager/flpipeline.html new file mode 100644 index 0000000..8dfd867 --- /dev/null +++ b/docs/_build/_modules/Medfl/LearningManager/flpipeline.html @@ -0,0 +1,312 @@ + + + + + + MEDfl.LearningManager.flpipeline — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for MEDfl.LearningManager.flpipeline

+import datetime
+from typing import List
+import json
+import pandas as pd
+
+
+# File: create_query.py
+from sqlalchemy import text
+from torch.utils.data import DataLoader, TensorDataset
+
+from MEDfl.LearningManager.server import FlowerServer
+from MEDfl.LearningManager.utils import params, test
+from scripts.base import my_eng
+from MEDfl.NetManager.net_helper import get_flpipeline_from_name
+from MEDfl.NetManager.net_manager_queries import (CREATE_FLPIPELINE_QUERY,
+                                                  DELETE_FLPIPELINE_QUERY , CREATE_TEST_RESULTS_QUERY)
+
+
+
+[docs] +def create_query(name, description, creation_date, result): + query = text( + f"INSERT INTO FLpipeline(name, description, creation_date, results) " + f"VALUES ('{name}', '{description}', '{creation_date}', '{result}')" + ) + return query
+ + + + +
+[docs] +class FLpipeline: + """ + FLpipeline class for managing Federated Learning pipelines. + + Attributes: + name (str): The name of the FLpipeline. + description (str): A description of the FLpipeline. + server (FlowerServer): The FlowerServer object associated with the FLpipeline. + + Methods: + __init__(self, name: str, description: str, server: FlowerServer) -> None: + Initialize FLpipeline with the specified name, description, and server. + + + """ + + def __init__( + self, name: str, description: str, server: FlowerServer + ) -> None: + self.name = name + self.description = description + self.server = server + self.validate() + +
+[docs] + def validate(self) -> None: + """ + Validate the name, description, and server attributes. + Raises: + TypeError: If the name is not a string, the description is not a string, + or the server is not a FlowerServer object. + """ + if not isinstance(self.name, str): + raise TypeError("name argument must be a string") + + if not isinstance(self.description, str): + raise TypeError("description argument must be a string") + + if not isinstance(self.server, FlowerServer): + raise TypeError("server argument must be a FlowerServer")
+ + +
+[docs] + def create(self, result: str) -> None: + """ + Create a new FLpipeline entry in the database with the given result. + + Args: + result (str): The result string to store in the database. + + """ + creation_date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + query = CREATE_FLPIPELINE_QUERY.format( + name=self.name, + description=self.description, + creation_date=creation_date, + result=result, + ) + my_eng.execute(text(query)) + self.id = get_flpipeline_from_name(self.name) + try: + self.server.fed_dataset.update( + FLpipeId=self.id, FedId=self.server.fed_dataset.id + ) + except: + pass
+ + +
+[docs] + def delete(self) -> None: + """ + Delete the FLpipeline entry from the database based on its name. + + Note: This is a placeholder method and needs to be implemented based on your specific database setup. + + """ + # Placeholder code for deleting the FLpipeline entry from the database based on the name. + # You need to implement the actual deletion based on your database setup. + my_eng.execute(DELETE_FLPIPELINE_QUERY.format(self.name))
+ + +
+[docs] + def test_by_node(self, node_name: str, test_frac=1) -> dict: + """ + Test the FLpipeline by node with the specified test_frac. + + Args: + node_name (str): The name of the node to test. + test_frac (float, optional): The fraction of the test data to use. Default is 1. + + Returns: + dict: A dictionary containing the node name and the classification report. + + """ + idx = self.server.fed_dataset.test_nodes.index(node_name) + global_model, test_loader = ( + self.server.global_model, + self.server.fed_dataset.testloaders[idx], + ) + test_data = test_loader.dataset + test_data = TensorDataset( + test_data[: int(test_frac * len(test_data))][0], + test_data[: int(test_frac * len(test_data))][1], + ) + test_loader = DataLoader( + test_data, batch_size=params["test_batch_size"] + ) + classification_report = test( + model=global_model.model, test_loader=test_loader + ) + return { + "node_name": node_name, + "classification_report": str(classification_report), + }
+ + +
+[docs] + def auto_test(self, test_frac=1) -> List[dict]: + """ + Automatically test the FLpipeline on all nodes with the specified test_frac. + + Args: + test_frac (float, optional): The fraction of the test data to use. Default is 1. + + Returns: + List[dict]: A list of dictionaries containing the node names and the classification reports. + + """ + result = [ + self.test_by_node(node, test_frac) + for node in self.server.fed_dataset.test_nodes + ] + self.create("\n".join(str(res).replace("'", '"') for res in result)) + + # stockage des resultats des tests + for entry in result: + node_name = entry['node_name'] + classification_report_str = entry['classification_report'] + + # Convert the 'classification_report' string to a dictionary + classification_report_dict = json.loads(classification_report_str.replace("'", "\"")) + try: + # Insert record into the 'testResults' table + query = CREATE_TEST_RESULTS_QUERY.format( + pipelineId = self.id, + nodeName = node_name , + confusion_matrix = json.dumps(classification_report_dict['confusion matrix']), + accuracy =classification_report_dict['Accuracy'] , + sensivity = classification_report_dict['Sensitivity/Recall'] , + ppv = classification_report_dict['PPV/Precision'] , + npv= classification_report_dict['NPV'] , + f1score= classification_report_dict['F1-score'] , + fpr= classification_report_dict['False positive rate'] , + tpr= classification_report_dict['True positive rate'] + ) + my_eng.execute(text(query)) + except Exception as e: + # This block will catch any other exceptions + print(f"An unexpected error occurred: {e}") + + + + return result
+
+ +
+ +
+
+
+ +
+ +
+

+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/_modules/Medfl/LearningManager/model.html b/docs/_build/_modules/Medfl/LearningManager/model.html new file mode 100644 index 0000000..e6c903c --- /dev/null +++ b/docs/_build/_modules/Medfl/LearningManager/model.html @@ -0,0 +1,340 @@ + + + + + + MEDfl.LearningManager.model — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for MEDfl.LearningManager.model

+#!/usr/bin/env python3
+# forked from https://github.com/pythonlessons/mltu/blob/main/mltu/torch/model.py
+
+import typing
+from collections import OrderedDict
+from typing import Dict, List, Optional, Tuple
+
+import numpy as np
+import torch
+import torch.nn as nn
+from sklearn.metrics import accuracy_score
+
+from .utils import params
+
+
+
+[docs] +class Model: + """ + Model class for training and testing PyTorch neural networks. + + Attributes: + model (torch.nn.Module): PyTorch neural network. + optimizer (torch.optim.Optimizer): PyTorch optimizer. + criterion (typing.Callable): Loss function. + """ + +
+[docs] + def __init__( + self, + model: torch.nn.Module, + optimizer: torch.optim.Optimizer, + criterion: typing.Callable, + ) -> None: + """ + Initialize Model class with the specified model, optimizer, and criterion. + + Args: + model (torch.nn.Module): PyTorch neural network. + optimizer (torch.optim.Optimizer): PyTorch optimizer. + criterion (typing.Callable): Loss function. + """ + self.model = model + self.optimizer = optimizer + self.criterion = criterion + # Get device on which model is running + self.validate()
+ + +
+[docs] + def validate(self) -> None: + """ + Validate model and optimizer. + """ + if not isinstance(self.model, torch.nn.Module): + raise TypeError("model argument must be a torch.nn.Module") + + if not isinstance(self.optimizer, torch.optim.Optimizer): + raise TypeError( + "optimizer argument must be a torch.optim.Optimizer" + )
+ + +
+[docs] + def get_parameters(self) -> List[np.ndarray]: + """ + Get the parameters of the model as a list of NumPy arrays. + + Returns: + List[np.ndarray]: The parameters of the model as a list of NumPy arrays. + """ + return [ + val.cpu().numpy() for _, val in self.model.state_dict().items() + ]
+ + +
+[docs] + def set_parameters(self, parameters: List[np.ndarray]) -> None: + """ + Set the parameters of the model from a list of NumPy arrays. + + Args: + parameters (List[np.ndarray]): The parameters to be set. + """ + params_dict = zip(self.model.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) + self.model.load_state_dict(state_dict, strict=True)
+ + +
+[docs] + def train( + self, train_loader, epoch, device, privacy_engine, diff_priv=False + ) -> float: + """ + Train the model on the given train_loader for one epoch. + + Args: + train_loader: The data loader for training data. + epoch (int): The current epoch number. + device: The device on which to perform the training. + privacy_engine: The privacy engine used for differential privacy (if enabled). + diff_priv (bool, optional): Whether differential privacy is used. Default is False. + + Returns: + float: The value of epsilon used in differential privacy. + """ + self.model.train() + epsilon = 0 + losses = [] + top1_acc = [] + + for i, (X_train, y_train) in enumerate(train_loader): + self.optimizer.zero_grad() + + # compute output + y_hat = torch.squeeze(self.model(X_train), 1) + loss = self.criterion(y_hat, y_train) + + preds = np.argmax(y_hat.detach().cpu().numpy(), axis=0) + labels = y_train.detach().cpu().numpy() + + # measure accuracy and record loss + acc = (preds == labels).mean() + + losses.append(loss.item()) + top1_acc.append(acc) + + loss.backward() + self.optimizer.step() + + if diff_priv: + epsilon = privacy_engine.get_epsilon(params["DELTA"]) + + if (i + 1) % 10 == 0: + if diff_priv: + epsilon = privacy_engine.get_epsilon(params["DELTA"]) + print( + f"\tTrain Epoch: {epoch} \t" + f"Loss: {np.mean(losses):.6f} " + f"Acc@1: {np.mean(top1_acc) * 100:.6f} " + f"(ε = {epsilon:.2f}, δ = {params['DELTA']})" + ) + else: + print( + f"\tTrain Epoch: {epoch} \t" + f"Loss: {np.mean(losses):.6f} " + f"Acc@1: {np.mean(top1_acc) * 100:.6f}" + ) + + return epsilon
+ + +
+[docs] + def evaluate( + self, val_loader, device=torch.device("cpu") + ) -> Tuple[float, float]: + """ + Evaluate the model on the given validation data. + + Args: + val_loader: The data loader for validation data. + device: The device on which to perform the evaluation. Default is 'cpu'. + + Returns: + Tuple[float, float]: The evaluation loss and accuracy. + """ + correct, total, loss, accuracy = 0, 0, 0.0, [] + self.model.eval() + + with torch.no_grad(): + for X_test, y_test in val_loader: + y_hat = torch.squeeze(self.model(X_test), 1) + accuracy.append(accuracy_score(y_test, y_hat.round())) + loss += self.criterion(y_hat, y_test).item() + total += y_test.size(0) + correct += np.sum( + y_hat.round().detach().numpy() == y_test.detach().numpy() + ) + + loss /= len(val_loader.dataset) + return loss, np.mean(accuracy)
+ + +
+[docs] + @staticmethod + def save_model(model , model_name:str): + """ + Saves a PyTorch model to a file. + + Args: + model (torch.nn.Module): PyTorch model to be saved. + model_name (str): Name of the model file. + + Raises: + Exception: If there is an issue during the saving process. + + Returns: + None + """ + try: + torch.save(model, '../../notebooks/.ipynb_checkpoints/trainedModels/' + model_name + ".pth") + except Exception as e: + raise Exception(f"Error saving the model: {str(e)}")
+ + +
+[docs] + @staticmethod + def load_model(model_name:str): + """ + Loads a PyTorch model from a file. + + Args: + model_name (str): Name of the model file to be loaded. + + Returns: + torch.nn.Module: Loaded PyTorch model. + """ + loadedModel = torch.load('../../notebooks/.ipynb_checkpoints/trainedModels/'+model_name+".pth") + return loadedModel
+
+ + +
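A usage sketch wrapping a toy network in the Model helper (the architecture and hyperparameters are placeholders):

import torch
import torch.nn as nn
from MEDfl.LearningManager.model import Model

net = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1), nn.Sigmoid())
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
criterion = nn.BCELoss()

model = Model(net, optimizer, criterion)
weights = model.get_parameters()   # list of NumPy arrays, one per state_dict entry
model.set_parameters(weights)      # load them back (e.g. after server aggregation)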
+ +
+
+
+ +
+ +
+

+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/_modules/Medfl/LearningManager/plot.html b/docs/_build/_modules/Medfl/LearningManager/plot.html new file mode 100644 index 0000000..46cf40b --- /dev/null +++ b/docs/_build/_modules/Medfl/LearningManager/plot.html @@ -0,0 +1,357 @@ + + + + + + MEDfl.LearningManager.plot — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for MEDfl.LearningManager.plot

+import matplotlib.pyplot as plt
+import numpy as np
+import seaborn as sns
+
+from .utils import *
+
+# Replace this with your actual code for data collection
+results_dict = {
+    ("LR: 0.001, Optimizer: Adam", "accuracy"): [0.85, 0.89, 0.92, 0.94, ...],
+    ("LR: 0.001, Optimizer: Adam", "loss"): [0.2, 0.15, 0.1, 0.08, ...],
+    ("LR: 0.01, Optimizer: SGD", "accuracy"): [0.88, 0.91, 0.93, 0.95, ...],
+    ("LR: 0.01, Optimizer: SGD", "loss"): [0.18, 0.13, 0.09, 0.07, ...],
+    ("LR: 0.1, Optimizer: Adam", "accuracy"): [0.82, 0.87, 0.91, 0.93, ...],
+    ("LR: 0.1, Optimizer: Adam", "loss"): [0.25, 0.2, 0.15, 0.12, ...],
+}
+"""
+server should have:
+ #len = num of rounds
+  self.accuracies
+  self.losses
+  
+Client should have
+  # len = num of epochs
+  self.accuracies
+  self.losses
+  self.epsilons
+  self.deltas
+  
+#common things : LR,SGD, Aggregation
+  
+"""
+
+
+
+[docs] +class AccuracyLossPlotter: + """ + A utility class for plotting accuracy and loss metrics based on experiment results. + + Args: + results_dict (dict): Dictionary containing experiment results organized by parameters and metrics. + + Attributes: + results_dict (dict): Dictionary containing experiment results organized by parameters and metrics. + parameters (list): List of unique parameters in the experiment results. + metrics (list): List of unique metrics in the experiment results. + iterations (range): Range of iterations (rounds or epochs) in the experiment. + """ + +
+[docs] + def __init__(self, results_dict): + """ + Initialize the AccuracyLossPlotter with experiment results. + + Args: + results_dict (dict): Dictionary containing experiment results organized by parameters and metrics. + """ + self.results_dict = results_dict + self.parameters = list( + set([param[0] for param in results_dict.keys()]) + ) + self.metrics = list(set([param[1] for param in results_dict.keys()])) + self.iterations = range(1, len(list(results_dict.values())[0]) + 1)
+ + +
+[docs] + def plot_accuracy_loss(self): + """ + Plot accuracy and loss metrics for different parameters. + """ + + plt.figure(figsize=(8, 6)) + + for param in self.parameters: + for metric in self.metrics: + key = (param, metric) + values = self.results_dict[key] + plt.plot( + self.iterations, + values, + label=f"{param} ({metric})", + marker="o", + linestyle="-", + ) + + plt.xlabel("Rounds") + plt.ylabel("Accuracy / Loss") + plt.title("Accuracy and Loss by Parameters") + plt.legend() + plt.grid(True) + plt.show()
+ + +
+[docs] + @staticmethod + def plot_global_confusion_matrix(pipeline_name: str): + """ + Plot a global confusion matrix based on pipeline results. + + Args: + pipeline_name (str): Name of the pipeline. + + Returns: + None + """ + # Get the id of the pipeline by name + pipeline_id = get_pipeline_from_name(pipeline_name) + # get the confusion matrix pf the pipeline + confusion_matrix = get_pipeline_confusion_matrix(pipeline_id) + + # Extracting confusion matrix values + TP = confusion_matrix['TP'] + FP = confusion_matrix['FP'] + FN = confusion_matrix['FN'] + TN = confusion_matrix['TN'] + + # Creating a matrix for visualization + matrix = [[TN, FP], + [FN, TP]] + + # Plotting the confusion matrix as a heatmap + plt.figure(figsize=(6, 4)) + sns.heatmap(matrix, annot=True, fmt='d', cmap='Blues', + xticklabels=['Predicted Negative', 'Predicted Positive'], + yticklabels=['Actual Negative', 'Actual Positive']) + plt.title('Global Confusion Matrix') + plt.xlabel('Predicted label') + plt.ylabel('True label') + plt.tight_layout() + + # Display the confusion matrix heatmap + plt.show()
+ + +
+[docs] + @staticmethod + def plot_confusion_Matrix_by_node(node_name: str, pipeline_name: str): + """ + Plot a confusion matrix for a specific node in the pipeline. + + Args: + node_name (str): Name of the node. + pipeline_name (str): Name of the pipeline. + + Returns: + None + """ + + # Get the id of the pipeline by name + pipeline_id = get_pipeline_from_name(pipeline_name) + # get the confusion matrix pf the pipeline + confusion_matrix = get_node_confusion_matrix( + pipeline_id, node_name=node_name) + + # Extracting confusion matrix values + TP = confusion_matrix['TP'] + FP = confusion_matrix['FP'] + FN = confusion_matrix['FN'] + TN = confusion_matrix['TN'] + + # Creating a matrix for visualization + matrix = [[TN, FP], + [FN, TP]] + + # Plotting the confusion matrix as a heatmap + plt.figure(figsize=(6, 4)) + sns.heatmap(matrix, annot=True, fmt='d', cmap='Blues', + xticklabels=['Predicted Negative', 'Predicted Positive'], + yticklabels=['Actual Negative', 'Actual Positive']) + plt.title('Confusion Matrix of node: '+node_name) + plt.xlabel('Predicted label') + plt.ylabel('True label') + plt.tight_layout() + + # Display the confusion matrix heatmap + plt.show() + return
+ + +
+[docs] + @staticmethod + def plot_classification_report(pipeline_name: str): + """ + Plot a comparison of classification report metrics between nodes. + + Args: + pipeline_name (str): Name of the pipeline. + + Returns: + None + """ + + colors = ['#FF5733', '#6A5ACD', '#3CB371', '#FFD700', '#FFA500', '#8A2BE2', '#00FFFF', '#FF00FF', '#A52A2A', '#00FF00'] + + # Get the id of the pipeline by name + pipeline_id = get_pipeline_from_name(pipeline_name) + + pipeline_results = get_pipeline_result(pipeline_id) + + nodesList = pipeline_results['nodename'] + classificationReports = [] + + for index, node in enumerate(nodesList): + classificationReports.append({ + 'Accuracy': pipeline_results['accuracy'][index], + 'Sensitivity/Recall': pipeline_results['sensivity'][index], + 'PPV/Precision': pipeline_results['ppv'][index], + 'NPV': pipeline_results['npv'][index], + 'F1-score': pipeline_results['f1score'][index], + 'False positive rate': pipeline_results['fpr'][index], + 'True positive rate': pipeline_results['tpr'][index] + }) + + metric_labels = list(classificationReports[0].keys()) # Assuming both reports have the same keys + + # Set the positions of the bars on the x-axis + x = np.arange(len(metric_labels)) + + # Set the width of the bars + width = 0.35 + + plt.figure(figsize=(12, 6)) + + for index, report in enumerate(classificationReports): + metric = list(report.values()) + plt.bar(x + (index - len(nodesList) / 2) * width / len(nodesList), metric, width / len(nodesList), + label=nodesList[index], color=colors[index % len(colors)]) + + # Adding labels, title, and legend + plt.xlabel('Metrics') + plt.ylabel('Values') + plt.title('Comparison of Classification Report Metrics between Nodes') + plt.xticks(ticks=x, labels=metric_labels, rotation=45) + plt.legend() + + # Show plot + plt.tight_layout() + plt.show() + + return
+
+ +
+ +
+
+
+ +
+ +
+

+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/_modules/Medfl/LearningManager/server.html b/docs/_build/_modules/Medfl/LearningManager/server.html new file mode 100644 index 0000000..ecd038b --- /dev/null +++ b/docs/_build/_modules/Medfl/LearningManager/server.html @@ -0,0 +1,300 @@ + + + + + + MEDfl.LearningManager.server — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for MEDfl.LearningManager.server

+#!/usr/bin/env python3
+
+import copy
+from typing import Dict, Optional, Tuple
+
+import flwr as fl
+import torch
+
+from .client import FlowerClient
+from .federated_dataset import FederatedDataset
+from .model import Model
+from .strategy import Strategy
+
+
+
+[docs] +class FlowerServer: + """ + A class representing the central server for Federated Learning using Flower. + + Attributes: + global_model (Model): The global model that will be federated among clients. + strategy (Strategy): The strategy used for federated learning, specifying communication and aggregation methods. + num_rounds (int): The number of federated learning rounds to perform. + num_clients (int): The number of clients participating in the federated learning process. + fed_dataset (FederatedDataset): The federated dataset used for training and evaluation. + diff_priv (bool): Whether differential privacy is used during the federated learning process. + accuracies (List[float]): A list to store the accuracy of the global model during each round. + losses (List[float]): A list to store the loss of the global model during each round. + flower_clients (List[FlowerClient]): A list to store the FlowerClient objects representing individual clients. + + """ + +
+[docs] + def __init__( + self, + global_model: Model, + strategy: Strategy, + num_rounds: int, + num_clients: int, + fed_dataset: FederatedDataset, + diff_privacy: bool = False, + client_resources: Optional[Dict[str, float]] = {'num_cpus': 1, 'num_gpus': 0.0} + ) -> None: + """ + Initialize a FlowerServer object with the specified parameters. + + Args: + global_model (Model): The global model that will be federated among clients. + strategy (Strategy): The strategy used for federated learning, specifying communication and aggregation methods. + num_rounds (int): The number of federated learning rounds to perform. + num_clients (int): The number of clients participating in the federated learning process. + fed_dataset (FederatedDataset): The federated dataset used for training and evaluation. + diff_privacy (bool, optional): Whether differential privacy is used during the federated learning process. + Default is False. + """ + self.device = torch.device( + f"cuda" if torch.cuda.is_available() else "cpu" + ) + self.global_model = global_model + self.params = global_model.get_parameters() + self.global_model.model = global_model.model.to(self.device) + self.num_rounds = num_rounds + self.num_clients = num_clients + self.fed_dataset = fed_dataset + self.strategy = strategy + self.client_resources = client_resources + setattr( + self.strategy.strategy_object, + "min_available_clients", + self.num_clients, + ) + setattr( + self.strategy.strategy_object, + "initial_parameters", + fl.common.ndarrays_to_parameters(self.params), + ) + setattr(self.strategy.strategy_object, "evaluate_fn", self.evaluate) + self.fed_dataset = fed_dataset + self.diff_priv = diff_privacy + self.accuracies = [] + self.losses = [] + self.flower_clients = [] + self.validate()
+ + +
+[docs] + def validate(self) -> None: + """Validate global_model, strategy, num_clients, num_rounds, fed_dataset, diff_privacy""" + if not isinstance(self.global_model, Model): + raise TypeError("global_model argument must be a Model instance") + + if not isinstance(self.strategy, Strategy): + raise TypeError("strategy argument must be a Strategy instance") + + if not isinstance(self.num_clients, int): + raise TypeError("num_clients argument must be an int") + + if not isinstance(self.num_rounds, int): + raise TypeError("num_rounds argument must be an int") + + if not isinstance(self.diff_priv, bool): + raise TypeError("diff_priv argument must be a bool")
+ + +
+[docs] + def client_fn(self, cid) -> FlowerClient: + """ + Return a FlowerClient object for a specific client ID. + + Args: + cid: The client ID. + + Returns: + FlowerClient: A FlowerClient object representing the individual client. + """ + device = torch.device( + f"cuda:{int(cid) % 4}" if torch.cuda.is_available() else "cpu" + ) + client_model = copy.deepcopy(self.global_model) + trainloader = self.fed_dataset.trainloaders[int(cid)] + valloader = self.fed_dataset.valloaders[int(cid)] + # this helps in making plots + client = FlowerClient( + cid, client_model, trainloader, valloader, self.diff_priv + ) + self.flower_clients.append(client) + return client
+ + +
+[docs] + def evaluate( + self, + server_round: int, + parameters: fl.common.NDArrays, + config: Dict[str, fl.common.Scalar], + ) -> Optional[Tuple[float, Dict[str, fl.common.Scalar]]]: + """ + Evaluate the global model on the validation dataset and update the accuracies and losses. + + Args: + server_round (int): The current round of the federated learning process. + parameters (fl.common.NDArrays): The global model parameters. + config (Dict[str, fl.common.Scalar]): Configuration dictionary. + + Returns: + Optional[Tuple[float, Dict[str, fl.common.Scalar]]]: The evaluation loss and accuracy. + """ + testloader = self.fed_dataset.valloaders[0] + self.global_model.set_parameters( + parameters + ) # Update model with the latest parameters + loss, accuracy = self.global_model.evaluate(testloader, self.device) + self.losses.append(loss) + self.accuracies.append(accuracy) + print(f"Server-side evaluation loss {loss} / accuracy {accuracy}") + return loss, {"accuracy": accuracy}
+ + +
+[docs] + def run(self) -> None: + """ + Run the federated learning process using Flower simulation. + + Returns: + History: The history of the accuracies and losses during the training of each node + """ + # Increase the object store memory to the minimum allowed value or higher + ray_init_args = {"include_dashboard": False + , "object_store_memory": 78643200 + } + + history = fl.simulation.start_simulation( + client_fn=self.client_fn, + num_clients=self.num_clients, + config=fl.server.ServerConfig(self.num_rounds), + strategy=self.strategy.strategy_object, + ray_init_args=ray_init_args, + client_resources = self.client_resources + ) + + return history
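A minimal usage sketch, assuming a MEDfl Model and a FederatedDataset have already been built; `global_model` and `fed_dataset` below are placeholders, and the round/client counts are illustrative only.

# Hypothetical wiring of the classes documented in this module.
from MEDfl.LearningManager.strategy import Strategy
from MEDfl.LearningManager.server import FlowerServer

strategy = Strategy(name="FedAvg", min_available_clients=3)
server = FlowerServer(
    global_model=global_model,   # placeholder: a Model wrapping a torch.nn.Module
    strategy=strategy,
    num_rounds=5,
    num_clients=3,
    fed_dataset=fed_dataset,     # placeholder: FederatedDataset with per-node loaders
    diff_privacy=False,
)
history = server.run()           # launches the Flower simulation
print(server.accuracies, server.losses)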
+
+ + +
+ +
+
+
+ +
+ +
+

© Copyright 2023, MEDomics consortium.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/_modules/Medfl/LearningManager/strategy.html b/docs/_build/_modules/Medfl/LearningManager/strategy.html new file mode 100644 index 0000000..c7b8977 --- /dev/null +++ b/docs/_build/_modules/Medfl/LearningManager/strategy.html @@ -0,0 +1,185 @@ + + + + + + MEDfl.LearningManager.strategy — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for MEDfl.LearningManager.strategy

+
+from collections import OrderedDict
+from typing import Dict, List, Optional, Tuple
+
+import flwr as fl
+import numpy as np
+
+
+
+
+
+
+[docs] +class Strategy: + """ + A class representing a strategy for Federated Learning. + + Attributes: + name (str): The name of the strategy. Default is "FedAvg". + fraction_fit (float): Fraction of clients to use for training during each round. Default is 1.0. + fraction_evaluate (float): Fraction of clients to use for evaluation during each round. Default is 1.0. + min_fit_clients (int): Minimum number of clients to use for training during each round. Default is 2. + min_evaluate_clients (int): Minimum number of clients to use for evaluation during each round. Default is 2. + min_available_clients (int): Minimum number of available clients required to start a round. Default is 2. + initial_parameters (Optional[]): The initial parameters of the server model + Methods: + + """ + +
+[docs]
+    def __init__(
+        self,
+        name: str = "FedAvg",
+        fraction_fit: float = 1.0,
+        fraction_evaluate: float = 1.0,
+        min_fit_clients: int = 2,
+        min_evaluate_clients: int = 2,
+        min_available_clients: int = 2,
+        initial_parameters = [],
+        evaluation_methode = "centralized"
+    ) -> None:
+        """
+        Initialize a Strategy object with the specified parameters.
+
+        Args:
+            name (str): The name of the strategy. Default is "FedAvg".
+            fraction_fit (float): Fraction of clients to use for training during each round. Default is 1.0.
+            fraction_evaluate (float): Fraction of clients to use for evaluation during each round. Default is 1.0.
+            min_fit_clients (int): Minimum number of clients to use for training during each round. Default is 2.
+            min_evaluate_clients (int): Minimum number of clients to use for evaluation during each round. Default is 2.
+            min_available_clients (int): Minimum number of available clients required to start a round. Default is 2.
+            initial_parameters (Optional[List[np.ndarray]]): The initial parameters of the server model.
+            evaluation_methode (str): Evaluation mode, either "centralized" or "distributed". Default is "centralized".
+        """
+        self.fraction_fit = fraction_fit
+        self.fraction_evaluate = fraction_evaluate
+        self.min_fit_clients = min_fit_clients
+        self.min_evaluate_clients = min_evaluate_clients
+        self.min_available_clients = min_available_clients
+        self.initial_parameters = initial_parameters
+        self.evaluation_methode = evaluation_methode
+        self.evaluate_fn = None
+        self.name = name
+        # Instantiate the matching Flower strategy class from its name
+        self.strategy_object = eval(
+            f"fl.server.strategy.{self.name}(\
+            fraction_fit={self.fraction_fit},\
+            fraction_evaluate= {self.fraction_evaluate},\
+            min_fit_clients= {self.min_fit_clients},\
+            min_evaluate_clients= {self.min_evaluate_clients},\
+            min_available_clients={self.min_available_clients},\
+            initial_parameters=fl.common.ndarrays_to_parameters(self.initial_parameters),\
+            evaluate_fn={self.evaluate_fn})"
+        )
+
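A minimal sketch of how the strategy name is resolved: the constructor instantiates the class of the same name under fl.server.strategy, so any strategy whose constructor accepts these keyword arguments can be selected by name.

from MEDfl.LearningManager.strategy import Strategy

s = Strategy(name="FedAvg", fraction_fit=1.0, min_fit_clients=2, min_available_clients=2)
print(type(s.strategy_object))   # e.g. <class 'flwr.server.strategy.fedavg.FedAvg'>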
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2023, MEDomics consortium.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/_modules/Medfl/LearningManager/utils.html b/docs/_build/_modules/Medfl/LearningManager/utils.html new file mode 100644 index 0000000..c3efb2a --- /dev/null +++ b/docs/_build/_modules/Medfl/LearningManager/utils.html @@ -0,0 +1,355 @@ + + + + + + MEDfl.LearningManager.utils — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for MEDfl.LearningManager.utils

+#!/usr/bin/env python3
+
+import pkg_resources
+import torch
+import yaml
+from sklearn.metrics import *
+from yaml.loader import SafeLoader
+
+from scripts.base import *
+import json
+
+
+import pandas as pd
+
+yaml_path = pkg_resources.resource_filename(__name__, "params.yaml")
+with open(yaml_path) as g:
+    params = yaml.load(g, Loader=SafeLoader)
+
+
+
+[docs] +def custom_classification_report(y_true, y_pred): + """ + Compute custom classification report metrics including accuracy, sensitivity, specificity, precision, NPV, + F1-score, false positive rate, and true positive rate. + + Args: + y_true (array-like): True labels. + y_pred (array-like): Predicted labels. + + Returns: + dict: A dictionary containing custom classification report metrics. + """ + + tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel() + + # Accuracy + denominator_acc = tp + tn + fp + fn + acc = (tp + tn) / denominator_acc if denominator_acc != 0 else 0.0 + + # Sensitivity/Recall + denominator_sen = tp + fn + sen = tp / denominator_sen if denominator_sen != 0 else 0.0 + + # Specificity + denominator_sp = tn + fp + sp = tn / denominator_sp if denominator_sp != 0 else 0.0 + + # PPV/Precision + denominator_ppv = tp + fp + ppv = tp / denominator_ppv if denominator_ppv != 0 else 0.0 + +# NPV + denominator_npv = tn + fn + npv = tn / denominator_npv if denominator_npv != 0 else 0.0 + + # F1 Score + denominator_f1 = sen + ppv + f1 = 2 * (sen * ppv) / denominator_f1 if denominator_f1 != 0 else 0.0 + + # False Positive Rate + denominator_fpr = fp + tn + fpr = fp / denominator_fpr if denominator_fpr != 0 else 0.0 + + # True Positive Rate + denominator_tpr = tp + fn + tpr = tp / denominator_tpr if denominator_tpr != 0 else 0.0 + + return { + "confusion matrix": {"TP": tp, "FP": fp, "FN": fn, "TN": tn}, + "Accuracy": round(acc, 3), + "Sensitivity/Recall": round(sen, 3), + "Specificity": round(sp, 3), + "PPV/Precision": round(ppv, 3), + "NPV": round(npv, 3), + "F1-score": round(f1, 3), + "False positive rate": round(fpr, 3), + "True positive rate": round(tpr, 3), + }
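A quick toy check of the report (a sketch only, assuming the package and its database configuration are importable; the label vectors are made up):

from MEDfl.LearningManager.utils import custom_classification_report

y_true = [1, 0, 1, 1, 0, 0]   # toy labels
y_pred = [1, 0, 0, 1, 0, 1]   # toy predictions
report = custom_classification_report(y_true, y_pred)
print(report["confusion matrix"])   # TP=2, FP=1, FN=1, TN=2
print(report["Accuracy"])           # 0.667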
+ + + +
+[docs] +def test(model, test_loader, device=torch.device("cpu")): + """ + Evaluate a model using a test loader and return a custom classification report. + + Args: + model (torch.nn.Module): PyTorch model to evaluate. + test_loader (torch.utils.data.DataLoader): DataLoader for the test dataset. + device (torch.device, optional): Device for model evaluation. Default is "cpu". + + Returns: + dict: A dictionary containing custom classification report metrics. + """ + + model.eval() + with torch.no_grad(): + X_test, y_test = test_loader.dataset[:][0], test_loader.dataset[:][1] + y_hat = torch.squeeze(model(X_test), 1).round() + + return custom_classification_report(y_test, y_hat)
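A hedged sketch of evaluating a trained model with the helper above; `model` and `test_loader` are placeholders built elsewhere (a torch module and a DataLoader over a TensorDataset):

import torch
from MEDfl.LearningManager.utils import test

report = test(model, test_loader, device=torch.device("cpu"))   # placeholders: trained model + DataLoader
print(report["F1-score"], report["Sensitivity/Recall"])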
+ + + +column_map = {"object": "VARCHAR(255)", "int64": "INT", "float64": "FLOAT"} + + +
+[docs] +def empty_db(): + """ + Empty the database by deleting records from multiple tables and resetting auto-increment counters. + + Returns: + None + """ + + # my_eng.execute(text(f"DELETE FROM {'DataSets'}")) + my_eng.execute(text(f"DELETE FROM {'Nodes'}")) + my_eng.execute(text(f"DELETE FROM {'FedDatasets'}")) + my_eng.execute(text(f"DELETE FROM {'Networks'}")) + my_eng.execute(text(f"DELETE FROM {'FLsetup'}")) + + my_eng.execute(text(f"DELETE FROM {'FLpipeline'}")) + my_eng.execute(text(f"ALTER TABLE {'Nodes'} AUTO_INCREMENT = 1")) + my_eng.execute(text(f"ALTER TABLE {'Networks'} AUTO_INCREMENT = 1")) + my_eng.execute(text(f"ALTER TABLE {'FedDatasets'} AUTO_INCREMENT = 1")) + my_eng.execute(text(f"ALTER TABLE {'FLsetup'} AUTO_INCREMENT = 1")) + my_eng.execute(text(f"ALTER TABLE {'FLpipeline'} AUTO_INCREMENT = 1")) + my_eng.execute(text(f"DELETE FROM {'testresults'}")) + my_eng.execute(text(f"DROP TABLE IF EXISTS {'masterdataset'}")) + my_eng.execute(text(f"DROP TABLE IF EXISTS {'datasets'}"))
+ + +
+[docs]
+def get_pipeline_from_name(name):
+    """
+    Get the pipeline ID from its name in the database.
+
+    Args:
+        name (str): Name of the pipeline.
+
+    Returns:
+        int: ID of the pipeline.
+    """
+
+    pipeline_id = int(
+        pd.read_sql(
+            text(f"SELECT id FROM flpipeline WHERE name = '{name}'"), my_eng
+        ).iloc[0, 0]
+    )
+    return pipeline_id
+ + +
+[docs] +def get_pipeline_confusion_matrix(pipeline_id): + """ + Get the global confusion matrix for a pipeline based on test results. + + Args: + pipeline_id (int): ID of the pipeline. + + Returns: + dict: A dictionary representing the global confusion matrix. + """ + + data = pd.read_sql( + text(f"SELECT confusionmatrix FROM testresults WHERE pipelineid = '{pipeline_id}'"), my_eng + ) + + # Convert the column of strings into a list of dictionaries representing confusion matrices + confusion_matrices = [ + json.loads(matrix.replace("'", "\"")) for matrix in data['confusionmatrix'] + ] + + # Initialize variables for global confusion matrix + global_TP = global_FP = global_FN = global_TN = 0 + + # Iterate through each dictionary and sum the corresponding values for each category + for matrix in confusion_matrices: + global_TP += matrix['TP'] + global_FP += matrix['FP'] + global_FN += matrix['FN'] + global_TN += matrix['TN'] + + # Create a global confusion matrix as a dictionary + global_confusion_matrix = { + 'TP': global_TP, + 'FP': global_FP, + 'FN': global_FN, + 'TN': global_TN + } + # Return the list of dictionaries representing confusion matrices + return global_confusion_matrix
+ + +
+[docs] +def get_node_confusion_matrix(pipeline_id , node_name): + """ + Get the confusion matrix for a specific node in a pipeline based on test results. + + Args: + pipeline_id (int): ID of the pipeline. + node_name (str): Name of the node. + + Returns: + dict: A dictionary representing the confusion matrix for the specified node. + """ + + data = pd.read_sql( + text(f"SELECT confusionmatrix FROM testresults WHERE pipelineid = '{pipeline_id}' AND nodename = '{node_name}'"), my_eng + ) + + # Convert the column of strings into a list of dictionaries representing confusion matrices + confusion_matrices = [ + json.loads(matrix.replace("'", "\"")) for matrix in data['confusionmatrix'] + ] + + + # Return the list of dictionaries representing confusion matrices + return confusion_matrices[0]
+ + +
+[docs] +def get_pipeline_result(pipeline_id): + """ + Get the test results for a pipeline. + + Args: + pipeline_id (int): ID of the pipeline. + + Returns: + pandas.DataFrame: DataFrame containing test results for the specified pipeline. + """ + data = pd.read_sql( + text(f"SELECT * FROM testresults WHERE pipelineid = '{pipeline_id}'"), my_eng + ) + return data
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2023, MEDomics consortium.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/_modules/Medfl/NetManager/dataset.html b/docs/_build/_modules/Medfl/NetManager/dataset.html new file mode 100644 index 0000000..24a4943 --- /dev/null +++ b/docs/_build/_modules/Medfl/NetManager/dataset.html @@ -0,0 +1,222 @@ + + + + + + MEDfl.NetManager.dataset — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for MEDfl.NetManager.dataset

+import pandas as pd
+from sqlalchemy import text
+
+from scripts.base import my_eng
+from .net_helper import *
+from .net_manager_queries import (DELETE_DATASET, INSERT_DATASET,
+                                  SELECT_ALL_DATASET_NAMES)
+
+
+
+[docs] +class DataSet: +
+[docs] + def __init__(self, name: str, path: str, engine=None): + """ + Initialize a DataSet object. + + :param name: The name of the dataset. + :type name: str + :param path: The file path of the dataset CSV file. + :type path: str + """ + self.name = name + self.path = path + self.engine = engine if engine is not None else my_eng
+ + +
+[docs] + def validate(self): + """ + Validate name and path attributes. + + :raises TypeError: If name or path is not a string. + """ + if not isinstance(self.name, str): + raise TypeError("name argument must be a string") + + if not isinstance(self.path, str): + raise TypeError("path argument must be a string")
+ + +
+[docs] + def upload_dataset(self, NodeId=-1): + """ + Upload the dataset to the database. + + :param NodeId: The NodeId associated with the dataset. + :type NodeId: int + + Notes: + - Assumes the file at self.path is a valid CSV file. + - The dataset is uploaded to the 'DataSets' table in the database. + """ + + data_df = pd.read_csv(self.path) + nodeId = NodeId + columns = data_df.columns.tolist() + + + data_df = process_eicu(data_df) + for index, row in data_df.iterrows(): + query_1 = "INSERT INTO DataSets(DataSetName,nodeId," + "".join( + f"{x}," for x in columns + ) + query_2 = f" VALUES ('{self.name}',{nodeId}, " + "".join( + f"{is_str(data_df, row, x)}," for x in columns + ) + query = query_1[:-1] + ")" + query_2[:-1] + ")" + + self.engine.execute(text(query))
+ + +
+[docs] + def delete_dataset(self): + """ + Delete the dataset from the database. + + Notes: + - Assumes the dataset name is unique in the 'DataSets' table. + """ + self.engine.execute(text(DELETE_DATASET), {"name": self.name})
+ + +
+[docs] + def update_data(self): + """ + Update the data in the dataset. + + Not implemented yet. + """ + pass
+ + +
+[docs] + @staticmethod + def list_alldatasets(engine): + """ + List all dataset names from the 'DataSets' table. + + :returns: A DataFrame containing the names of all datasets in the 'DataSets' table. + :rtype: pd.DataFrame + """ + res = pd.read_sql(text(SELECT_ALL_DATASET_NAMES), engine) + return res
+
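A hypothetical usage sketch, assuming a configured database engine and a CSV whose columns match the MasterDataset schema; the dataset name, path, and node id are illustrative only.

from MEDfl.NetManager.dataset import DataSet

ds = DataSet(name="hospital_1_data", path="data/hospital_1.csv")   # illustrative names
ds.validate()
ds.upload_dataset(NodeId=1)                  # inserts the rows into the DataSets table
print(DataSet.list_alldatasets(ds.engine))   # names of all uploaded datasets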
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2023, MEDomics consortium.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/_modules/Medfl/NetManager/flsetup.html b/docs/_build/_modules/Medfl/NetManager/flsetup.html new file mode 100644 index 0000000..af2adda --- /dev/null +++ b/docs/_build/_modules/Medfl/NetManager/flsetup.html @@ -0,0 +1,447 @@ + + + + + + MEDfl.NetManager.flsetup — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for MEDfl.NetManager.flsetup

+from datetime import datetime
+
+
+from torch.utils.data import random_split, DataLoader, Dataset
+
+from MEDfl.LearningManager.federated_dataset import FederatedDataset
+from .net_helper import *
+from .net_manager_queries import *  # Import the sql_queries module
+from .network import Network
+
+from .node import Node
+
+
+
+[docs] +class FLsetup: +
+[docs] + def __init__(self, name: str, description: str, network: Network): + """Initialize a Federated Learning (FL) setup. + + Args: + name (str): The name of the FL setup. + description (str): A description of the FL setup. + network (Network): An instance of the Network class representing the network architecture. + """ + self.name = name + self.description = description + self.network = network + self.column_name = None + self.auto = 1 if self.column_name is not None else 0 + self.validate() + self.fed_dataset = None
+ + +
+[docs] + def validate(self): + """Validate name, description, and network.""" + if not isinstance(self.name, str): + raise TypeError("name argument must be a string") + + if not isinstance(self.description, str): + raise TypeError("description argument must be a string") + + if not isinstance(self.network, Network): + raise TypeError( + "network argument must be a MEDfl.NetManager.Network " + )
+ + +
+[docs] + def create(self): + """Create an FL setup.""" + creation_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + netid = get_netid_from_name(self.network.name) + my_eng.execute( + text(CREATE_FLSETUP_QUERY), + { + "name": self.name, + "description": self.description, + "creation_date": creation_date, + "net_id": netid, + "column_name": self.column_name, + }, + ) + self.id = get_flsetupid_from_name(self.name)
+ + +
+[docs] + def delete(self): + """Delete the FL setup.""" + if self.fed_dataset is not None: + self.fed_dataset.delete_Flsetup(FLsetupId=self.id) + my_eng.execute(text(DELETE_FLSETUP_QUERY), {"name": self.name})
+ + +
+[docs] + @classmethod + def read_setup(cls, FLsetupId: int): + """Read the FL setup by FLsetupId. + + Args: + FLsetupId (int): The id of the FL setup to read. + + Returns: + FLsetup: An instance of the FLsetup class with the specified FLsetupId. + """ + res = pd.read_sql( + text(READ_SETUP_QUERY), my_eng, params={"flsetup_id": FLsetupId} + ).iloc[0] + + network_res = pd.read_sql( + text(READ_NETWORK_BY_ID_QUERY), + my_eng, + params={"net_id": int(res["NetId"])}, + ).iloc[0] + network = Network(network_res["NetName"]) + setattr(network, "id", res["NetId"]) + fl_setup = cls(res["name"], res["description"], network) + if res["column_name"] == str(None): + res["column_name"] = None + setattr(fl_setup, "column_name", res["column_name"]) + setattr(fl_setup, "id", res["FLsetupId"]) + + return fl_setup
+ + +
+[docs] + @staticmethod + def list_allsetups(): + """List all the FL setups. + + Returns: + DataFrame: A DataFrame containing information about all the FL setups. + """ + Flsetups = pd.read_sql(text(READ_ALL_SETUPS_QUERY), my_eng) + return Flsetups
+ + +
+[docs] + def create_nodes_from_master_dataset(self, params_dict: dict): + """Create nodes from the master dataset. + + Args: + params_dict (dict): A dictionary containing parameters for node creation. + - column_name (str): The name of the column in the MasterDataset used to create nodes. + - train_nodes (list): A list of node names that will be used for training. + - test_nodes (list): A list of node names that will be used for testing. + + Returns: + list: A list of Node instances created from the master dataset. + """ + assert "column_name" in params_dict.keys() + column_name, train_nodes, test_nodes = ( + params_dict["column_name"], + params_dict["train_nodes"], + params_dict["test_nodes"], + ) + self.column_name = column_name + self.auto = 1 + + # Update the Column name of the auto flSetup + query = f"UPDATE FLsetup SET column_name = '{column_name}' WHERE name = '{self.name}'" + my_eng.execute(text(query)) + + + # Add Network to DB + # self.network.create_network() + + netid = get_netid_from_name(self.network.name) + + assert self.network.mtable_exists == 1 + node_names = pd.read_sql( + text(READ_DISTINCT_NODES_QUERY.format(column_name)), my_eng + ) + + nodes = [Node(val[0], 1) for val in node_names.values.tolist()] + + used_nodes = [] + + for node in nodes: + if node.name in train_nodes: + node.train = 1 + node.create_node(netid) + used_nodes.append(node) + if node.name in test_nodes: + node.train =0 + node.create_node(netid) + used_nodes.append(node) + return used_nodes
+ + +
+[docs] + def create_dataloader_from_node( + self, + node: Node, + output, + fill_strategy="mean", fit_encode=[], to_drop=[], + train_batch_size: int = 32, + test_batch_size: int = 1, + split_frac: float = 0.2, + dataset: Dataset = None, + + ): + """Create DataLoader from a Node. + + Args: + node (Node): The node from which to create DataLoader. + train_batch_size (int): The batch size for training data. + test_batch_size (int): The batch size for test data. + split_frac (float): The fraction of data to be used for training. + dataset (Dataset): The dataset to use. If None, the method will read the dataset from the node. + + Returns: + DataLoader: The DataLoader instances for training and testing. + """ + if dataset is None: + if self.column_name is not None: + dataset = process_data_after_reading( + node.get_dataset(self.column_name), output, fill_strategy=fill_strategy, fit_encode=fit_encode, to_drop=to_drop + ) + else: + dataset = process_data_after_reading( + node.get_dataset(), output, fill_strategy=fill_strategy, fit_encode=fit_encode, to_drop=to_drop) + + dataset_size = len(dataset) + traindata_size = int(dataset_size * (1 - split_frac)) + traindata, testdata = random_split( + dataset, [traindata_size, dataset_size - traindata_size] + ) + trainloader, testloader = DataLoader( + traindata, batch_size=train_batch_size + ), DataLoader(testdata, batch_size=test_batch_size) + return trainloader, testloader
+ + +
+[docs] + def create_federated_dataset( + self, output, fill_strategy="mean", fit_encode=[], to_drop=[], val_frac=0.1, test_frac=0.2 + ) -> FederatedDataset: + """Create a federated dataset. + + Args: + output (string): the output feature of the dataset + val_frac (float): The fraction of data to be used for validation. + test_frac (float): The fraction of data to be used for testing. + + Returns: + FederatedDataset: The FederatedDataset instance containing train, validation, and test data. + """ + + if not self.column_name: + to_drop.extend(["DataSetName" , "NodeId" , "DataSetId"]) + else : + to_drop.extend(["PatientId"]) + + netid = self.network.id + train_nodes = pd.read_sql( + text( + f"SELECT Nodes.NodeName FROM Nodes WHERE Nodes.NetId = {netid} AND Nodes.train = 1 " + ), + my_eng, + ) + test_nodes = pd.read_sql( + text( + f"SELECT Nodes.NodeName FROM Nodes WHERE Nodes.NetId = {netid} AND Nodes.train = 0 " + ), + my_eng, + ) + + train_nodes = [ + Node(val[0], 1, test_frac) for val in train_nodes.values.tolist() + ] + test_nodes = [Node(val[0], 0) for val in test_nodes.values.tolist()] + + trainloaders, valloaders, testloaders = [], [], [] + # if len(test_nodes) == 0: + # raise "test node empty" + if test_nodes is None: + _, testloader = self.create_dataloader_from_node( + train_nodes[0], output, fill_strategy=fill_strategy, fit_encode=fit_encode, to_drop=to_drop) + testloaders.append(testloader) + else: + for train_node in train_nodes: + train_valloader, testloader = self.create_dataloader_from_node( + train_node, output, fill_strategy=fill_strategy, + fit_encode=fit_encode, to_drop=to_drop,) + trainloader, valloader = self.create_dataloader_from_node( + train_node, + output, fill_strategy=fill_strategy, fit_encode=fit_encode, to_drop=to_drop, + test_batch_size=32, + split_frac=val_frac, + dataset=train_valloader.dataset, + ) + trainloaders.append(trainloader) + valloaders.append(valloader) + testloaders.append(testloader) + + for test_node in test_nodes: + _, testloader = self.create_dataloader_from_node( + test_node, output, fill_strategy=fill_strategy, fit_encode=fit_encode, to_drop=to_drop, split_frac=1.0 + ) + testloaders.append(testloader) + train_nodes_names = [node.name for node in train_nodes] + test_nodes_names = train_nodes_names + [ + node.name for node in test_nodes + ] + + # test_nodes_names = [ + # node.name for node in test_nodes + # ] + + # Add FlSetup on to the DataBase + # self.create() + + # self.network.update_network(FLsetupId=self.id) + fed_dataset = FederatedDataset( + self.name + "_Feddataset", + train_nodes_names, + test_nodes_names, + trainloaders, + valloaders, + testloaders, + ) + self.fed_dataset = fed_dataset + self.fed_dataset.create(self.id) + return self.fed_dataset
+ + + + + +
+[docs] + def get_flDataSet(self): + """ + Retrieve the federated dataset associated with the FL setup using the FL setup's name. + + Returns: + pandas.DataFrame: DataFrame containing the federated dataset information. + """ + return pd.read_sql( + text( + f"SELECT * FROM feddatasets WHERE FLsetupId = {get_flsetupid_from_name(self.name)}" + ), + my_eng, + )
+
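A sketch of the typical FLsetup flow, assuming the network's MasterDataset has already been created from a CSV containing a 'hospitalid' split column and a 'mortality' target; every name below is illustrative rather than prescribed by the package.

from MEDfl.NetManager.network import Network
from MEDfl.NetManager.flsetup import FLsetup

net = Network("eicu_net")
net.create_network()
setup = FLsetup("demo_setup", "auto split by hospital", net)
setup.create()
setup.create_nodes_from_master_dataset({
    "column_name": "hospitalid",        # illustrative split column
    "train_nodes": ["hosp_1", "hosp_2"],
    "test_nodes": ["hosp_3"],
})
fed_dataset = setup.create_federated_dataset(
    output="mortality", fill_strategy="mean", val_frac=0.1, test_frac=0.2
)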
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2023, MEDomics consortium.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/_modules/Medfl/NetManager/net_helper.html b/docs/_build/_modules/Medfl/NetManager/net_helper.html new file mode 100644 index 0000000..0ed8b9d --- /dev/null +++ b/docs/_build/_modules/Medfl/NetManager/net_helper.html @@ -0,0 +1,380 @@ + + + + + + MEDfl.NetManager.net_helper — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for MEDfl.NetManager.net_helper

+from sklearn.preprocessing import LabelEncoder
+from sklearn.impute import SimpleImputer
+
+from sqlalchemy import text
+
+import torch
+import pandas as pd
+from torch.utils.data import TensorDataset
+import numpy as np
+
+
+from scripts.base import my_eng
+
+
+
+[docs] +def is_str(data_df, row, x): + """ + Check if a column in a DataFrame is of type 'object' and convert the value accordingly. + + Args: + data_df (pandas.DataFrame): DataFrame containing the data. + row (pandas.Series): Data row. + x (str): Column name. + + Returns: + str or float: Processed value based on the column type. + """ + if data_df[x].dtype == "object": + x = f"'{row[x]}'" + else: + x = row[x] + return x
+ + + +
+[docs] +def process_eicu(data_df): + """ + Process eICU data by filling missing values with mean and replacing NaNs with 'Unknown'. + + Args: + data_df (pandas.DataFrame): Input data. + + Returns: + pandas.DataFrame: Processed data. + """ + # Identify numeric and non-numeric columns + numeric_columns = data_df.select_dtypes(include=[np.number]).columns + non_numeric_columns = data_df.select_dtypes(exclude=[np.number]).columns + + # Fill NaN in numeric columns with mean + data_df[numeric_columns] = data_df[numeric_columns].fillna(data_df[numeric_columns].mean()) + + # Fill NaN in non-numeric columns with 'Unknown' + data_df[non_numeric_columns] = data_df[non_numeric_columns].fillna('Unknown') + + try: + data_df = data_df.reset_index(drop=True) + except: + pass + + return data_df
+ + + +# remove indiserd columns after reading from the DB +
+[docs]
+def process_data_after_reading(data, output, fill_strategy="mean", fit_encode=[], to_drop=[]):
+    """
+    Process data after reading from the database, including encoding, dropping columns, and creating a PyTorch TensorDataset.
+
+    Args:
+        data (pandas.DataFrame): Input data.
+        output (str): Output column name.
+        fill_strategy (str, optional): Imputation strategy for missing values. Default is "mean".
+        fit_encode (list, optional): List of columns to be label-encoded. Default is an empty list.
+        to_drop (list, optional): List of columns to be dropped from the DataFrame. Default is an empty list.
+
+    Returns:
+        torch.utils.data.TensorDataset: Processed data as a PyTorch TensorDataset.
+    """
+
+    # Check that a dataset is actually assigned to the node
+    if len(data) == 0:
+        raise ValueError("Node doesn't have a dataset")
+
+    encoder = LabelEncoder()
+    # Label-encode the requested columns
+    for s in fit_encode:
+        try:
+            data[s] = encoder.fit_transform(data[s])
+        except Exception as e:
+            raise ValueError(f"Failed to encode column '{s}'") from e
+
+    # The output (target) column
+    y = data[output]
+
+    X = data
+
+    # Remove undesired columns read from the DB
+    for column in to_drop:
+        try:
+            X = X.drop([column], axis=1)
+        except Exception as e:
+            raise e
+
+    # Feature columns (everything except the output column)
+    features = [col for col in X.columns if col != output]
+
+    # Impute missing values using the chosen strategy
+    try:
+        imputer = SimpleImputer(strategy=fill_strategy)
+        X[features] = imputer.fit_transform(X[features])
+    except Exception as e:
+        print(f"Imputation skipped: {e}")
+
+    X = torch.tensor(X.values, dtype=torch.float32)
+    y = torch.tensor(y.values, dtype=torch.float32)
+    data = TensorDataset(X, y)
+
+    return data
+ + + +
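A hypothetical call on a DataFrame read from a node; `node_df` is a placeholder and the column names are illustrative.

from MEDfl.NetManager.net_helper import process_data_after_reading

dataset = process_data_after_reading(
    node_df,                      # placeholder: DataFrame returned by Node.get_dataset()
    output="mortality",
    fill_strategy="mean",
    fit_encode=["gender"],        # categorical columns to label-encode
    to_drop=["PatientId"],        # identifier columns to remove
)
X, y = dataset.tensors            # the resulting torch tensors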
+[docs] +def get_nodeid_from_name(name): + """ + Get the NodeId from the Nodes table based on the NodeName. + + Args: + name (str): Node name. + + Returns: + int or None: NodeId or None if not found. + """ + + NodeId = int( + pd.read_sql( + text(f"SELECT NodeId FROM Nodes WHERE NodeName = '{name}'"), my_eng + ).iloc[0, 0] + ) + return NodeId
+ + + +
+[docs] +def get_netid_from_name(name): + """ + Get the Network Id from the Networks table based on the NetName. + + Args: + name (str): Network name. + + Returns: + int or None: NetId or None if not found. + """ + try: + NetId = int( + pd.read_sql( + text(f"SELECT NetId FROM Networks WHERE NetName = '{name}'"), + my_eng, + ).iloc[0, 0] + ) + except: + NetId = None + return NetId
+ + + +
+[docs] +def get_flsetupid_from_name(name): + """ + Get the FLsetupId from the FLsetup table based on the FL setup name. + + Args: + name (str): FL setup name. + + Returns: + int or None: FLsetupId or None if not found. + """ + try: + id = int( + pd.read_sql( + text(f"SELECT FLsetupId FROM FLsetup WHERE name = '{name}'"), + my_eng, + ).iloc[0, 0] + ) + except: + id = None + return id
+ + + +
+[docs] +def get_flpipeline_from_name(name): + """ + Get the FLpipeline Id from the FLpipeline table based on the FL pipeline name. + + Args: + name (str): FL pipeline name. + + Returns: + int or None: FLpipelineId or None if not found. + """ + try: + id = int( + pd.read_sql( + text(f"SELECT id FROM FLpipeline WHERE name = '{name}'"), + my_eng, + ).iloc[0, 0] + ) + except: + id = None + return id
+ + + +
+[docs] +def get_feddataset_id_from_name(name): + """ + Get the Federated dataset Id from the FedDatasets table based on the federated dataset name. + + Args: + name (str): Federated dataset name. + + Returns: + int or None: FedId or None if not found. + """ + try: + id = int( + pd.read_sql( + text(f"SELECT FedId FROM FedDatasets WHERE name = '{name}'"), + my_eng, + ).iloc[0, 0] + ) + except: + id = None + return id
+ + + +
+[docs] +def master_table_exists(): + """ + Check if the MasterDataset table exists in the database. + + Returns: + bool: True if the table exists, False otherwise. + """ + + return pd.read_sql( + text( + " SELECT EXISTS ( SELECT TABLE_NAME FROM information_schema.TABLES WHERE TABLE_NAME = 'MasterDataset' )" + ), + my_eng, + ).values[0][0]
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2023, MEDomics consortium.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/_modules/Medfl/NetManager/network.html b/docs/_build/_modules/Medfl/NetManager/network.html new file mode 100644 index 0000000..e6c2f1b --- /dev/null +++ b/docs/_build/_modules/Medfl/NetManager/network.html @@ -0,0 +1,303 @@ + + + + + + MEDfl.NetManager.network — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for MEDfl.NetManager.network

+# src/MEDfl/NetManager/network.py
+
+from MEDfl.LearningManager.utils import *
+
+from .net_helper import *
+from .net_manager_queries import (CREATE_MASTER_DATASET_TABLE_QUERY,
+                                  CREATE_DATASETS_TABLE_QUERY,
+                                  DELETE_NETWORK_QUERY,
+                                  INSERT_NETWORK_QUERY, LIST_ALL_NODES_QUERY,
+                                  UPDATE_NETWORK_QUERY, GET_NETWORK_QUERY)
+from .node import Node
+import pandas as pd
+from MEDfl.LearningManager.utils import params
+
+
+
+[docs] +class Network: + """ + A class representing a network. + + Attributes: + name (str): The name of the network. + mtable_exists (int): An integer flag indicating whether the MasterDataset table exists (1) or not (0). + """ + +
+[docs] + def __init__(self, name: str = ""): + """ + Initialize a Network instance. + + Parameters: + name (str): The name of the network. + """ + self.name = name + self.mtable_exists = int(master_table_exists()) + self.validate()
+ + +
+[docs] + def validate(self): + """Validate name""" + + if not isinstance(self.name, str): + raise TypeError("name argument must be a string")
+ + +
+[docs] + def create_network(self): + """Create a new network in the database.""" + my_eng.execute(text(INSERT_NETWORK_QUERY.format(name=self.name))) + self.id = get_netid_from_name(self.name)
+ + +
+[docs] + def use_network(self, network_name: str): + """Use a network in the database. + + Parameters: + network_name (str): The name of the network to use. + + Returns: + Network or None: An instance of the Network class if the network exists, else None. + + """ + network = pd.read_sql( + text(GET_NETWORK_QUERY.format(name=network_name)), + my_eng, + ) + + if (network.NetId[0]): + self.name = network.NetName[0] + self.id = network.NetId[0] + self.mtable_exists = int(master_table_exists()) + self.validate() + return self + else: + return None
+ + +
+[docs] + def delete_network(self): + """Delete the network from the database.""" + my_eng.execute(text(DELETE_NETWORK_QUERY.format(name=self.name)))
+ + +
+[docs] + def update_network(self, FLsetupId: int): + """Update the network's FLsetupId in the database. + + Parameters: + FLsetupId (int): The FLsetupId to update. + """ + my_eng.execute( + text(UPDATE_NETWORK_QUERY.format(FLsetupId=FLsetupId, id=self.id)) + )
+ + +
+[docs] + def add_node(self, node: Node): + """Add a node to the network. + + Parameters: + node (Node): The node to add. + """ + node.create_node(self.id)
+ + +
+[docs] + def list_allnodes(self): + """List all nodes in the network. + + Parameters: + None + + Returns: + DataFrame: A DataFrame containing information about all nodes in the network. + + """ + return pd.read_sql( + text(LIST_ALL_NODES_QUERY.format(name=self.name)), my_eng + )
+ + +
+[docs] + def create_master_dataset(self, path_to_csv: str = params['path_to_master_csv']): + """ + Create the MasterDataset table and insert dataset values. + + :param path_to_csv: Path to the CSV file containing the dataset. + """ + print(path_to_csv) + # Read the CSV file into a Pandas DataFrame + data_df = pd.read_csv(path_to_csv) + + # Process the data if needed (e.g., handle missing values, encode categorical variables) + # ... + + # Check if the MasterDataset table exists + + if self.mtable_exists != 1: + columns = data_df.columns.tolist() + columns_str = ",\n".join( + [ + f"{col} {column_map[str(data_df[col].dtype)]}" + for col in columns + ] + ) + my_eng.execute( + text(CREATE_MASTER_DATASET_TABLE_QUERY.format(columns_str)) + ) + my_eng.execute(text(CREATE_DATASETS_TABLE_QUERY.format(columns_str))) + + # Get the list of columns in the DataFrame + + data_df = process_eicu(data_df) + # Insert the dataset values into the MasterDataset table + + for index, row in data_df.iterrows(): + query_1 = "INSERT INTO MasterDataset(" + "".join( + f"{x}," for x in columns + ) + query_2 = f"VALUES (" + "".join( + f"{is_str(data_df, row, x)}," for x in columns + ) + query = query_1[:-1] + ")" + query_2[:-1] + ")" + my_eng.execute(text(query)) + + # Set mtable_exists flag to True + self.mtable_exists = 1
+ + +
+[docs] + @staticmethod + def list_allnetworks(): + """List all networks in the database. + Returns: + DataFrame: A DataFrame containing information about all networks in the database. + + """ + return pd.read_sql(text("SELECT * FROM Networks"), my_eng)
+
+ +
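A hypothetical usage sketch, assuming the database and params['path_to_master_csv'] are configured; the network name is illustrative.

from MEDfl.NetManager.network import Network

net = Network("eicu_net")          # illustrative name
net.create_network()
net.create_master_dataset()        # builds MasterDataset from the configured CSV
print(net.list_allnodes())         # nodes registered under this network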
+ +
+
+
+ +
+ +
+

© Copyright 2023, MEDomics consortium.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/_modules/Medfl/NetManager/node.html b/docs/_build/_modules/Medfl/NetManager/node.html new file mode 100644 index 0000000..d92d8f8 --- /dev/null +++ b/docs/_build/_modules/Medfl/NetManager/node.html @@ -0,0 +1,330 @@ + + + + + + MEDfl.NetManager.node — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for MEDfl.NetManager.node

+import pandas as pd
+
+from scripts.base import *
+from .net_helper import *
+from .net_manager_queries import *
+from MEDfl.LearningManager.utils import params
+
+
+
+[docs] +class Node: + """ + A class representing a node in the network. + + Attributes: + name (str): The name of the node. + train (int): An integer flag representing whether the node is used for training (1) or testing (0). + test_fraction (float, optional): The fraction of data used for testing when train=1. Default is 0.2. + """ + +
+[docs] + def __init__( + self, name: str, train: int, test_fraction: float = 0.2, engine=my_eng + ): + """ + Initialize a Node instance. + + Parameters: + name (str): The name of the node. + train (int): An integer flag representing whether the node is used for training (1) or testing (0). + test_fraction (float, optional): The fraction of data used for testing when train=1. Default is 0.2. + """ + self.name = name + self.train = train + self.test_fraction = 1.0 if self.train == 0 else test_fraction + self.engine = engine
+ + +
+[docs] + def validate(self): + """Validate name, train, test_fraction""" + if not isinstance(self.name, str): + raise TypeError("name argument must be a string") + + if not isinstance(self.train, int): + raise TypeError("train argument must be an int") + + if not isinstance(self.test_fraction, float): + raise TypeError("test_fraction argument must be a float")
+ + +
+[docs] + def create_node(self, NetId: int): + """Create a node in the database. + Parameters: + NetId (int): The ID of the network to which the node belongs. + + Returns: + None + """ + self.engine.execute( + text(INSERT_NODE_QUERY.format(self.name, NetId, self.train)) + )
+ + +
+[docs] + def delete_node(self): + """Delete the node from the database.""" + self.engine.execute(text(DELETE_NODE_QUERY.format(self.name)))
+ + +
+[docs]
+    def check_dataset_compatibility(self, data_df):
+        """Check if the dataset is compatible with the master dataset.
+        Parameters:
+            data_df (DataFrame): The dataset to check.
+
+        Returns:
+            None
+        """
+        if master_table_exists() != 1:
+            print("MasterDataset doesn't exist")
+        else:
+            columns = data_df.columns.tolist()
+            # get master_dataset columns
+            master_table_columns = pd.read_sql(
+                text(SELECT_MASTER_COLUMNS_QUERY), self.engine
+            ).columns.tolist()
+            # A bare list comprehension is always truthy; use all() so the
+            # column-by-column comparison is actually enforced
+            assert all(
+                x == y for x, y in zip(master_table_columns, columns)
+            ), "Dataset columns do not match the MasterDataset schema"
+ + +
+[docs] + def update_node(self): + """Update the node information (not implemented).""" + pass
+ + +
+[docs] + def get_dataset(self, column_name: str = None): + """Get the dataset for the node based on the given column name. + Parameters: + column_name (str, optional): The column name to filter the dataset. Default is None. + + Returns: + DataFrame: The dataset associated with the node. + """ + NodeId = get_nodeid_from_name(self.name) + if column_name is not None: + + node_dataset = pd.read_sql( + text( + SELECT_DATASET_BY_COLUMN_QUERY.format( + column_name, self.name + ) + ), + self.engine, + ) + + else: + node_dataset = pd.read_sql( + text(SELECT_DATASET_BY_NODE_ID_QUERY.format(NodeId)), + self.engine, + ) + return node_dataset
+ + +
+[docs] + def upload_dataset(self, dataset_name: str, path_to_csv: str = params['path_to_test_csv']): + """Upload the dataset to the database for the node. + Parameters: + dataset_name (str): The name of the dataset. + path_to_csv (str, optional): Path to the CSV file containing the dataset. Default is the path in params. + + Returns: + None + """ + data_df = pd.read_csv(path_to_csv) + + nodeId = get_nodeid_from_name(self.name) + columns = data_df.columns.tolist() + self.check_dataset_compatibility(data_df) + + data_df = process_eicu(data_df) + for index, row in data_df.iterrows(): + query_1 = "INSERT INTO DataSets(DataSetName,nodeId," + "".join( + f"{x}," for x in columns + ) + query_2 = f" VALUES ('{dataset_name}',{nodeId}, " + "".join( + f"{is_str(data_df, row, x)}," for x in columns + ) + query = query_1[:-1] + ")" + query_2[:-1] + ")" + self.engine.execute(text(query))
+ + +
+[docs] + def assign_dataset(self, dataset_name:str): + """Assigning existing dataSet to node + Parameters: + dataset_name (str): The name of the dataset to assign. + + Returns: + None + """ + + nodeId = get_nodeid_from_name(self.name) + query = f"UPDATE DataSets SET nodeId = {nodeId} WHERE DataSetName = '{dataset_name}'" + self.engine.execute(text(query))
+ + +
+[docs]
+    def unassign_dataset(self, dataset_name: str):
+        """Unassign an existing dataset from the node.
+        Parameters:
+            dataset_name (str): The name of the dataset to unassign.
+
+        Returns:
+            None
+        """
+
+        query = f"UPDATE DataSets SET nodeId = {-1} WHERE DataSetName = '{dataset_name}'"
+        self.engine.execute(text(query))
+ + +
+[docs] + def list_alldatasets(self): + """List all datasets associated with the node. + Returns: + DataFrame: A DataFrame containing information about all datasets associated with the node. + + """ + return pd.read_sql( + text(SELECT_ALL_DATASETS_QUERY.format(self.name)), my_eng + )
+ + +
+[docs] + @staticmethod + def list_allnodes(): + """List all nodes in the database. + Returns: + DataFrame: A DataFrame containing information about all nodes in the database. + + """ + query = text(SELECT_ALL_NODES_QUERY) + res = pd.read_sql(query, my_eng) + return res
+
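A hypothetical usage sketch of Node; the node name, network id, dataset name, and CSV path are placeholders.

from MEDfl.NetManager.node import Node

node = Node(name="hosp_1", train=1, test_fraction=0.2)
node.validate()
node.create_node(NetId=1)                                  # attach the node to an existing network
node.upload_dataset("hosp_1_data", "data/hospital_1.csv")  # placeholder CSV path
df = node.get_dataset()                                    # rows assigned to this node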
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2023, MEDomics consortium.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/_modules/index.html b/docs/_build/_modules/index.html new file mode 100644 index 0000000..a3c3f28 --- /dev/null +++ b/docs/_build/_modules/index.html @@ -0,0 +1,123 @@ + + + + + + Overview: module code — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ + +
+
+ + + + \ No newline at end of file diff --git a/docs/_build/_sources/Medfl.LearningManager.rst.txt b/docs/_build/_sources/Medfl.LearningManager.rst.txt new file mode 100644 index 0000000..4f9513e --- /dev/null +++ b/docs/_build/_sources/Medfl.LearningManager.rst.txt @@ -0,0 +1,85 @@ +MEDfl.LearningManager package +============================= + +Submodules +---------- + +MEDfl.LearningManager.client module +----------------------------------- + +.. automodule:: MEDfl.LearningManager.client + :members: + :undoc-members: + :show-inheritance: + +MEDfl.LearningManager.dynamicModal module +----------------------------------------- + +.. automodule:: MEDfl.LearningManager.dynamicModal + :members: + :undoc-members: + :show-inheritance: + +MEDfl.LearningManager.federated\_dataset module +----------------------------------------------- + +.. automodule:: MEDfl.LearningManager.federated_dataset + :members: + :undoc-members: + :show-inheritance: + +MEDfl.LearningManager.flpipeline module +--------------------------------------- + +.. automodule:: MEDfl.LearningManager.flpipeline + :members: + :undoc-members: + :show-inheritance: + +MEDfl.LearningManager.model module +---------------------------------- + +.. automodule:: MEDfl.LearningManager.model + :members: + :undoc-members: + :show-inheritance: + +MEDfl.LearningManager.plot module +--------------------------------- + +.. automodule:: MEDfl.LearningManager.plot + :members: + :undoc-members: + :show-inheritance: + +MEDfl.LearningManager.server module +----------------------------------- + +.. automodule:: MEDfl.LearningManager.server + :members: + :undoc-members: + :show-inheritance: + +MEDfl.LearningManager.strategy module +------------------------------------- + +.. automodule:: MEDfl.LearningManager.strategy + :members: + :undoc-members: + :show-inheritance: + +MEDfl.LearningManager.utils module +---------------------------------- + +.. automodule:: MEDfl.LearningManager.utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: MEDfl.LearningManager + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/_sources/Medfl.NetManager.rst.txt b/docs/_build/_sources/Medfl.NetManager.rst.txt new file mode 100644 index 0000000..d36ef24 --- /dev/null +++ b/docs/_build/_sources/Medfl.NetManager.rst.txt @@ -0,0 +1,61 @@ +MEDfl.NetManager package +======================== + +Submodules +---------- + +MEDfl.NetManager.dataset module +------------------------------- + +.. automodule:: MEDfl.NetManager.dataset + :members: + :undoc-members: + :show-inheritance: + +MEDfl.NetManager.flsetup module +------------------------------- + +.. automodule:: MEDfl.NetManager.flsetup + :members: + :undoc-members: + :show-inheritance: + +MEDfl.NetManager.net\_helper module +----------------------------------- + +.. automodule:: MEDfl.NetManager.net_helper + :members: + :undoc-members: + :show-inheritance: + +MEDfl.NetManager.net\_manager\_queries module +--------------------------------------------- + +.. automodule:: MEDfl.NetManager.net_manager_queries + :members: + :undoc-members: + :show-inheritance: + +MEDfl.NetManager.network module +------------------------------- + +.. automodule:: MEDfl.NetManager.network + :members: + :undoc-members: + :show-inheritance: + +MEDfl.NetManager.node module +---------------------------- + +.. automodule:: MEDfl.NetManager.node + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: MEDfl.NetManager + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/_sources/Medfl.rst.txt b/docs/_build/_sources/Medfl.rst.txt new file mode 100644 index 0000000..95b5d99 --- /dev/null +++ b/docs/_build/_sources/Medfl.rst.txt @@ -0,0 +1,19 @@ +MEDfl package +============= + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + MEDfl.LearningManager + MEDfl.NetManager + +Module contents +--------------- + +.. automodule:: MEDfl + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/_sources/index.rst.txt b/docs/_build/_sources/index.rst.txt new file mode 100644 index 0000000..53a2125 --- /dev/null +++ b/docs/_build/_sources/index.rst.txt @@ -0,0 +1,20 @@ +.. MEDfl-package-docs documentation master file, created by + sphinx-quickstart on Wed Jan 10 21:46:56 2024. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to MEDfl-package-docs's documentation! +============================================== + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + modules + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/_build/_sources/modules.rst.txt b/docs/_build/_sources/modules.rst.txt new file mode 100644 index 0000000..e332f8f --- /dev/null +++ b/docs/_build/_sources/modules.rst.txt @@ -0,0 +1,7 @@ +MEDfl +===== + +.. toctree:: + :maxdepth: 4 + + MEDfl diff --git a/docs/_build/_static/MEDomics.png b/docs/_build/_static/MEDomics.png new file mode 100644 index 0000000..4456fda Binary files /dev/null and b/docs/_build/_static/MEDomics.png differ diff --git a/docs/_build/_static/_sphinx_javascript_frameworks_compat.js b/docs/_build/_static/_sphinx_javascript_frameworks_compat.js new file mode 100644 index 0000000..8141580 --- /dev/null +++ b/docs/_build/_static/_sphinx_javascript_frameworks_compat.js @@ -0,0 +1,123 @@ +/* Compatability shim for jQuery and underscores.js. + * + * Copyright Sphinx contributors + * Released under the two clause BSD licence + */ + +/** + * small helper function to urldecode strings + * + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL + */ +jQuery.urldecode = function(x) { + if (!x) { + return x + } + return decodeURIComponent(x.replace(/\+/g, ' ')); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. 
+ */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && + !jQuery(node.parentNode).hasClass(className) && + !jQuery(node.parentNode).hasClass("nohighlight")) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + var bbox = node.parentElement.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. + */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} diff --git a/docs/_build/_static/basic.css b/docs/_build/_static/basic.css new file mode 100644 index 0000000..30fee9d --- /dev/null +++ b/docs/_build/_static/basic.css @@ -0,0 +1,925 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + 
+div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +a:visited { + color: #551A8B; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 
8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + 
+dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +.sig dd { + margin-top: 0px; + margin-bottom: 0px; +} + +.sig dl { + margin-top: 0px; + margin-bottom: 0px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +.translated { + background-color: rgba(207, 255, 207, 0.2) +} + +.untranslated { + background-color: rgba(255, 207, 207, 0.2) +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} 
+ +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/docs/_build/_static/css/badge_only.css b/docs/_build/_static/css/badge_only.css new file mode 100644 index 0000000..c718cee --- /dev/null +++ b/docs/_build/_static/css/badge_only.css @@ -0,0 +1 @@ +.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd 
a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} \ No newline at end of file diff --git a/docs/_build/_static/css/fonts/Roboto-Slab-Bold.woff b/docs/_build/_static/css/fonts/Roboto-Slab-Bold.woff new file mode 100644 index 0000000..6cb6000 Binary files /dev/null and b/docs/_build/_static/css/fonts/Roboto-Slab-Bold.woff differ diff --git a/docs/_build/_static/css/fonts/Roboto-Slab-Bold.woff2 b/docs/_build/_static/css/fonts/Roboto-Slab-Bold.woff2 new file mode 100644 index 0000000..7059e23 Binary files /dev/null and b/docs/_build/_static/css/fonts/Roboto-Slab-Bold.woff2 differ diff --git a/docs/_build/_static/css/fonts/Roboto-Slab-Regular.woff b/docs/_build/_static/css/fonts/Roboto-Slab-Regular.woff new file mode 100644 index 0000000..f815f63 Binary files /dev/null and b/docs/_build/_static/css/fonts/Roboto-Slab-Regular.woff differ diff --git a/docs/_build/_static/css/fonts/Roboto-Slab-Regular.woff2 b/docs/_build/_static/css/fonts/Roboto-Slab-Regular.woff2 new file mode 100644 index 0000000..f2c76e5 Binary files /dev/null and b/docs/_build/_static/css/fonts/Roboto-Slab-Regular.woff2 differ diff --git a/docs/_build/_static/css/fonts/fontawesome-webfont.eot b/docs/_build/_static/css/fonts/fontawesome-webfont.eot new file mode 100644 index 0000000..e9f60ca Binary files /dev/null and b/docs/_build/_static/css/fonts/fontawesome-webfont.eot differ diff --git a/docs/_build/_static/css/fonts/fontawesome-webfont.svg b/docs/_build/_static/css/fonts/fontawesome-webfont.svg new file mode 100644 index 0000000..855c845 --- /dev/null +++ b/docs/_build/_static/css/fonts/fontawesome-webfont.svg @@ -0,0 +1,2671 @@ + + + + +Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016 + By ,,, +Copyright Dave Gandy 2016. All rights reserved. 
+ [remainder of fontawesome-webfont.svg (~2,660 lines of font metadata and <glyph> path markup) was stripped during text extraction and is not reproduced here; only the FontForge/Font Awesome header above survives] diff --git a/docs/_build/_static/css/fonts/fontawesome-webfont.ttf b/docs/_build/_static/css/fonts/fontawesome-webfont.ttf new file mode 100644 index 0000000..35acda2 Binary files /dev/null and b/docs/_build/_static/css/fonts/fontawesome-webfont.ttf differ diff --git a/docs/_build/_static/css/fonts/fontawesome-webfont.woff b/docs/_build/_static/css/fonts/fontawesome-webfont.woff new file mode 100644 index 0000000..400014a Binary files /dev/null and b/docs/_build/_static/css/fonts/fontawesome-webfont.woff differ diff --git a/docs/_build/_static/css/fonts/fontawesome-webfont.woff2 b/docs/_build/_static/css/fonts/fontawesome-webfont.woff2 new file mode 100644 index 0000000..4d13fc6 Binary files /dev/null and b/docs/_build/_static/css/fonts/fontawesome-webfont.woff2 differ diff --git a/docs/_build/_static/css/fonts/lato-bold-italic.woff b/docs/_build/_static/css/fonts/lato-bold-italic.woff new file mode 100644 index 0000000..88ad05b Binary files /dev/null and b/docs/_build/_static/css/fonts/lato-bold-italic.woff differ diff --git a/docs/_build/_static/css/fonts/lato-bold-italic.woff2 b/docs/_build/_static/css/fonts/lato-bold-italic.woff2 new file mode 100644 index 0000000..c4e3d80 Binary files /dev/null and b/docs/_build/_static/css/fonts/lato-bold-italic.woff2 differ diff --git a/docs/_build/_static/css/fonts/lato-bold.woff b/docs/_build/_static/css/fonts/lato-bold.woff new file mode 100644 index 0000000..c6dff51 Binary files /dev/null and b/docs/_build/_static/css/fonts/lato-bold.woff differ diff --git a/docs/_build/_static/css/fonts/lato-bold.woff2 b/docs/_build/_static/css/fonts/lato-bold.woff2 new file mode 100644 index 0000000..bb19504 Binary files /dev/null and b/docs/_build/_static/css/fonts/lato-bold.woff2 differ diff --git a/docs/_build/_static/css/fonts/lato-normal-italic.woff b/docs/_build/_static/css/fonts/lato-normal-italic.woff new file mode 100644 index 0000000..76114bc Binary files /dev/null and b/docs/_build/_static/css/fonts/lato-normal-italic.woff differ diff --git a/docs/_build/_static/css/fonts/lato-normal-italic.woff2
b/docs/_build/_static/css/fonts/lato-normal-italic.woff2 new file mode 100644 index 0000000..3404f37 Binary files /dev/null and b/docs/_build/_static/css/fonts/lato-normal-italic.woff2 differ diff --git a/docs/_build/_static/css/fonts/lato-normal.woff b/docs/_build/_static/css/fonts/lato-normal.woff new file mode 100644 index 0000000..ae1307f Binary files /dev/null and b/docs/_build/_static/css/fonts/lato-normal.woff differ diff --git a/docs/_build/_static/css/fonts/lato-normal.woff2 b/docs/_build/_static/css/fonts/lato-normal.woff2 new file mode 100644 index 0000000..3bf9843 Binary files /dev/null and b/docs/_build/_static/css/fonts/lato-normal.woff2 differ diff --git a/docs/_build/_static/css/theme.css b/docs/_build/_static/css/theme.css new file mode 100644 index 0000000..19a446a --- /dev/null +++ b/docs/_build/_static/css/theme.css @@ -0,0 +1,4 @@ +html{box-sizing:border-box}*,:after,:before{box-sizing:inherit}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}[hidden],audio:not([controls]){display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;text-decoration:none}ins,mark{color:#000}mark{background:#ff0;font-style:italic;font-weight:700}.rst-content code,.rst-content tt,code,kbd,pre,samp{font-family:monospace,serif;_font-family:courier new,monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:after,q:before{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}dl,ol,ul{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure,form{margin:0}label{cursor:pointer}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type=button],input[type=reset],input[type=submit]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}textarea{resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none!important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{body,html,section{background:none!important}*{box-shadow:none!important;text-shadow:none!important;filter:none!important;-ms-filter:none!important}a,a:visited{text-decoration:underline}.ir 
a:after,a[href^="#"]:after,a[href^="javascript:"]:after{content:""}blockquote,pre{page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}.rst-content .toctree-wrapper>p.caption,h2,h3,p{orphans:3;widows:3}.rst-content .toctree-wrapper>p.caption,h2,h3{page-break-after:avoid}}.btn,.fa:before,.icon:before,.rst-content .admonition,.rst-content .admonition-title:before,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .code-block-caption .headerlink:before,.rst-content .danger,.rst-content .eqno .headerlink:before,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content p .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-alert,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before,.wy-menu-vertical li button.toctree-expand:before,input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week],select,textarea{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}/*! 
+ * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */@font-face{font-family:FontAwesome;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713);src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix&v=4.7.0) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#fontawesomeregular) format("svg");font-weight:400;font-style:normal}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li button.toctree-expand{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14286em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14286em;width:2.14286em;top:.14286em;text-align:center}.fa-li.fa-lg{left:-1.85714em}.fa-border{padding:.2em .25em .15em;border:.08em solid #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa-pull-left.icon,.fa.fa-pull-left,.rst-content .code-block-caption .fa-pull-left.headerlink,.rst-content .eqno .fa-pull-left.headerlink,.rst-content .fa-pull-left.admonition-title,.rst-content code.download span.fa-pull-left:first-child,.rst-content dl dt .fa-pull-left.headerlink,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content p .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.wy-menu-vertical li.current>a button.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-left.toctree-expand,.wy-menu-vertical li button.fa-pull-left.toctree-expand{margin-right:.3em}.fa-pull-right.icon,.fa.fa-pull-right,.rst-content .code-block-caption .fa-pull-right.headerlink,.rst-content .eqno .fa-pull-right.headerlink,.rst-content .fa-pull-right.admonition-title,.rst-content code.download span.fa-pull-right:first-child,.rst-content dl dt .fa-pull-right.headerlink,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 
.fa-pull-right.headerlink,.rst-content p .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.wy-menu-vertical li.current>a button.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-right.toctree-expand,.wy-menu-vertical li button.fa-pull-right.toctree-expand{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.pull-left.icon,.rst-content .code-block-caption .pull-left.headerlink,.rst-content .eqno .pull-left.headerlink,.rst-content .pull-left.admonition-title,.rst-content code.download span.pull-left:first-child,.rst-content dl dt .pull-left.headerlink,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content p .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.wy-menu-vertical li.current>a button.pull-left.toctree-expand,.wy-menu-vertical li.on a button.pull-left.toctree-expand,.wy-menu-vertical li button.pull-left.toctree-expand{margin-right:.3em}.fa.pull-right,.pull-right.icon,.rst-content .code-block-caption .pull-right.headerlink,.rst-content .eqno .pull-right.headerlink,.rst-content .pull-right.admonition-title,.rst-content code.download span.pull-right:first-child,.rst-content dl dt .pull-right.headerlink,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content p .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.wy-menu-vertical li.current>a button.pull-right.toctree-expand,.wy-menu-vertical li.on a button.pull-right.toctree-expand,.wy-menu-vertical li button.pull-right.toctree-expand{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);-ms-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scaleY(-1);-ms-transform:scaleY(-1);transform:scaleY(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root 
.fa-rotate-270{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-close:before,.fa-remove:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-cog:before,.fa-gear:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-repeat:before,.fa-rotate-right:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-image:before,.fa-photo:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{content:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success 
.wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.rst-content .admonition-title:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-exclamation-triangle:before,.fa-warning:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-cogs:before,.fa-gears:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{content:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-floppy-o:before,.fa-save:before{content:""}.fa
-square:before{content:""}.fa-bars:before,.fa-navicon:before,.fa-reorder:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magic:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.icon-caret-down:before,.wy-dropdown .caret:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-sort:before,.fa-unsorted:before{content:""}.fa-sort-desc:before,.fa-sort-down:before{content:""}.fa-sort-asc:before,.fa-sort-up:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-gavel:before,.fa-legal:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-bolt:before,.fa-flash:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-clipboard:before,.fa-paste:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-laptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-chain-broken:before,.fa-unlink:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:
before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-ellipsis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:""}.fa-minus-square-o:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-caret-square-o-down:before,.fa-toggle-down:before{content:""}.fa-caret-square-o-up:before,.fa-toggle-up:before{content:""}.fa-caret-square-o-right:before,.fa-toggle-right:before{content:""}.fa-eur:before,.fa-euro:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-inr:before,.fa-rupee:before{content:""}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen:before{content:""}.fa-rouble:before,.fa-rub:before,.fa-ruble:before{content:""}.fa-krw:before,.fa-won:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""}.fa-arrow-circle-o-left:before{content:""}.fa-caret-square-o-left:before,.fa-toggle-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-
vimeo-square:before{content:""}.fa-try:before,.fa-turkish-lira:before{content:""}.fa-plus-square-o:before,.wy-menu-vertical li button.toctree-expand:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-bank:before,.fa-institution:before,.fa-university:before{content:""}.fa-graduation-cap:before,.fa-mortar-board:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper-pp:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:before{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-square:before{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-image-o:before,.fa-file-photo-o:before,.fa-file-picture-o:before{content:""}.fa-file-archive-o:before,.fa-file-zip-o:before{content:""}.fa-file-audio-o:before,.fa-file-sound-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-ring:before,.fa-life-saver:before,.fa-support:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-rebel:before,.fa-resistance:before{content:""}.fa-empire:before,.fa-ge:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-hacker-news:before,.fa-y-combinator-square:before,.fa-yc-square:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-paper-plane:before,.fa-send:before{content:""}.fa-paper-plane-o:before,.fa-send-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-futbol-o:before,.fa-soccer-ball-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{content:""}.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{content:""}.fa-cc-mastercard:before{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-b
ell-slash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.fa-at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:before{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-ils:before,.fa-shekel:before,.fa-sheqel:before{content:""}.fa-meanpath:before{content:""}.fa-buysellads:before{content:""}.fa-connectdevelop:before{content:""}.fa-dashcube:before{content:""}.fa-forumbee:before{content:""}.fa-leanpub:before{content:""}.fa-sellsy:before{content:""}.fa-shirtsinbulk:before{content:""}.fa-simplybuilt:before{content:""}.fa-skyatlas:before{content:""}.fa-cart-plus:before{content:""}.fa-cart-arrow-down:before{content:""}.fa-diamond:before{content:""}.fa-ship:before{content:""}.fa-user-secret:before{content:""}.fa-motorcycle:before{content:""}.fa-street-view:before{content:""}.fa-heartbeat:before{content:""}.fa-venus:before{content:""}.fa-mars:before{content:""}.fa-mercury:before{content:""}.fa-intersex:before,.fa-transgender:before{content:""}.fa-transgender-alt:before{content:""}.fa-venus-double:before{content:""}.fa-mars-double:before{content:""}.fa-venus-mars:before{content:""}.fa-mars-stroke:before{content:""}.fa-mars-stroke-v:before{content:""}.fa-mars-stroke-h:before{content:""}.fa-neuter:before{content:""}.fa-genderless:before{content:""}.fa-facebook-official:before{content:""}.fa-pinterest-p:before{content:""}.fa-whatsapp:before{content:""}.fa-server:before{content:""}.fa-user-plus:before{content:""}.fa-user-times:before{content:""}.fa-bed:before,.fa-hotel:before{content:""}.fa-viacoin:before{content:""}.fa-train:before{content:""}.fa-subway:before{content:""}.fa-medium:before{content:""}.fa-y-combinator:before,.fa-yc:before{content:""}.fa-optin-monster:before{content:""}.fa-opencart:before{content:""}.fa-expeditedssl:before{content:""}.fa-battery-4:before,.fa-battery-full:before,.fa-battery:before{content:""}.fa-battery-3:before,.fa-battery-three-quarters:before{content:""}.fa-battery-2:before,.fa-battery-half:before{content:""}.fa-battery-1:before,.fa-battery-quarter:before{content:""}.fa-battery-0:before,.fa-battery-empty:before{content:""}.fa-mouse-pointer:before{content:""}.fa-i-cursor:before{content:""}.fa-object-group:before{content:""}.fa-object-ungroup:before{content:""}.fa-sticky-note:before{content:""}.fa-sticky-note-o:before{content:""}.fa-cc-jcb:before{content:""}.fa-cc-diners-club:before{content:""}.fa-clone:before{content:""}.fa-balance-scale:before{content:""}.fa-hourglass-o:before{content:""}.fa-hourglass-1:before,.fa-hourglass-start:before{content:""}.fa-hourglass-2:before,.fa-hourglass-half:before{content:""}.fa-hourglass-3:before,.fa-hourglass-end:before{content:""}.fa-hourglass:before{content:""}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:""}.fa-hand-paper-o:before,.fa-hand-stop-o:before{content:""}.fa-hand-scissors-o:before{content:""}.fa-hand-lizard-o:before{content:""}.fa-hand-spock-o:before{content:""}.fa-hand-pointer-o:before{content:""}.fa-hand-peace-o:before{content:""}.fa-trademark:before{content:""}.fa-register
ed:before{content:""}.fa-creative-commons:before{content:""}.fa-gg:before{content:""}.fa-gg-circle:before{content:""}.fa-tripadvisor:before{content:""}.fa-odnoklassniki:before{content:""}.fa-odnoklassniki-square:before{content:""}.fa-get-pocket:before{content:""}.fa-wikipedia-w:before{content:""}.fa-safari:before{content:""}.fa-chrome:before{content:""}.fa-firefox:before{content:""}.fa-opera:before{content:""}.fa-internet-explorer:before{content:""}.fa-television:before,.fa-tv:before{content:""}.fa-contao:before{content:""}.fa-500px:before{content:""}.fa-amazon:before{content:""}.fa-calendar-plus-o:before{content:""}.fa-calendar-minus-o:before{content:""}.fa-calendar-times-o:before{content:""}.fa-calendar-check-o:before{content:""}.fa-industry:before{content:""}.fa-map-pin:before{content:""}.fa-map-signs:before{content:""}.fa-map-o:before{content:""}.fa-map:before{content:""}.fa-commenting:before{content:""}.fa-commenting-o:before{content:""}.fa-houzz:before{content:""}.fa-vimeo:before{content:""}.fa-black-tie:before{content:""}.fa-fonticons:before{content:""}.fa-reddit-alien:before{content:""}.fa-edge:before{content:""}.fa-credit-card-alt:before{content:""}.fa-codiepie:before{content:""}.fa-modx:before{content:""}.fa-fort-awesome:before{content:""}.fa-usb:before{content:""}.fa-product-hunt:before{content:""}.fa-mixcloud:before{content:""}.fa-scribd:before{content:""}.fa-pause-circle:before{content:""}.fa-pause-circle-o:before{content:""}.fa-stop-circle:before{content:""}.fa-stop-circle-o:before{content:""}.fa-shopping-bag:before{content:""}.fa-shopping-basket:before{content:""}.fa-hashtag:before{content:""}.fa-bluetooth:before{content:""}.fa-bluetooth-b:before{content:""}.fa-percent:before{content:""}.fa-gitlab:before,.icon-gitlab:before{content:""}.fa-wpbeginner:before{content:""}.fa-wpforms:before{content:""}.fa-envira:before{content:""}.fa-universal-access:before{content:""}.fa-wheelchair-alt:before{content:""}.fa-question-circle-o:before{content:""}.fa-blind:before{content:""}.fa-audio-description:before{content:""}.fa-volume-control-phone:before{content:""}.fa-braille:before{content:""}.fa-assistive-listening-systems:before{content:""}.fa-american-sign-language-interpreting:before,.fa-asl-interpreting:before{content:""}.fa-deaf:before,.fa-deafness:before,.fa-hard-of-hearing:before{content:""}.fa-glide:before{content:""}.fa-glide-g:before{content:""}.fa-sign-language:before,.fa-signing:before{content:""}.fa-low-vision:before{content:""}.fa-viadeo:before{content:""}.fa-viadeo-square:before{content:""}.fa-snapchat:before{content:""}.fa-snapchat-ghost:before{content:""}.fa-snapchat-square:before{content:""}.fa-pied-piper:before{content:""}.fa-first-order:before{content:""}.fa-yoast:before{content:""}.fa-themeisle:before{content:""}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:""}.fa-fa:before,.fa-font-awesome:before{content:""}.fa-handshake-o:before{content:""}.fa-envelope-open:before{content:""}.fa-envelope-open-o:before{content:""}.fa-linode:before{content:""}.fa-address-book:before{content:""}.fa-address-book-o:before{content:""}.fa-address-card:before,.fa-vcard:before{content:""}.fa-address-card-o:before,.fa-vcard-o:before{content:""}.fa-user-circle:before{content:""}.fa-user-circle-o:before{content:""}.fa-user-o:before{content:""}.fa-id-badge:before{content:""}.fa-drivers-license:before,.fa-id-card:before{content:""}.fa-drivers-license-o:before,.fa-id-card-o:before{c
ontent:""}.fa-quora:before{content:""}.fa-free-code-camp:before{content:""}.fa-telegram:before{content:""}.fa-thermometer-4:before,.fa-thermometer-full:before,.fa-thermometer:before{content:""}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:""}.fa-thermometer-2:before,.fa-thermometer-half:before{content:""}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:""}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:""}.fa-shower:before{content:""}.fa-bath:before,.fa-bathtub:before,.fa-s15:before{content:""}.fa-podcast:before{content:""}.fa-window-maximize:before{content:""}.fa-window-minimize:before{content:""}.fa-window-restore:before{content:""}.fa-times-rectangle:before,.fa-window-close:before{content:""}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:""}.fa-bandcamp:before{content:""}.fa-grav:before{content:""}.fa-etsy:before{content:""}.fa-imdb:before{content:""}.fa-ravelry:before{content:""}.fa-eercast:before{content:""}.fa-microchip:before{content:""}.fa-snowflake-o:before{content:""}.fa-superpowers:before{content:""}.fa-wpexplorer:before{content:""}.fa-meetup:before{content:""}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li button.toctree-expand{font-family:inherit}.fa:before,.icon:before,.rst-content .admonition-title:before,.rst-content .code-block-caption .headerlink:before,.rst-content .eqno .headerlink:before,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content p .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before,.wy-menu-vertical li 
button.toctree-expand:before{font-family:FontAwesome;display:inline-block;font-style:normal;font-weight:400;line-height:1;text-decoration:inherit}.rst-content .code-block-caption a .headerlink,.rst-content .eqno a .headerlink,.rst-content a .admonition-title,.rst-content code.download a span:first-child,.rst-content dl dt a .headerlink,.rst-content h1 a .headerlink,.rst-content h2 a .headerlink,.rst-content h3 a .headerlink,.rst-content h4 a .headerlink,.rst-content h5 a .headerlink,.rst-content h6 a .headerlink,.rst-content p.caption a .headerlink,.rst-content p a .headerlink,.rst-content table>caption a .headerlink,.rst-content tt.download a span:first-child,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li a button.toctree-expand,a .fa,a .icon,a .rst-content .admonition-title,a .rst-content .code-block-caption .headerlink,a .rst-content .eqno .headerlink,a .rst-content code.download span:first-child,a .rst-content dl dt .headerlink,a .rst-content h1 .headerlink,a .rst-content h2 .headerlink,a .rst-content h3 .headerlink,a .rst-content h4 .headerlink,a .rst-content h5 .headerlink,a .rst-content h6 .headerlink,a .rst-content p.caption .headerlink,a .rst-content p .headerlink,a .rst-content table>caption .headerlink,a .rst-content tt.download span:first-child,a .wy-menu-vertical li button.toctree-expand{display:inline-block;text-decoration:inherit}.btn .fa,.btn .icon,.btn .rst-content .admonition-title,.btn .rst-content .code-block-caption .headerlink,.btn .rst-content .eqno .headerlink,.btn .rst-content code.download span:first-child,.btn .rst-content dl dt .headerlink,.btn .rst-content h1 .headerlink,.btn .rst-content h2 .headerlink,.btn .rst-content h3 .headerlink,.btn .rst-content h4 .headerlink,.btn .rst-content h5 .headerlink,.btn .rst-content h6 .headerlink,.btn .rst-content p .headerlink,.btn .rst-content table>caption .headerlink,.btn .rst-content tt.download span:first-child,.btn .wy-menu-vertical li.current>a button.toctree-expand,.btn .wy-menu-vertical li.on a button.toctree-expand,.btn .wy-menu-vertical li button.toctree-expand,.nav .fa,.nav .icon,.nav .rst-content .admonition-title,.nav .rst-content .code-block-caption .headerlink,.nav .rst-content .eqno .headerlink,.nav .rst-content code.download span:first-child,.nav .rst-content dl dt .headerlink,.nav .rst-content h1 .headerlink,.nav .rst-content h2 .headerlink,.nav .rst-content h3 .headerlink,.nav .rst-content h4 .headerlink,.nav .rst-content h5 .headerlink,.nav .rst-content h6 .headerlink,.nav .rst-content p .headerlink,.nav .rst-content table>caption .headerlink,.nav .rst-content tt.download span:first-child,.nav .wy-menu-vertical li.current>a button.toctree-expand,.nav .wy-menu-vertical li.on a button.toctree-expand,.nav .wy-menu-vertical li button.toctree-expand,.rst-content .btn .admonition-title,.rst-content .code-block-caption .btn .headerlink,.rst-content .code-block-caption .nav .headerlink,.rst-content .eqno .btn .headerlink,.rst-content .eqno .nav .headerlink,.rst-content .nav .admonition-title,.rst-content code.download .btn span:first-child,.rst-content code.download .nav span:first-child,.rst-content dl dt .btn .headerlink,.rst-content dl dt .nav .headerlink,.rst-content h1 .btn .headerlink,.rst-content h1 .nav .headerlink,.rst-content h2 .btn .headerlink,.rst-content h2 .nav .headerlink,.rst-content h3 .btn .headerlink,.rst-content h3 .nav .headerlink,.rst-content h4 .btn .headerlink,.rst-content h4 .nav .headerlink,.rst-content h5 .btn 
.headerlink,.rst-content h5 .nav .headerlink,.rst-content h6 .btn .headerlink,.rst-content h6 .nav .headerlink,.rst-content p .btn .headerlink,.rst-content p .nav .headerlink,.rst-content table>caption .btn .headerlink,.rst-content table>caption .nav .headerlink,.rst-content tt.download .btn span:first-child,.rst-content tt.download .nav span:first-child,.wy-menu-vertical li .btn button.toctree-expand,.wy-menu-vertical li.current>a .btn button.toctree-expand,.wy-menu-vertical li.current>a .nav button.toctree-expand,.wy-menu-vertical li .nav button.toctree-expand,.wy-menu-vertical li.on a .btn button.toctree-expand,.wy-menu-vertical li.on a .nav button.toctree-expand{display:inline}.btn .fa-large.icon,.btn .fa.fa-large,.btn .rst-content .code-block-caption .fa-large.headerlink,.btn .rst-content .eqno .fa-large.headerlink,.btn .rst-content .fa-large.admonition-title,.btn .rst-content code.download span.fa-large:first-child,.btn .rst-content dl dt .fa-large.headerlink,.btn .rst-content h1 .fa-large.headerlink,.btn .rst-content h2 .fa-large.headerlink,.btn .rst-content h3 .fa-large.headerlink,.btn .rst-content h4 .fa-large.headerlink,.btn .rst-content h5 .fa-large.headerlink,.btn .rst-content h6 .fa-large.headerlink,.btn .rst-content p .fa-large.headerlink,.btn .rst-content table>caption .fa-large.headerlink,.btn .rst-content tt.download span.fa-large:first-child,.btn .wy-menu-vertical li button.fa-large.toctree-expand,.nav .fa-large.icon,.nav .fa.fa-large,.nav .rst-content .code-block-caption .fa-large.headerlink,.nav .rst-content .eqno .fa-large.headerlink,.nav .rst-content .fa-large.admonition-title,.nav .rst-content code.download span.fa-large:first-child,.nav .rst-content dl dt .fa-large.headerlink,.nav .rst-content h1 .fa-large.headerlink,.nav .rst-content h2 .fa-large.headerlink,.nav .rst-content h3 .fa-large.headerlink,.nav .rst-content h4 .fa-large.headerlink,.nav .rst-content h5 .fa-large.headerlink,.nav .rst-content h6 .fa-large.headerlink,.nav .rst-content p .fa-large.headerlink,.nav .rst-content table>caption .fa-large.headerlink,.nav .rst-content tt.download span.fa-large:first-child,.nav .wy-menu-vertical li button.fa-large.toctree-expand,.rst-content .btn .fa-large.admonition-title,.rst-content .code-block-caption .btn .fa-large.headerlink,.rst-content .code-block-caption .nav .fa-large.headerlink,.rst-content .eqno .btn .fa-large.headerlink,.rst-content .eqno .nav .fa-large.headerlink,.rst-content .nav .fa-large.admonition-title,.rst-content code.download .btn span.fa-large:first-child,.rst-content code.download .nav span.fa-large:first-child,.rst-content dl dt .btn .fa-large.headerlink,.rst-content dl dt .nav .fa-large.headerlink,.rst-content h1 .btn .fa-large.headerlink,.rst-content h1 .nav .fa-large.headerlink,.rst-content h2 .btn .fa-large.headerlink,.rst-content h2 .nav .fa-large.headerlink,.rst-content h3 .btn .fa-large.headerlink,.rst-content h3 .nav .fa-large.headerlink,.rst-content h4 .btn .fa-large.headerlink,.rst-content h4 .nav .fa-large.headerlink,.rst-content h5 .btn .fa-large.headerlink,.rst-content h5 .nav .fa-large.headerlink,.rst-content h6 .btn .fa-large.headerlink,.rst-content h6 .nav .fa-large.headerlink,.rst-content p .btn .fa-large.headerlink,.rst-content p .nav .fa-large.headerlink,.rst-content table>caption .btn .fa-large.headerlink,.rst-content table>caption .nav .fa-large.headerlink,.rst-content tt.download .btn span.fa-large:first-child,.rst-content tt.download .nav span.fa-large:first-child,.wy-menu-vertical li .btn 
button.fa-large.toctree-expand,.wy-menu-vertical li .nav button.fa-large.toctree-expand{line-height:.9em}.btn .fa-spin.icon,.btn .fa.fa-spin,.btn .rst-content .code-block-caption .fa-spin.headerlink,.btn .rst-content .eqno .fa-spin.headerlink,.btn .rst-content .fa-spin.admonition-title,.btn .rst-content code.download span.fa-spin:first-child,.btn .rst-content dl dt .fa-spin.headerlink,.btn .rst-content h1 .fa-spin.headerlink,.btn .rst-content h2 .fa-spin.headerlink,.btn .rst-content h3 .fa-spin.headerlink,.btn .rst-content h4 .fa-spin.headerlink,.btn .rst-content h5 .fa-spin.headerlink,.btn .rst-content h6 .fa-spin.headerlink,.btn .rst-content p .fa-spin.headerlink,.btn .rst-content table>caption .fa-spin.headerlink,.btn .rst-content tt.download span.fa-spin:first-child,.btn .wy-menu-vertical li button.fa-spin.toctree-expand,.nav .fa-spin.icon,.nav .fa.fa-spin,.nav .rst-content .code-block-caption .fa-spin.headerlink,.nav .rst-content .eqno .fa-spin.headerlink,.nav .rst-content .fa-spin.admonition-title,.nav .rst-content code.download span.fa-spin:first-child,.nav .rst-content dl dt .fa-spin.headerlink,.nav .rst-content h1 .fa-spin.headerlink,.nav .rst-content h2 .fa-spin.headerlink,.nav .rst-content h3 .fa-spin.headerlink,.nav .rst-content h4 .fa-spin.headerlink,.nav .rst-content h5 .fa-spin.headerlink,.nav .rst-content h6 .fa-spin.headerlink,.nav .rst-content p .fa-spin.headerlink,.nav .rst-content table>caption .fa-spin.headerlink,.nav .rst-content tt.download span.fa-spin:first-child,.nav .wy-menu-vertical li button.fa-spin.toctree-expand,.rst-content .btn .fa-spin.admonition-title,.rst-content .code-block-caption .btn .fa-spin.headerlink,.rst-content .code-block-caption .nav .fa-spin.headerlink,.rst-content .eqno .btn .fa-spin.headerlink,.rst-content .eqno .nav .fa-spin.headerlink,.rst-content .nav .fa-spin.admonition-title,.rst-content code.download .btn span.fa-spin:first-child,.rst-content code.download .nav span.fa-spin:first-child,.rst-content dl dt .btn .fa-spin.headerlink,.rst-content dl dt .nav .fa-spin.headerlink,.rst-content h1 .btn .fa-spin.headerlink,.rst-content h1 .nav .fa-spin.headerlink,.rst-content h2 .btn .fa-spin.headerlink,.rst-content h2 .nav .fa-spin.headerlink,.rst-content h3 .btn .fa-spin.headerlink,.rst-content h3 .nav .fa-spin.headerlink,.rst-content h4 .btn .fa-spin.headerlink,.rst-content h4 .nav .fa-spin.headerlink,.rst-content h5 .btn .fa-spin.headerlink,.rst-content h5 .nav .fa-spin.headerlink,.rst-content h6 .btn .fa-spin.headerlink,.rst-content h6 .nav .fa-spin.headerlink,.rst-content p .btn .fa-spin.headerlink,.rst-content p .nav .fa-spin.headerlink,.rst-content table>caption .btn .fa-spin.headerlink,.rst-content table>caption .nav .fa-spin.headerlink,.rst-content tt.download .btn span.fa-spin:first-child,.rst-content tt.download .nav span.fa-spin:first-child,.wy-menu-vertical li .btn button.fa-spin.toctree-expand,.wy-menu-vertical li .nav button.fa-spin.toctree-expand{display:inline-block}.btn.fa:before,.btn.icon:before,.rst-content .btn.admonition-title:before,.rst-content .code-block-caption .btn.headerlink:before,.rst-content .eqno .btn.headerlink:before,.rst-content code.download span.btn:first-child:before,.rst-content dl dt .btn.headerlink:before,.rst-content h1 .btn.headerlink:before,.rst-content h2 .btn.headerlink:before,.rst-content h3 .btn.headerlink:before,.rst-content h4 .btn.headerlink:before,.rst-content h5 .btn.headerlink:before,.rst-content h6 .btn.headerlink:before,.rst-content p .btn.headerlink:before,.rst-content table>caption 
.btn.headerlink:before,.rst-content tt.download span.btn:first-child:before,.wy-menu-vertical li button.btn.toctree-expand:before{opacity:.5;-webkit-transition:opacity .05s ease-in;-moz-transition:opacity .05s ease-in;transition:opacity .05s ease-in}.btn.fa:hover:before,.btn.icon:hover:before,.rst-content .btn.admonition-title:hover:before,.rst-content .code-block-caption .btn.headerlink:hover:before,.rst-content .eqno .btn.headerlink:hover:before,.rst-content code.download span.btn:first-child:hover:before,.rst-content dl dt .btn.headerlink:hover:before,.rst-content h1 .btn.headerlink:hover:before,.rst-content h2 .btn.headerlink:hover:before,.rst-content h3 .btn.headerlink:hover:before,.rst-content h4 .btn.headerlink:hover:before,.rst-content h5 .btn.headerlink:hover:before,.rst-content h6 .btn.headerlink:hover:before,.rst-content p .btn.headerlink:hover:before,.rst-content table>caption .btn.headerlink:hover:before,.rst-content tt.download span.btn:first-child:hover:before,.wy-menu-vertical li button.btn.toctree-expand:hover:before{opacity:1}.btn-mini .fa:before,.btn-mini .icon:before,.btn-mini .rst-content .admonition-title:before,.btn-mini .rst-content .code-block-caption .headerlink:before,.btn-mini .rst-content .eqno .headerlink:before,.btn-mini .rst-content code.download span:first-child:before,.btn-mini .rst-content dl dt .headerlink:before,.btn-mini .rst-content h1 .headerlink:before,.btn-mini .rst-content h2 .headerlink:before,.btn-mini .rst-content h3 .headerlink:before,.btn-mini .rst-content h4 .headerlink:before,.btn-mini .rst-content h5 .headerlink:before,.btn-mini .rst-content h6 .headerlink:before,.btn-mini .rst-content p .headerlink:before,.btn-mini .rst-content table>caption .headerlink:before,.btn-mini .rst-content tt.download span:first-child:before,.btn-mini .wy-menu-vertical li button.toctree-expand:before,.rst-content .btn-mini .admonition-title:before,.rst-content .code-block-caption .btn-mini .headerlink:before,.rst-content .eqno .btn-mini .headerlink:before,.rst-content code.download .btn-mini span:first-child:before,.rst-content dl dt .btn-mini .headerlink:before,.rst-content h1 .btn-mini .headerlink:before,.rst-content h2 .btn-mini .headerlink:before,.rst-content h3 .btn-mini .headerlink:before,.rst-content h4 .btn-mini .headerlink:before,.rst-content h5 .btn-mini .headerlink:before,.rst-content h6 .btn-mini .headerlink:before,.rst-content p .btn-mini .headerlink:before,.rst-content table>caption .btn-mini .headerlink:before,.rst-content tt.download .btn-mini span:first-child:before,.wy-menu-vertical li .btn-mini button.toctree-expand:before{font-size:14px;vertical-align:-15%}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.wy-alert{padding:12px;line-height:24px;margin-bottom:24px;background:#e7f2fa}.rst-content .admonition-title,.wy-alert-title{font-weight:700;display:block;color:#fff;background:#6ab0de;padding:6px 12px;margin:-12px -12px 12px}.rst-content .danger,.rst-content .error,.rst-content .wy-alert-danger.admonition,.rst-content .wy-alert-danger.admonition-todo,.rst-content .wy-alert-danger.attention,.rst-content .wy-alert-danger.caution,.rst-content .wy-alert-danger.hint,.rst-content .wy-alert-danger.important,.rst-content .wy-alert-danger.note,.rst-content .wy-alert-danger.seealso,.rst-content .wy-alert-danger.tip,.rst-content 
.wy-alert-danger.warning,.wy-alert.wy-alert-danger{background:#fdf3f2}.rst-content .danger .admonition-title,.rst-content .danger .wy-alert-title,.rst-content .error .admonition-title,.rst-content .error .wy-alert-title,.rst-content .wy-alert-danger.admonition-todo .admonition-title,.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,.rst-content .wy-alert-danger.admonition .admonition-title,.rst-content .wy-alert-danger.admonition .wy-alert-title,.rst-content .wy-alert-danger.attention .admonition-title,.rst-content .wy-alert-danger.attention .wy-alert-title,.rst-content .wy-alert-danger.caution .admonition-title,.rst-content .wy-alert-danger.caution .wy-alert-title,.rst-content .wy-alert-danger.hint .admonition-title,.rst-content .wy-alert-danger.hint .wy-alert-title,.rst-content .wy-alert-danger.important .admonition-title,.rst-content .wy-alert-danger.important .wy-alert-title,.rst-content .wy-alert-danger.note .admonition-title,.rst-content .wy-alert-danger.note .wy-alert-title,.rst-content .wy-alert-danger.seealso .admonition-title,.rst-content .wy-alert-danger.seealso .wy-alert-title,.rst-content .wy-alert-danger.tip .admonition-title,.rst-content .wy-alert-danger.tip .wy-alert-title,.rst-content .wy-alert-danger.warning .admonition-title,.rst-content .wy-alert-danger.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-danger .admonition-title,.wy-alert.wy-alert-danger .rst-content .admonition-title,.wy-alert.wy-alert-danger .wy-alert-title{background:#f29f97}.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .warning,.rst-content .wy-alert-warning.admonition,.rst-content .wy-alert-warning.danger,.rst-content .wy-alert-warning.error,.rst-content .wy-alert-warning.hint,.rst-content .wy-alert-warning.important,.rst-content .wy-alert-warning.note,.rst-content .wy-alert-warning.seealso,.rst-content .wy-alert-warning.tip,.wy-alert.wy-alert-warning{background:#ffedcc}.rst-content .admonition-todo .admonition-title,.rst-content .admonition-todo .wy-alert-title,.rst-content .attention .admonition-title,.rst-content .attention .wy-alert-title,.rst-content .caution .admonition-title,.rst-content .caution .wy-alert-title,.rst-content .warning .admonition-title,.rst-content .warning .wy-alert-title,.rst-content .wy-alert-warning.admonition .admonition-title,.rst-content .wy-alert-warning.admonition .wy-alert-title,.rst-content .wy-alert-warning.danger .admonition-title,.rst-content .wy-alert-warning.danger .wy-alert-title,.rst-content .wy-alert-warning.error .admonition-title,.rst-content .wy-alert-warning.error .wy-alert-title,.rst-content .wy-alert-warning.hint .admonition-title,.rst-content .wy-alert-warning.hint .wy-alert-title,.rst-content .wy-alert-warning.important .admonition-title,.rst-content .wy-alert-warning.important .wy-alert-title,.rst-content .wy-alert-warning.note .admonition-title,.rst-content .wy-alert-warning.note .wy-alert-title,.rst-content .wy-alert-warning.seealso .admonition-title,.rst-content .wy-alert-warning.seealso .wy-alert-title,.rst-content .wy-alert-warning.tip .admonition-title,.rst-content .wy-alert-warning.tip .wy-alert-title,.rst-content .wy-alert.wy-alert-warning .admonition-title,.wy-alert.wy-alert-warning .rst-content .admonition-title,.wy-alert.wy-alert-warning .wy-alert-title{background:#f0b37e}.rst-content .note,.rst-content .seealso,.rst-content .wy-alert-info.admonition,.rst-content .wy-alert-info.admonition-todo,.rst-content .wy-alert-info.attention,.rst-content .wy-alert-info.caution,.rst-content 
.wy-alert-info.danger,.rst-content .wy-alert-info.error,.rst-content .wy-alert-info.hint,.rst-content .wy-alert-info.important,.rst-content .wy-alert-info.tip,.rst-content .wy-alert-info.warning,.wy-alert.wy-alert-info{background:#e7f2fa}.rst-content .note .admonition-title,.rst-content .note .wy-alert-title,.rst-content .seealso .admonition-title,.rst-content .seealso .wy-alert-title,.rst-content .wy-alert-info.admonition-todo .admonition-title,.rst-content .wy-alert-info.admonition-todo .wy-alert-title,.rst-content .wy-alert-info.admonition .admonition-title,.rst-content .wy-alert-info.admonition .wy-alert-title,.rst-content .wy-alert-info.attention .admonition-title,.rst-content .wy-alert-info.attention .wy-alert-title,.rst-content .wy-alert-info.caution .admonition-title,.rst-content .wy-alert-info.caution .wy-alert-title,.rst-content .wy-alert-info.danger .admonition-title,.rst-content .wy-alert-info.danger .wy-alert-title,.rst-content .wy-alert-info.error .admonition-title,.rst-content .wy-alert-info.error .wy-alert-title,.rst-content .wy-alert-info.hint .admonition-title,.rst-content .wy-alert-info.hint .wy-alert-title,.rst-content .wy-alert-info.important .admonition-title,.rst-content .wy-alert-info.important .wy-alert-title,.rst-content .wy-alert-info.tip .admonition-title,.rst-content .wy-alert-info.tip .wy-alert-title,.rst-content .wy-alert-info.warning .admonition-title,.rst-content .wy-alert-info.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-info .admonition-title,.wy-alert.wy-alert-info .rst-content .admonition-title,.wy-alert.wy-alert-info .wy-alert-title{background:#6ab0de}.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .wy-alert-success.admonition,.rst-content .wy-alert-success.admonition-todo,.rst-content .wy-alert-success.attention,.rst-content .wy-alert-success.caution,.rst-content .wy-alert-success.danger,.rst-content .wy-alert-success.error,.rst-content .wy-alert-success.note,.rst-content .wy-alert-success.seealso,.rst-content .wy-alert-success.warning,.wy-alert.wy-alert-success{background:#dbfaf4}.rst-content .hint .admonition-title,.rst-content .hint .wy-alert-title,.rst-content .important .admonition-title,.rst-content .important .wy-alert-title,.rst-content .tip .admonition-title,.rst-content .tip .wy-alert-title,.rst-content .wy-alert-success.admonition-todo .admonition-title,.rst-content .wy-alert-success.admonition-todo .wy-alert-title,.rst-content .wy-alert-success.admonition .admonition-title,.rst-content .wy-alert-success.admonition .wy-alert-title,.rst-content .wy-alert-success.attention .admonition-title,.rst-content .wy-alert-success.attention .wy-alert-title,.rst-content .wy-alert-success.caution .admonition-title,.rst-content .wy-alert-success.caution .wy-alert-title,.rst-content .wy-alert-success.danger .admonition-title,.rst-content .wy-alert-success.danger .wy-alert-title,.rst-content .wy-alert-success.error .admonition-title,.rst-content .wy-alert-success.error .wy-alert-title,.rst-content .wy-alert-success.note .admonition-title,.rst-content .wy-alert-success.note .wy-alert-title,.rst-content .wy-alert-success.seealso .admonition-title,.rst-content .wy-alert-success.seealso .wy-alert-title,.rst-content .wy-alert-success.warning .admonition-title,.rst-content .wy-alert-success.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-success .admonition-title,.wy-alert.wy-alert-success .rst-content .admonition-title,.wy-alert.wy-alert-success .wy-alert-title{background:#1abc9c}.rst-content 
.wy-alert-neutral.admonition,.rst-content .wy-alert-neutral.admonition-todo,.rst-content .wy-alert-neutral.attention,.rst-content .wy-alert-neutral.caution,.rst-content .wy-alert-neutral.danger,.rst-content .wy-alert-neutral.error,.rst-content .wy-alert-neutral.hint,.rst-content .wy-alert-neutral.important,.rst-content .wy-alert-neutral.note,.rst-content .wy-alert-neutral.seealso,.rst-content .wy-alert-neutral.tip,.rst-content .wy-alert-neutral.warning,.wy-alert.wy-alert-neutral{background:#f3f6f6}.rst-content .wy-alert-neutral.admonition-todo .admonition-title,.rst-content .wy-alert-neutral.admonition-todo .wy-alert-title,.rst-content .wy-alert-neutral.admonition .admonition-title,.rst-content .wy-alert-neutral.admonition .wy-alert-title,.rst-content .wy-alert-neutral.attention .admonition-title,.rst-content .wy-alert-neutral.attention .wy-alert-title,.rst-content .wy-alert-neutral.caution .admonition-title,.rst-content .wy-alert-neutral.caution .wy-alert-title,.rst-content .wy-alert-neutral.danger .admonition-title,.rst-content .wy-alert-neutral.danger .wy-alert-title,.rst-content .wy-alert-neutral.error .admonition-title,.rst-content .wy-alert-neutral.error .wy-alert-title,.rst-content .wy-alert-neutral.hint .admonition-title,.rst-content .wy-alert-neutral.hint .wy-alert-title,.rst-content .wy-alert-neutral.important .admonition-title,.rst-content .wy-alert-neutral.important .wy-alert-title,.rst-content .wy-alert-neutral.note .admonition-title,.rst-content .wy-alert-neutral.note .wy-alert-title,.rst-content .wy-alert-neutral.seealso .admonition-title,.rst-content .wy-alert-neutral.seealso .wy-alert-title,.rst-content .wy-alert-neutral.tip .admonition-title,.rst-content .wy-alert-neutral.tip .wy-alert-title,.rst-content .wy-alert-neutral.warning .admonition-title,.rst-content .wy-alert-neutral.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-neutral .admonition-title,.wy-alert.wy-alert-neutral .rst-content .admonition-title,.wy-alert.wy-alert-neutral .wy-alert-title{color:#404040;background:#e1e4e5}.rst-content .wy-alert-neutral.admonition-todo a,.rst-content .wy-alert-neutral.admonition a,.rst-content .wy-alert-neutral.attention a,.rst-content .wy-alert-neutral.caution a,.rst-content .wy-alert-neutral.danger a,.rst-content .wy-alert-neutral.error a,.rst-content .wy-alert-neutral.hint a,.rst-content .wy-alert-neutral.important a,.rst-content .wy-alert-neutral.note a,.rst-content .wy-alert-neutral.seealso a,.rst-content .wy-alert-neutral.tip a,.rst-content .wy-alert-neutral.warning a,.wy-alert.wy-alert-neutral a{color:#2980b9}.rst-content .admonition-todo p:last-child,.rst-content .admonition p:last-child,.rst-content .attention p:last-child,.rst-content .caution p:last-child,.rst-content .danger p:last-child,.rst-content .error p:last-child,.rst-content .hint p:last-child,.rst-content .important p:last-child,.rst-content .note p:last-child,.rst-content .seealso p:last-child,.rst-content .tip p:last-child,.rst-content .warning p:last-child,.wy-alert p:last-child{margin-bottom:0}.wy-tray-container{position:fixed;bottom:0;left:0;z-index:600}.wy-tray-container li{display:block;width:300px;background:transparent;color:#fff;text-align:center;box-shadow:0 5px 5px 0 rgba(0,0,0,.1);padding:0 24px;min-width:20%;opacity:0;height:0;line-height:56px;overflow:hidden;-webkit-transition:all .3s ease-in;-moz-transition:all .3s ease-in;transition:all .3s ease-in}.wy-tray-container li.wy-tray-item-success{background:#27ae60}.wy-tray-container 
li.wy-tray-item-info{background:#2980b9}.wy-tray-container li.wy-tray-item-warning{background:#e67e22}.wy-tray-container li.wy-tray-item-danger{background:#e74c3c}.wy-tray-container li.on{opacity:1;height:56px}@media screen and (max-width:768px){.wy-tray-container{bottom:auto;top:0;width:100%}.wy-tray-container li{width:100%}}button{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle;cursor:pointer;line-height:normal;-webkit-appearance:button;*overflow:visible}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}button[disabled]{cursor:default}.btn{display:inline-block;border-radius:2px;line-height:normal;white-space:nowrap;text-align:center;cursor:pointer;font-size:100%;padding:6px 12px 8px;color:#fff;border:1px solid rgba(0,0,0,.1);background-color:#27ae60;text-decoration:none;font-weight:400;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 2px -1px hsla(0,0%,100%,.5),inset 0 -2px 0 0 rgba(0,0,0,.1);outline-none:false;vertical-align:middle;*display:inline;zoom:1;-webkit-user-drag:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-transition:all .1s linear;-moz-transition:all .1s linear;transition:all .1s linear}.btn-hover{background:#2e8ece;color:#fff}.btn:hover{background:#2cc36b;color:#fff}.btn:focus{background:#2cc36b;outline:0}.btn:active{box-shadow:inset 0 -1px 0 0 rgba(0,0,0,.05),inset 0 2px 0 0 rgba(0,0,0,.1);padding:8px 12px 6px}.btn:visited{color:#fff}.btn-disabled,.btn-disabled:active,.btn-disabled:focus,.btn-disabled:hover,.btn:disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn::-moz-focus-inner{padding:0;border:0}.btn-small{font-size:80%}.btn-info{background-color:#2980b9!important}.btn-info:hover{background-color:#2e8ece!important}.btn-neutral{background-color:#f3f6f6!important;color:#404040!important}.btn-neutral:hover{background-color:#e5ebeb!important;color:#404040}.btn-neutral:visited{color:#404040!important}.btn-success{background-color:#27ae60!important}.btn-success:hover{background-color:#295!important}.btn-danger{background-color:#e74c3c!important}.btn-danger:hover{background-color:#ea6153!important}.btn-warning{background-color:#e67e22!important}.btn-warning:hover{background-color:#e98b39!important}.btn-invert{background-color:#222}.btn-invert:hover{background-color:#2f2f2f!important}.btn-link{background-color:transparent!important;color:#2980b9;box-shadow:none;border-color:transparent!important}.btn-link:active,.btn-link:hover{background-color:transparent!important;color:#409ad5!important;box-shadow:none}.btn-link:visited{color:#9b59b6}.wy-btn-group .btn,.wy-control .btn{vertical-align:middle}.wy-btn-group{margin-bottom:24px;*zoom:1}.wy-btn-group:after,.wy-btn-group:before{display:table;content:""}.wy-btn-group:after{clear:both}.wy-dropdown{position:relative;display:inline-block}.wy-dropdown-active .wy-dropdown-menu{display:block}.wy-dropdown-menu{position:absolute;left:0;display:none;float:left;top:100%;min-width:100%;background:#fcfcfc;z-index:100;border:1px solid #cfd7dd;box-shadow:0 2px 2px 0 rgba(0,0,0,.1);padding:12px}.wy-dropdown-menu>dd>a{display:block;clear:both;color:#404040;white-space:nowrap;font-size:90%;padding:0 12px;cursor:pointer}.wy-dropdown-menu>dd>a:hover{background:#2980b9;color:#fff}.wy-dropdown-menu>dd.divider{border-top:1px solid #cfd7dd;margin:6px 
0}.wy-dropdown-menu>dd.search{padding-bottom:12px}.wy-dropdown-menu>dd.search input[type=search]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#fcfcfc;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#2980b9;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid #f5f5f5;border-left:5px solid transparent;border-right:5px solid transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked select{display:block}.wy-form-aligned .wy-help-inline,.wy-form-aligned input,.wy-form-aligned label,.wy-form-aligned select,.wy-form-aligned textarea{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned .wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{margin:0}fieldset,legend{border:0;padding:0}legend{width:100%;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label,legend{display:block}label{margin:0 0 .3125em;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;max-width:1200px;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:after,.wy-control-group:before{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#e74c3c}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full input[type=color],.wy-control-group .wy-form-full input[type=date],.wy-control-group .wy-form-full input[type=datetime-local],.wy-control-group .wy-form-full input[type=datetime],.wy-control-group .wy-form-full input[type=email],.wy-control-group .wy-form-full input[type=month],.wy-control-group .wy-form-full input[type=number],.wy-control-group .wy-form-full input[type=password],.wy-control-group .wy-form-full input[type=search],.wy-control-group .wy-form-full input[type=tel],.wy-control-group .wy-form-full input[type=text],.wy-control-group .wy-form-full input[type=time],.wy-control-group .wy-form-full input[type=url],.wy-control-group .wy-form-full input[type=week],.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves input[type=color],.wy-control-group .wy-form-halves input[type=date],.wy-control-group .wy-form-halves input[type=datetime-local],.wy-control-group .wy-form-halves input[type=datetime],.wy-control-group .wy-form-halves input[type=email],.wy-control-group .wy-form-halves input[type=month],.wy-control-group .wy-form-halves input[type=number],.wy-control-group .wy-form-halves input[type=password],.wy-control-group .wy-form-halves input[type=search],.wy-control-group .wy-form-halves input[type=tel],.wy-control-group .wy-form-halves 
input[type=text],.wy-control-group .wy-form-halves input[type=time],.wy-control-group .wy-form-halves input[type=url],.wy-control-group .wy-form-halves input[type=week],.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds input[type=color],.wy-control-group .wy-form-thirds input[type=date],.wy-control-group .wy-form-thirds input[type=datetime-local],.wy-control-group .wy-form-thirds input[type=datetime],.wy-control-group .wy-form-thirds input[type=email],.wy-control-group .wy-form-thirds input[type=month],.wy-control-group .wy-form-thirds input[type=number],.wy-control-group .wy-form-thirds input[type=password],.wy-control-group .wy-form-thirds input[type=search],.wy-control-group .wy-form-thirds input[type=tel],.wy-control-group .wy-form-thirds input[type=text],.wy-control-group .wy-form-thirds input[type=time],.wy-control-group .wy-form-thirds input[type=url],.wy-control-group .wy-form-thirds input[type=week],.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group .wy-form-full{float:left;display:block;width:100%;margin-right:0}.wy-control-group .wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.35765%;width:48.82117%}.wy-control-group .wy-form-halves:last-child,.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(odd){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.35765%;width:31.76157%}.wy-control-group .wy-form-thirds:last-child,.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control,.wy-control-no-input{margin:6px 0 0;font-size:90%}.wy-control-no-input{display:inline-block}.wy-control-group.fluid-input input[type=color],.wy-control-group.fluid-input input[type=date],.wy-control-group.fluid-input input[type=datetime-local],.wy-control-group.fluid-input input[type=datetime],.wy-control-group.fluid-input input[type=email],.wy-control-group.fluid-input input[type=month],.wy-control-group.fluid-input input[type=number],.wy-control-group.fluid-input input[type=password],.wy-control-group.fluid-input input[type=search],.wy-control-group.fluid-input input[type=tel],.wy-control-group.fluid-input input[type=text],.wy-control-group.fluid-input input[type=time],.wy-control-group.fluid-input input[type=url],.wy-control-group.fluid-input input[type=week]{width:100%}.wy-form-message-inline{padding-left:.3em;color:#666;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message p:last-child{margin-bottom:0}input{line-height:normal}input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;*overflow:visible}input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border 
.3s linear}input[type=datetime-local]{padding:.34375em .625em}input[disabled]{cursor:default}input[type=checkbox],input[type=radio]{padding:0;margin-right:.3125em;*height:13px;*width:13px}input[type=checkbox],input[type=radio],input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}input[type=color]:focus,input[type=date]:focus,input[type=datetime-local]:focus,input[type=datetime]:focus,input[type=email]:focus,input[type=month]:focus,input[type=number]:focus,input[type=password]:focus,input[type=search]:focus,input[type=tel]:focus,input[type=text]:focus,input[type=time]:focus,input[type=url]:focus,input[type=week]:focus{outline:0;outline:thin dotted\9;border-color:#333}input.no-focus:focus{border-color:#ccc!important}input[type=checkbox]:focus,input[type=file]:focus,input[type=radio]:focus{outline:thin dotted #333;outline:1px auto #129fea}input[type=color][disabled],input[type=date][disabled],input[type=datetime-local][disabled],input[type=datetime][disabled],input[type=email][disabled],input[type=month][disabled],input[type=number][disabled],input[type=password][disabled],input[type=search][disabled],input[type=tel][disabled],input[type=text][disabled],input[type=time][disabled],input[type=url][disabled],input[type=week][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,select:focus:invalid,textarea:focus:invalid{color:#e74c3c;border:1px solid #e74c3c}input:focus:invalid:focus,select:focus:invalid:focus,textarea:focus:invalid:focus{border-color:#e74c3c}input[type=checkbox]:focus:invalid:focus,input[type=file]:focus:invalid:focus,input[type=radio]:focus:invalid:focus{outline-color:#e74c3c}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif}select,textarea{padding:.5em .625em;display:inline-block;border:1px solid #ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}input[readonly],select[disabled],select[readonly],textarea[disabled],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type=checkbox][disabled],input[type=radio][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 8px;display:inline-block;font-size:80%;background-color:#f3f6f6;border:1px solid #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{position:relative;display:block;height:24px;margin-top:12px;cursor:pointer}.wy-switch:before{left:0;top:0;width:36px;height:12px;background:#ccc}.wy-switch:after,.wy-switch:before{position:absolute;content:"";display:block;border-radius:4px;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch:after{width:18px;height:18px;background:#999;left:-3px;top:-3px}.wy-switch 
span{position:absolute;left:48px;display:block;font-size:12px;color:#ccc;line-height:1}.wy-switch.active:before{background:#1e8449}.wy-switch.active:after{left:24px;background:#27ae60}.wy-switch.disabled{cursor:not-allowed;opacity:.8}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#e74c3c}.wy-control-group.wy-control-group-error input[type=color],.wy-control-group.wy-control-group-error input[type=date],.wy-control-group.wy-control-group-error input[type=datetime-local],.wy-control-group.wy-control-group-error input[type=datetime],.wy-control-group.wy-control-group-error input[type=email],.wy-control-group.wy-control-group-error input[type=month],.wy-control-group.wy-control-group-error input[type=number],.wy-control-group.wy-control-group-error input[type=password],.wy-control-group.wy-control-group-error input[type=search],.wy-control-group.wy-control-group-error input[type=tel],.wy-control-group.wy-control-group-error input[type=text],.wy-control-group.wy-control-group-error input[type=time],.wy-control-group.wy-control-group-error input[type=url],.wy-control-group.wy-control-group-error input[type=week],.wy-control-group.wy-control-group-error textarea{border:1px solid #e74c3c}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:.5em .625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#27ae60}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#e74c3c}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#e67e22}.wy-inline-validate.wy-inline-validate-info .wy-input-context{color:#2980b9}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width:480px){.wy-form button[type=submit]{margin:.7em 0 0}.wy-form input[type=color],.wy-form input[type=date],.wy-form input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=text],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week],.wy-form label{margin-bottom:.3em;display:block}.wy-form input[type=color],.wy-form input[type=date],.wy-form 
input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0}.wy-form-message,.wy-form-message-inline,.wy-form .wy-help-inline{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width:768px){.tablet-hide{display:none}}@media screen and (max-width:480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.rst-content table.docutils,.rst-content table.field-list,.wy-table{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.rst-content table.docutils caption,.rst-content table.field-list caption,.wy-table caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.rst-content table.docutils td,.rst-content table.docutils th,.rst-content table.field-list td,.rst-content table.field-list th,.wy-table td,.wy-table th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.rst-content table.docutils td:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list td:first-child,.rst-content table.field-list th:first-child,.wy-table td:first-child,.wy-table th:first-child{border-left-width:0}.rst-content table.docutils thead,.rst-content table.field-list thead,.wy-table thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.rst-content table.docutils thead th,.rst-content table.field-list thead th,.wy-table thead th{font-weight:700;border-bottom:2px solid #e1e4e5}.rst-content table.docutils td,.rst-content table.field-list td,.wy-table td{background-color:transparent;vertical-align:middle}.rst-content table.docutils td p,.rst-content table.field-list td p,.wy-table td p{line-height:18px}.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child,.wy-table td p:last-child{margin-bottom:0}.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min,.wy-table .wy-table-cell-min{width:1%;padding-right:0}.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:grey;font-size:90%}.wy-table-tertiary{color:grey;font-size:80%}.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td,.wy-table-backed,.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) td{background-color:#f3f6f6}.rst-content table.docutils,.wy-table-bordered-all{border:1px solid #e1e4e5}.rst-content table.docutils td,.wy-table-bordered-all td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.rst-content table.docutils tbody>tr:last-child td,.wy-table-bordered-all tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive 
table{margin-bottom:0!important}.wy-table-responsive table td,.wy-table-responsive table th{white-space:nowrap}a{color:#2980b9;text-decoration:none;cursor:pointer}a:hover{color:#3091d1}a:visited{color:#9b59b6}html{height:100%}body,html{overflow-x:hidden}body{font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;font-weight:400;color:#404040;min-height:100%;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#e67e22!important}a.wy-text-warning:hover{color:#eb9950!important}.wy-text-info{color:#2980b9!important}a.wy-text-info:hover{color:#409ad5!important}.wy-text-success{color:#27ae60!important}a.wy-text-success:hover{color:#36d278!important}.wy-text-danger{color:#e74c3c!important}a.wy-text-danger:hover{color:#ed7669!important}.wy-text-neutral{color:#404040!important}a.wy-text-neutral:hover{color:#595959!important}.rst-content .toctree-wrapper>p.caption,h1,h2,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif}p{line-height:24px;font-size:16px;margin:0 0 24px}h1{font-size:175%}.rst-content .toctree-wrapper>p.caption,h2{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #e1e4e5;margin:24px 0;padding:0}.rst-content code,.rst-content tt,code{white-space:nowrap;max-width:100%;background:#fff;border:1px solid #e1e4e5;font-size:75%;padding:0 5px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#e74c3c;overflow-x:auto}.rst-content tt.code-large,code.code-large{font-size:90%}.rst-content .section ul,.rst-content .toctree-wrapper ul,.rst-content section ul,.wy-plain-list-disc,article ul{list-style:disc;line-height:24px;margin-bottom:24px}.rst-content .section ul li,.rst-content .toctree-wrapper ul li,.rst-content section ul li,.wy-plain-list-disc li,article ul li{list-style:disc;margin-left:24px}.rst-content .section ul li p:last-child,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li p:last-child,.rst-content .toctree-wrapper ul li ul,.rst-content section ul li p:last-child,.rst-content section ul li ul,.wy-plain-list-disc li p:last-child,.wy-plain-list-disc li ul,article ul li p:last-child,article ul li ul{margin-bottom:0}.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,.rst-content section ul li li,.wy-plain-list-disc li li,article ul li li{list-style:circle}.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,.rst-content section ul li li li,.wy-plain-list-disc li li li,article ul li li li{list-style:square}.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,.rst-content section ul li ol li,.wy-plain-list-disc li ol li,article ul li ol li{list-style:decimal}.rst-content .section ol,.rst-content .section ol.arabic,.rst-content .toctree-wrapper ol,.rst-content .toctree-wrapper ol.arabic,.rst-content section ol,.rst-content section ol.arabic,.wy-plain-list-decimal,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.rst-content .section ol.arabic li,.rst-content .section ol li,.rst-content .toctree-wrapper ol.arabic li,.rst-content .toctree-wrapper ol li,.rst-content section ol.arabic li,.rst-content section ol li,.wy-plain-list-decimal li,article ol 
li{list-style:decimal;margin-left:24px}.rst-content .section ol.arabic li ul,.rst-content .section ol li p:last-child,.rst-content .section ol li ul,.rst-content .toctree-wrapper ol.arabic li ul,.rst-content .toctree-wrapper ol li p:last-child,.rst-content .toctree-wrapper ol li ul,.rst-content section ol.arabic li ul,.rst-content section ol li p:last-child,.rst-content section ol li ul,.wy-plain-list-decimal li p:last-child,.wy-plain-list-decimal li ul,article ol li p:last-child,article ol li ul{margin-bottom:0}.rst-content .section ol.arabic li ul li,.rst-content .section ol li ul li,.rst-content .toctree-wrapper ol.arabic li ul li,.rst-content .toctree-wrapper ol li ul li,.rst-content section ol.arabic li ul li,.rst-content section ol li ul li,.wy-plain-list-decimal li ul li,article ol li ul li{list-style:disc}.wy-breadcrumbs{*zoom:1}.wy-breadcrumbs:after,.wy-breadcrumbs:before{display:table;content:""}.wy-breadcrumbs:after{clear:both}.wy-breadcrumbs>li{display:inline-block;padding-top:5px}.wy-breadcrumbs>li.wy-breadcrumbs-aside{float:right}.rst-content .wy-breadcrumbs>li code,.rst-content .wy-breadcrumbs>li tt,.wy-breadcrumbs>li .rst-content tt,.wy-breadcrumbs>li code{all:inherit;color:inherit}.breadcrumb-item:before{content:"/";color:#bbb;font-size:13px;padding:0 6px 0 3px}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width:480px){.wy-breadcrumbs-extra,.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}html{font-size:16px}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:after,.wy-menu-horiz:before{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz li,.wy-menu-horiz ul{display:inline-block}.wy-menu-horiz li:hover{background:hsla(0,0%,100%,.1)}.wy-menu-horiz li.divide-left{border-left:1px solid #404040}.wy-menu-horiz li.divide-right{border-right:1px solid #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{color:#55a5d9;height:32px;line-height:32px;padding:0 1.618em;margin:12px 0 0;display:block;font-weight:700;text-transform:uppercase;font-size:85%;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:1px solid #404040}.wy-menu-vertical li.divide-bottom{border-bottom:1px solid #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:grey;border-right:1px solid #c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.rst-content .wy-menu-vertical li tt,.wy-menu-vertical li .rst-content tt,.wy-menu-vertical li code{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li button.toctree-expand{display:block;float:left;margin-left:-1.2em;line-height:18px;color:#4d4d4d;border:none;background:none;padding:0}.wy-menu-vertical li.current>a,.wy-menu-vertical li.on a{color:#404040;font-weight:700;position:relative;background:#fcfcfc;border:none;padding:.4045em 1.618em}.wy-menu-vertical li.current>a:hover,.wy-menu-vertical li.on a:hover{background:#fcfcfc}.wy-menu-vertical li.current>a:hover button.toctree-expand,.wy-menu-vertical li.on a:hover button.toctree-expand{color:grey}.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a 
button.toctree-expand{display:block;line-height:18px;color:#333}.wy-menu-vertical li.toctree-l1.current>a{border-bottom:1px solid #c9c9c9;border-top:1px solid #c9c9c9}.wy-menu-vertical .toctree-l1.current .toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .toctree-l11>ul{display:none}.wy-menu-vertical .toctree-l1.current .current.toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .current.toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .current.toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .current.toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .current.toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .current.toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .current.toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .current.toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .current.toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .current.toctree-l11>ul{display:block}.wy-menu-vertical li.toctree-l3,.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.toctree-l2 a,.wy-menu-vertical li.toctree-l3 a,.wy-menu-vertical li.toctree-l4 a,.wy-menu-vertical li.toctree-l5 a,.wy-menu-vertical li.toctree-l6 a,.wy-menu-vertical li.toctree-l7 a,.wy-menu-vertical li.toctree-l8 a,.wy-menu-vertical li.toctree-l9 a,.wy-menu-vertical li.toctree-l10 a{color:#404040}.wy-menu-vertical li.toctree-l2 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l3 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l4 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l5 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l6 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l7 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l8 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l9 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l10 a:hover button.toctree-expand{color:grey}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{display:block}.wy-menu-vertical li.toctree-l2.current>a{padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{padding:.4045em 1.618em .4045em 4.045em}.wy-menu-vertical li.toctree-l3.current>a{padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{padding:.4045em 1.618em .4045em 5.663em}.wy-menu-vertical li.toctree-l4.current>a{padding:.4045em 5.663em}.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a{padding:.4045em 1.618em .4045em 7.281em}.wy-menu-vertical li.toctree-l5.current>a{padding:.4045em 7.281em}.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a{padding:.4045em 1.618em .4045em 8.899em}.wy-menu-vertical li.toctree-l6.current>a{padding:.4045em 
8.899em}.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a{padding:.4045em 1.618em .4045em 10.517em}.wy-menu-vertical li.toctree-l7.current>a{padding:.4045em 10.517em}.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a{padding:.4045em 1.618em .4045em 12.135em}.wy-menu-vertical li.toctree-l8.current>a{padding:.4045em 12.135em}.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a{padding:.4045em 1.618em .4045em 13.753em}.wy-menu-vertical li.toctree-l9.current>a{padding:.4045em 13.753em}.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a{padding:.4045em 1.618em .4045em 15.371em}.wy-menu-vertical li.toctree-l10.current>a{padding:.4045em 15.371em}.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{padding:.4045em 1.618em .4045em 16.989em}.wy-menu-vertical li.toctree-l2.current>a,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{background:#c9c9c9}.wy-menu-vertical li.toctree-l2 button.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3.current>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{background:#bdbdbd}.wy-menu-vertical li.toctree-l3 button.toctree-expand{color:#969696}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical li ul li a{margin-bottom:0;color:#d9d9d9;font-weight:400}.wy-menu-vertical a{line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:90%;color:#d9d9d9}.wy-menu-vertical a:hover{background-color:#4e4a4a;cursor:pointer}.wy-menu-vertical a:hover button.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#2980b9;cursor:pointer;color:#fff}.wy-menu-vertical a:active button.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#2980b9;text-align:center;color:#fcfcfc}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#2472a4}.wy-side-nav-search img{display:block;margin:auto auto .809em;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a{color:#fcfcfc;font-size:100%;font-weight:700;display:inline-block;padding:4px 6px;margin-bottom:.809em;max-width:100%}.wy-side-nav-search .wy-dropdown>a:hover,.wy-side-nav-search>a:hover{background:hsla(0,0%,100%,.1)}.wy-side-nav-search .wy-dropdown>a img.logo,.wy-side-nav-search>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search .wy-dropdown>a.icon img.logo,.wy-side-nav-search>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:400;color:hsla(0,0%,100%,.3)}.wy-nav .wy-menu-vertical header{color:#2980b9}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#2980b9;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s 
ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#fcfcfc}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#343131;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#2980b9;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:after,.wy-nav-top:before{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:700}.wy-nav-top img{margin-right:12px;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#fcfcfc;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:grey}footer p{margin-bottom:12px}.rst-content footer span.commit tt,footer span.commit .rst-content tt,footer span.commit code{padding:0;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:1em;background:none;border:none;color:grey}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:after,.rst-footer-buttons:before{width:100%;display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:after,.rst-breadcrumbs-buttons:before{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:1px solid #e1e4e5;padding-bottom:24px}#search-results .search li:first-child{border-top:1px solid #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:grey;font-size:90%}.genindextable li>ul{margin-left:24px}@media screen and (max-width:768px){.wy-body-for-nav{background:#fcfcfc}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-menu.wy-menu-vertical,.wy-side-nav-search,.wy-side-scroll{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width:1100px){.wy-nav-content-wrap{background:rgba(0,0,0,.05)}.wy-nav-content{margin:0;background:#fcfcfc}}@media print{.rst-versions,.wy-nav-side,footer{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60;*zoom:1}.rst-versions .rst-current-version:after,.rst-versions .rst-current-version:before{display:table;content:""}.rst-versions 
.rst-current-version:after{clear:both}.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,.rst-content .eqno .rst-versions .rst-current-version .headerlink,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-content p .rst-versions .rst-current-version .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .icon,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,.rst-versions .rst-current-version .rst-content .eqno .headerlink,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-versions .rst-current-version .rst-content p .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-versions .rst-current-version .wy-menu-vertical li button.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version button.toctree-expand{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and 
(max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content .toctree-wrapper>p.caption,.rst-content h1,.rst-content h2,.rst-content h3,.rst-content h4,.rst-content h5,.rst-content h6{margin-bottom:24px}.rst-content img{max-width:100%;height:auto}.rst-content div.figure,.rst-content figure{margin-bottom:24px}.rst-content div.figure .caption-text,.rst-content figure .caption-text{font-style:italic}.rst-content div.figure p:last-child.caption,.rst-content figure p:last-child.caption{margin-bottom:0}.rst-content div.figure.align-center,.rst-content figure.align-center{text-align:center}.rst-content .section>a>img,.rst-content .section>img,.rst-content section>a>img,.rst-content section>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"\f08e";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;display:block;overflow:auto}.rst-content div[class^=highlight],.rst-content pre.literal-block{border:1px solid #e1e4e5;overflow-x:auto;margin:1px 0 24px}.rst-content div[class^=highlight] div[class^=highlight],.rst-content pre.literal-block div[class^=highlight]{padding:0;border:none;margin:0}.rst-content div[class^=highlight] td.code{width:100%}.rst-content .linenodiv pre{border-right:1px solid #e6e9ea;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^=highlight] pre{white-space:pre;margin:0;padding:12px;display:block;overflow:auto}.rst-content div[class^=highlight] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content .linenodiv pre,.rst-content div[class^=highlight] pre,.rst-content pre.literal-block{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:12px;line-height:1.4}.rst-content div.highlight .gp,.rst-content div.highlight span.linenos{user-select:none;pointer-events:none}.rst-content div.highlight span.linenos{display:inline-block;padding-left:0;padding-right:12px;margin-right:12px;border-right:1px solid #e6e9ea}.rst-content .code-block-caption{font-style:italic;font-size:85%;line-height:1;padding:1em 0;text-align:center}@media print{.rst-content .codeblock,.rst-content div[class^=highlight],.rst-content div[class^=highlight] pre{white-space:pre-wrap}}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning{clear:both}.rst-content .admonition-todo .last,.rst-content .admonition-todo>:last-child,.rst-content .admonition .last,.rst-content .admonition>:last-child,.rst-content .attention .last,.rst-content .attention>:last-child,.rst-content .caution .last,.rst-content .caution>:last-child,.rst-content .danger .last,.rst-content .danger>:last-child,.rst-content .error .last,.rst-content .error>:last-child,.rst-content .hint .last,.rst-content .hint>:last-child,.rst-content .important .last,.rst-content .important>:last-child,.rst-content .note .last,.rst-content .note>:last-child,.rst-content .seealso 
.last,.rst-content .seealso>:last-child,.rst-content .tip .last,.rst-content .tip>:last-child,.rst-content .warning .last,.rst-content .warning>:last-child{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent!important;border-color:rgba(0,0,0,.1)!important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha>li,.rst-content .toctree-wrapper ol.loweralpha,.rst-content .toctree-wrapper ol.loweralpha>li,.rst-content section ol.loweralpha,.rst-content section ol.loweralpha>li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha>li,.rst-content .toctree-wrapper ol.upperalpha,.rst-content .toctree-wrapper ol.upperalpha>li,.rst-content section ol.upperalpha,.rst-content section ol.upperalpha>li{list-style:upper-alpha}.rst-content .section ol li>*,.rst-content .section ul li>*,.rst-content .toctree-wrapper ol li>*,.rst-content .toctree-wrapper ul li>*,.rst-content section ol li>*,.rst-content section ul li>*{margin-top:12px;margin-bottom:12px}.rst-content .section ol li>:first-child,.rst-content .section ul li>:first-child,.rst-content .toctree-wrapper ol li>:first-child,.rst-content .toctree-wrapper ul li>:first-child,.rst-content section ol li>:first-child,.rst-content section ul li>:first-child{margin-top:0}.rst-content .section ol li>p,.rst-content .section ol li>p:last-child,.rst-content .section ul li>p,.rst-content .section ul li>p:last-child,.rst-content .toctree-wrapper ol li>p,.rst-content .toctree-wrapper ol li>p:last-child,.rst-content .toctree-wrapper ul li>p,.rst-content .toctree-wrapper ul li>p:last-child,.rst-content section ol li>p,.rst-content section ol li>p:last-child,.rst-content section ul li>p,.rst-content section ul li>p:last-child{margin-bottom:12px}.rst-content .section ol li>p:only-child,.rst-content .section ol li>p:only-child:last-child,.rst-content .section ul li>p:only-child,.rst-content .section ul li>p:only-child:last-child,.rst-content .toctree-wrapper ol li>p:only-child,.rst-content .toctree-wrapper ol li>p:only-child:last-child,.rst-content .toctree-wrapper ul li>p:only-child,.rst-content .toctree-wrapper ul li>p:only-child:last-child,.rst-content section ol li>p:only-child,.rst-content section ol li>p:only-child:last-child,.rst-content section ul li>p:only-child,.rst-content section ul li>p:only-child:last-child{margin-bottom:0}.rst-content .section ol li>ol,.rst-content .section ol li>ul,.rst-content .section ul li>ol,.rst-content .section ul li>ul,.rst-content .toctree-wrapper ol li>ol,.rst-content .toctree-wrapper ol li>ul,.rst-content .toctree-wrapper ul li>ol,.rst-content .toctree-wrapper ul li>ul,.rst-content section ol li>ol,.rst-content section ol li>ul,.rst-content section ul li>ol,.rst-content section ul li>ul{margin-bottom:12px}.rst-content .section ol.simple li>*,.rst-content .section ol.simple li ol,.rst-content .section ol.simple li ul,.rst-content .section ul.simple li>*,.rst-content .section ul.simple li ol,.rst-content .section ul.simple li ul,.rst-content .toctree-wrapper ol.simple li>*,.rst-content .toctree-wrapper ol.simple li ol,.rst-content .toctree-wrapper ol.simple li ul,.rst-content .toctree-wrapper ul.simple li>*,.rst-content .toctree-wrapper ul.simple li ol,.rst-content .toctree-wrapper ul.simple li ul,.rst-content section ol.simple li>*,.rst-content section ol.simple li ol,.rst-content section ol.simple li 
ul,.rst-content section ul.simple li>*,.rst-content section ul.simple li ol,.rst-content section ul.simple li ul{margin-top:0;margin-bottom:0}.rst-content .line-block{margin-left:0;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0}.rst-content .topic-title{font-weight:700;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0 0 24px 24px}.rst-content .align-left{float:left;margin:0 24px 24px 0}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink{opacity:0;font-size:14px;font-family:FontAwesome;margin-left:.5em}.rst-content .code-block-caption .headerlink:focus,.rst-content .code-block-caption:hover .headerlink,.rst-content .eqno .headerlink:focus,.rst-content .eqno:hover .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink:focus,.rst-content .toctree-wrapper>p.caption:hover .headerlink,.rst-content dl dt .headerlink:focus,.rst-content dl dt:hover .headerlink,.rst-content h1 .headerlink:focus,.rst-content h1:hover .headerlink,.rst-content h2 .headerlink:focus,.rst-content h2:hover .headerlink,.rst-content h3 .headerlink:focus,.rst-content h3:hover .headerlink,.rst-content h4 .headerlink:focus,.rst-content h4:hover .headerlink,.rst-content h5 .headerlink:focus,.rst-content h5:hover .headerlink,.rst-content h6 .headerlink:focus,.rst-content h6:hover .headerlink,.rst-content p.caption .headerlink:focus,.rst-content p.caption:hover .headerlink,.rst-content p .headerlink:focus,.rst-content p:hover .headerlink,.rst-content table>caption .headerlink:focus,.rst-content table>caption:hover .headerlink{opacity:1}.rst-content p a{overflow-wrap:anywhere}.rst-content .wy-table td p,.rst-content .wy-table td ul,.rst-content .wy-table th p,.rst-content .wy-table th ul,.rst-content table.docutils td p,.rst-content table.docutils td ul,.rst-content table.docutils th p,.rst-content table.docutils th ul,.rst-content table.field-list td p,.rst-content table.field-list td ul,.rst-content table.field-list th p,.rst-content table.field-list th ul{font-size:inherit}.rst-content .btn:focus{outline:2px solid}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:1px solid #e1e4e5}.rst-content .sidebar dl,.rst-content .sidebar p,.rst-content .sidebar ul{font-size:90%}.rst-content .sidebar .last,.rst-content .sidebar>:last-child{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif;font-weight:700;background:#e1e4e5;padding:6px 12px;margin:-24px -24px 24px;font-size:100%}.rst-content .highlighted{background:#f1c40f;box-shadow:0 0 0 2px #f1c40f;display:inline;font-weight:700}.rst-content .citation-reference,.rst-content .footnote-reference{vertical-align:baseline;position:relative;top:-.4em;line-height:0;font-size:90%}.rst-content .citation-reference>span.fn-bracket,.rst-content 
.footnote-reference>span.fn-bracket{display:none}.rst-content .hlist{width:100%}.rst-content dl dt span.classifier:before{content:" : "}.rst-content dl dt span.classifier-delimiter{display:none!important}html.writer-html4 .rst-content table.docutils.citation,html.writer-html4 .rst-content table.docutils.footnote{background:none;border:none}html.writer-html4 .rst-content table.docutils.citation td,html.writer-html4 .rst-content table.docutils.citation tr,html.writer-html4 .rst-content table.docutils.footnote td,html.writer-html4 .rst-content table.docutils.footnote tr{border:none;background-color:transparent!important;white-space:normal}html.writer-html4 .rst-content table.docutils.citation td.label,html.writer-html4 .rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{display:grid;grid-template-columns:auto minmax(80%,95%)}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{display:inline-grid;grid-template-columns:max-content auto}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{display:grid;grid-template-columns:auto auto minmax(.65rem,auto) minmax(40%,95%)}html.writer-html5 .rst-content aside.citation>span.label,html.writer-html5 .rst-content aside.footnote>span.label,html.writer-html5 .rst-content div.citation>span.label{grid-column-start:1;grid-column-end:2}html.writer-html5 .rst-content aside.citation>span.backrefs,html.writer-html5 .rst-content aside.footnote>span.backrefs,html.writer-html5 .rst-content div.citation>span.backrefs{grid-column-start:2;grid-column-end:3;grid-row-start:1;grid-row-end:3}html.writer-html5 .rst-content aside.citation>p,html.writer-html5 .rst-content aside.footnote>p,html.writer-html5 .rst-content div.citation>p{grid-column-start:4;grid-column-end:5}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{margin-bottom:24px}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{padding-left:1rem}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dd,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dd,html.writer-html5 .rst-content dl.footnote>dt{margin-bottom:0}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{font-size:.9rem}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.footnote>dt{margin:0 .5rem .5rem 0;line-height:1.2rem;word-break:break-all;font-weight:400}html.writer-html5 .rst-content dl.citation>dt>span.brackets:before,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:before{content:"["}html.writer-html5 .rst-content dl.citation>dt>span.brackets:after,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:after{content:"]"}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a,html.writer-html5 
.rst-content dl.footnote>dt>span.fn-backref>a{word-break:keep-all}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a:not(:first-child):before,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.footnote>dd{margin:0 0 .5rem;line-height:1.2rem}html.writer-html5 .rst-content dl.citation>dd p,html.writer-html5 .rst-content dl.footnote>dd p{font-size:.9rem}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{padding-left:1rem;padding-right:1rem;font-size:.9rem;line-height:1.2rem}html.writer-html5 .rst-content aside.citation p,html.writer-html5 .rst-content aside.footnote p,html.writer-html5 .rst-content div.citation p{font-size:.9rem;line-height:1.2rem;margin-bottom:12px}html.writer-html5 .rst-content aside.citation span.backrefs,html.writer-html5 .rst-content aside.footnote span.backrefs,html.writer-html5 .rst-content div.citation span.backrefs{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content aside.citation span.backrefs>a,html.writer-html5 .rst-content aside.footnote span.backrefs>a,html.writer-html5 .rst-content div.citation span.backrefs>a{word-break:keep-all}html.writer-html5 .rst-content aside.citation span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content aside.footnote span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content div.citation span.backrefs>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content aside.citation span.label,html.writer-html5 .rst-content aside.footnote span.label,html.writer-html5 .rst-content div.citation span.label{line-height:1.2rem}html.writer-html5 .rst-content aside.citation-list,html.writer-html5 .rst-content aside.footnote-list,html.writer-html5 .rst-content div.citation-list{margin-bottom:24px}html.writer-html5 .rst-content dl.option-list kbd{font-size:.9rem}.rst-content table.docutils.footnote,html.writer-html4 .rst-content table.docutils.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content aside.footnote-list aside.footnote,html.writer-html5 .rst-content div.citation-list>div.citation,html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{color:grey}.rst-content table.docutils.footnote code,.rst-content table.docutils.footnote tt,html.writer-html4 .rst-content table.docutils.citation code,html.writer-html4 .rst-content table.docutils.citation tt,html.writer-html5 .rst-content aside.footnote-list aside.footnote code,html.writer-html5 .rst-content aside.footnote-list aside.footnote tt,html.writer-html5 .rst-content aside.footnote code,html.writer-html5 .rst-content aside.footnote tt,html.writer-html5 .rst-content div.citation-list>div.citation code,html.writer-html5 .rst-content div.citation-list>div.citation tt,html.writer-html5 .rst-content dl.citation code,html.writer-html5 .rst-content dl.citation tt,html.writer-html5 .rst-content dl.footnote code,html.writer-html5 .rst-content dl.footnote tt{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content 
.wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e1e4e5}html.writer-html5 .rst-content table.docutils th{border:1px solid #e1e4e5}html.writer-html5 .rst-content table.docutils td>p,html.writer-html5 .rst-content table.docutils th>p{line-height:1rem;margin-bottom:0;font-size:.9rem}.rst-content table.docutils td .last,.rst-content table.docutils td .last>:last-child{margin-bottom:0}.rst-content table.field-list,.rst-content table.field-list td{border:none}.rst-content table.field-list td p{line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content code,.rst-content tt{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;padding:2px 5px}.rst-content code big,.rst-content code em,.rst-content tt big,.rst-content tt em{font-size:100%!important;line-height:normal}.rst-content code.literal,.rst-content tt.literal{color:#e74c3c;white-space:normal}.rst-content code.xref,.rst-content tt.xref,a .rst-content code,a .rst-content tt{font-weight:700;color:#404040;overflow-wrap:normal}.rst-content kbd,.rst-content pre,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace}.rst-content a code,.rst-content a tt{color:#2980b9}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:700;margin-bottom:12px}.rst-content dl ol,.rst-content dl p,.rst-content dl table,.rst-content dl ul{margin-bottom:12px}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}.rst-content dl dd>ol:last-child,.rst-content dl dd>p:last-child,.rst-content dl dd>table:last-child,.rst-content dl dd>ul:last-child{margin-bottom:0}html.writer-html4 .rst-content dl:not(.docutils),html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple){margin-bottom:24px}html.writer-html4 .rst-content dl:not(.docutils)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980b9;border-top:3px solid #6ab0de;padding:6px;position:relative}html.writer-html4 .rst-content dl:not(.docutils)>dt:before,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:before{color:#6ab0de}html.writer-html4 .rst-content dl:not(.docutils)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{margin-bottom:6px;border:none;border-left:3px solid #ccc;background:#f0f0f0;color:#555}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink,html.writer-html5 .rst-content 
dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils)>dt:first-child,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:first-child{margin-top:0}html.writer-html4 .rst-content dl:not(.docutils) code.descclassname,html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descclassname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{background-color:transparent;border:none;padding:0;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .optional,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .property,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .property{display:inline-block;padding-right:8px;max-width:100%}html.writer-html4 .rst-content dl:not(.docutils) .k,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .k{font-style:italic}html.writer-html4 .rst-content dl:not(.docutils) .descclassname,html.writer-html4 .rst-content dl:not(.docutils) .descname,html.writer-html4 .rst-content dl:not(.docutils) .sig-name,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .sig-name{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#000}.rst-content .viewcode-back,.rst-content .viewcode-link{display:inline-block;color:#27ae60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:700}.rst-content 
code.download,.rst-content tt.download{background:inherit;padding:inherit;font-weight:400;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content code.download span:first-child,.rst-content tt.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{margin-right:4px}.rst-content .guilabel,.rst-content .menuselection{font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .guilabel,.rst-content .menuselection{border:1px solid #7fbbe3;background:#e7f2fa}.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>.kbd,.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>kbd{color:inherit;font-size:80%;background-color:#fff;border:1px solid #a6a6a6;border-radius:4px;box-shadow:0 2px grey;padding:2.4px 6px;margin:auto 0}.rst-content .versionmodified{font-style:italic}@media screen and (max-width:480px){.rst-content .sidebar{width:100%}}span[id*=MathJax-Span]{color:#404040}.math{text-align:center}@font-face{font-family:Lato;src:url(fonts/lato-normal.woff2?bd03a2cc277bbbc338d464e679fe9942) format("woff2"),url(fonts/lato-normal.woff?27bd77b9162d388cb8d4c4217c7c5e2a) format("woff");font-weight:400;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold.woff2?cccb897485813c7c256901dbca54ecf2) format("woff2"),url(fonts/lato-bold.woff?d878b6c29b10beca227e9eef4246111b) format("woff");font-weight:700;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold-italic.woff2?0b6bb6725576b072c5d0b02ecdd1900d) format("woff2"),url(fonts/lato-bold-italic.woff?9c7e4e9eb485b4a121c760e61bc3707c) format("woff");font-weight:700;font-style:italic;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-normal-italic.woff2?4eb103b4d12be57cb1d040ed5e162e9d) format("woff2"),url(fonts/lato-normal-italic.woff?f28f2d6482446544ef1ea1ccc6dd5892) format("woff");font-weight:400;font-style:italic;font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:400;src:url(fonts/Roboto-Slab-Regular.woff2?7abf5b8d04d26a2cafea937019bca958) format("woff2"),url(fonts/Roboto-Slab-Regular.woff?c1be9284088d487c5e3ff0a10a92e58c) format("woff");font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:700;src:url(fonts/Roboto-Slab-Bold.woff2?9984f4a9bda09be08e83f2506954adbe) format("woff2"),url(fonts/Roboto-Slab-Bold.woff?bed5564a116b05148e3b3bea6fb1162a) format("woff");font-display:block} \ No newline at end of file diff --git a/docs/_build/_static/dark_mode_css/custom.css b/docs/_build/_static/dark_mode_css/custom.css new file mode 100644 index 0000000..989c2ad --- /dev/null +++ b/docs/_build/_static/dark_mode_css/custom.css @@ -0,0 +1,77 @@ +.wy-side-nav-search input[type='text'] { + border-radius: 3px; +} + +input[type='color'], +input[type='date'], +input[type='datetime-local'], +input[type='datetime'], +input[type='email'], +input[type='month'], +input[type='number'], +input[type='password'], +input[type='search'], +input[type='tel'], +input[type='text'], +input[type='time'], +input[type='url'], +input[type='week'] { + box-shadow: none; +} + +.theme-switcher { + border-radius: 50%; + position: fixed; + right: 1.6em; + bottom: 1.4em; + z-index: 3; + border: none; + height: 2.2em; + width: 2.2em; + background-color: #fcfcfc; + font-size: 20px; + -webkit-box-shadow: 0px 3px 14px 4px rgba(0, 0, 0, 0.62); + box-shadow: 
0px 3px 14px 4px rgba(0, 0, 0, 0.62); + color: #404040; + transition: all 0.3s ease-in-out; +} + +.wy-nav-content a, +.wy-nav-content a:visited { + color: #3091d1; +} + +body, +.wy-nav-content-wrap, +.wy-nav-content, +.section, +.highlight, +.rst-content div[class^='highlight'], +.wy-nav-content a, +.btn-neutral, +.btn, +footer, +.wy-nav-side, +.wy-menu-vertical li, +.wy-menu-vertical a, +.wy-side-nav-search .wy-dropdown, +.wy-side-nav-search a, +.wy-side-nav-search input, +html.writer-html4 .rst-content dl:not(.docutils) > dt, +html.writer-html5 + .rst-content + dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) + > dt, +.rst-content code, +.rst-content tt, +html.writer-html4 .rst-content dl:not(.docutils) dl:not(.field-list) > dt, +html.writer-html5 + .rst-content + dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) + dl:not(.field-list) + > dt, +code, +.rst-content code.xref, +.rst-content tt.xref { + transition: all 0.2s ease-in-out; +} diff --git a/docs/_build/_static/dark_mode_css/dark.css b/docs/_build/_static/dark_mode_css/dark.css new file mode 100644 index 0000000..e863889 --- /dev/null +++ b/docs/_build/_static/dark_mode_css/dark.css @@ -0,0 +1,520 @@ +:root { + --dark-text-color: #c1c1c1; + --dark-link-color: #249ee8; +} + +html[data-theme="dark"] body { + color: #bfbfbf; +} + +html[data-theme="dark"] .wy-nav-content-wrap { + background-color: #101010; +} + +html[data-theme="dark"] .wy-nav-content { + background-color: #141414; +} + +html[data-theme="dark"] .section { + color: var(--dark-text-color); +} + +html[data-theme="dark"] .highlight { + background-color: #17181c; +} + +html[data-theme="dark"] .highlight .nn { + color: var(--dark-text-color); +} + +html[data-theme="dark"] .highlight .nb { + color: #8bb8df; +} + +html[data-theme="dark"] .highlight .nv { + color: #40ffff; +} + +html[data-theme="dark"] .highlight .kn, +html[data-theme="dark"] .highlight .kc, +html[data-theme="dark"] .highlight .k { + color: #41c2ea; +} + +html[data-theme="dark"] .highlight .s1, +html[data-theme="dark"] .highlight .s2 { + color: #b3e87f; +} + +html[data-theme="dark"] .highlight .nt { + color: #ccb350; +} + +html[data-theme="dark"] .highlight .c1 { + color: #686868; +} + +html[data-theme="dark"] .highlight .hll { + background-color: #002c4d; +} + +html[data-theme="dark"] .rst-content div[class^="highlight"] { + border-color: #1a1a1a; +} + +html[data-theme="dark"] .wy-nav-content a, +html[data-theme="dark"] .wy-nav-content a:visited { + color: var(--dark-link-color); +} + +html[data-theme="dark"] .btn-neutral { + background-color: #17181c !important; +} + +html[data-theme="dark"] .btn-neutral:hover { + background-color: #101114 !important; +} + +html[data-theme="dark"] .btn-neutral:visited { + color: #c1c1c1 !important; +} + +html[data-theme="dark"] .btn { + box-shadow: none; +} + +html[data-theme="dark"] footer { + color: #bdbdbd; +} + +html[data-theme="dark"] .wy-nav-side { + background-color: #0d0d0d; +} + +html[data-theme="dark"] .wy-menu-vertical li.current { + background-color: #141414; +} + +html[data-theme="dark"] .wy-menu-vertical li.current > a, +html[data-theme="dark"] .wy-menu-vertical li.on a { + background-color: #141415; + color: var(--dark-text-color); +} + +html[data-theme="dark"] .wy-menu-vertical li.toctree-l1.current > a, +html[data-theme="dark"] .wy-menu-vertical li.current a { + border-color: #0b0c0d; +} + +html[data-theme="dark"] .wy-menu-vertical li.current a { + color: #bbb; +} + 
+html[data-theme="dark"] .wy-menu-vertical li.current a:hover { + background-color: #222; +} + +html[data-theme="dark"] .wy-menu-vertical a:hover, +html[data-theme="dark"] .wy-menu-vertical li.current > a:hover, +html[data-theme="dark"] .wy-menu-vertical li.on a:hover { + background-color: #1e1e1e; +} + +html[data-theme="dark"] .wy-menu-vertical li.toctree-l2.current > a, +html[data-theme="dark"] + .wy-menu-vertical + li.toctree-l2.current + li.toctree-l3 + > a { + background-color: #18181a; +} + +html[data-theme="dark"] .wy-side-nav-search { + background-color: #0b152d; +} + +html[data-theme="dark"] .wy-side-nav-search .wy-dropdown > a, +html[data-theme="dark"] .wy-side-nav-search > a { + color: #ddd; +} + +html[data-theme="dark"] .wy-side-nav-search input[type="text"] { + border-color: #111; + background-color: #141414; + color: var(--dark-text-color); +} + +html[data-theme="dark"] .theme-switcher { + background-color: #0b0c0d; + color: var(--dark-text-color); +} + +html[data-theme="dark"].writer-html4 .rst-content dl:not(.docutils) > dt, +html[data-theme="dark"].writer-html5 + .rst-content + dl[class]:not(.option-list):not(.field-list):not(.footnote):not( + .glossary + ):not(.simple) + > dt { + background-color: #0b0b0b; + color: #007dce; + border-color: #282828; +} + +html[data-theme="dark"] .rst-content code, +html[data-theme="dark"] .rst-content tt { + color: var(--dark-text-color); +} + +html[data-theme="dark"].writer-html4 + .rst-content + dl:not(.docutils) + dl:not(.field-list) + > dt, +html[data-theme="dark"].writer-html5 + .rst-content + dl[class]:not(.option-list):not(.field-list):not(.footnote):not( + .glossary + ):not(.simple) + dl:not(.field-list) + > dt { + background-color: #0f0f0f; + color: #959595; + border-color: #2b2b2b; +} + +html[data-theme="dark"] .rst-content code, +html[data-theme="dark"] .rst-content tt, +html[data-theme="dark"] code { + background-color: #2d2d2d; + border-color: #1c1c1c; +} + +html[data-theme="dark"] .rst-content code.xref, +html[data-theme="dark"] .rst-content tt.xref, +html[data-theme="dark"] a .rst-content code, +html[data-theme="dark"] a .rst-content tt { + color: #cecece; +} + +html[data-theme="dark"] .rst-content .hint, +html[data-theme="dark"] .rst-content .important, +html[data-theme="dark"] .rst-content .tip, +html[data-theme="dark"] .rst-content .wy-alert-success.admonition, +html[data-theme="dark"] .rst-content .wy-alert-success.admonition-todo, +html[data-theme="dark"] .rst-content .wy-alert-success.attention, +html[data-theme="dark"] .rst-content .wy-alert-success.caution, +html[data-theme="dark"] .rst-content .wy-alert-success.danger, +html[data-theme="dark"] .rst-content .wy-alert-success.error, +html[data-theme="dark"] .rst-content .wy-alert-success.note, +html[data-theme="dark"] .rst-content .wy-alert-success.seealso, +html[data-theme="dark"] .rst-content .wy-alert-success.warning, +html[data-theme="dark"] .wy-alert.wy-alert-success { + background-color: #00392e; +} + +html[data-theme="dark"] .rst-content .hint .admonition-title, +html[data-theme="dark"] .rst-content .hint .wy-alert-title, +html[data-theme="dark"] .rst-content .important .admonition-title, +html[data-theme="dark"] .rst-content .important .wy-alert-title, +html[data-theme="dark"] .rst-content .tip .admonition-title, +html[data-theme="dark"] .rst-content .tip .wy-alert-title, +html[data-theme="dark"] + .rst-content + .wy-alert-success.admonition-todo + .admonition-title, +html[data-theme="dark"] + .rst-content + .wy-alert-success.admonition-todo + .wy-alert-title, 
+html[data-theme="dark"] + .rst-content + .wy-alert-success.admonition + .admonition-title, +html[data-theme="dark"] + .rst-content + .wy-alert-success.admonition + .wy-alert-title, +html[data-theme="dark"] + .rst-content + .wy-alert-success.attention + .admonition-title, +html[data-theme="dark"] + .rst-content + .wy-alert-success.attention + .wy-alert-title, +html[data-theme="dark"] + .rst-content + .wy-alert-success.caution + .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-success.caution .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-success.danger .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-success.danger .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-success.error .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-success.error .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-success.note .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-success.note .wy-alert-title, +html[data-theme="dark"] + .rst-content + .wy-alert-success.seealso + .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-success.seealso .wy-alert-title, +html[data-theme="dark"] + .rst-content + .wy-alert-success.warning + .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-success.warning .wy-alert-title, +html[data-theme="dark"] + .rst-content + .wy-alert.wy-alert-success + .admonition-title, +html[data-theme="dark"] + .wy-alert.wy-alert-success + .rst-content + .admonition-title, +html[data-theme="dark"] .wy-alert.wy-alert-success .wy-alert-title { + background-color: #006a56; +} + +html[data-theme="dark"] .rst-content .admonition, +html[data-theme="dark"] .rst-content .note, +html[data-theme="dark"] .rst-content .seealso, +html[data-theme="dark"] .rst-content .wy-alert-info.admonition, +html[data-theme="dark"] .rst-content .wy-alert-info.admonition-todo, +html[data-theme="dark"] .rst-content .wy-alert-info.attention, +html[data-theme="dark"] .rst-content .wy-alert-info.caution, +html[data-theme="dark"] .rst-content .wy-alert-info.danger, +html[data-theme="dark"] .rst-content .wy-alert-info.error, +html[data-theme="dark"] .rst-content .wy-alert-info.hint, +html[data-theme="dark"] .rst-content .wy-alert-info.important, +html[data-theme="dark"] .rst-content .wy-alert-info.tip, +html[data-theme="dark"] .rst-content .wy-alert-info.warning, +html[data-theme="dark"] .wy-alert.wy-alert-info { + background-color: #002c4d; +} + +html[data-theme="dark"] .rst-content .admonition .admonition-title, +html[data-theme="dark"] .rst-content .note .admonition-title, +html[data-theme="dark"] .rst-content .note .wy-alert-title, +html[data-theme="dark"] .rst-content .seealso .admonition-title, +html[data-theme="dark"] .rst-content .seealso .wy-alert-title, +html[data-theme="dark"] + .rst-content + .wy-alert-info.admonition-todo + .admonition-title, +html[data-theme="dark"] + .rst-content + .wy-alert-info.admonition-todo + .wy-alert-title, +html[data-theme="dark"] + .rst-content + .wy-alert-info.admonition + .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-info.admonition .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-info.attention .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-info.attention .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-info.caution .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-info.caution .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-info.danger 
.admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-info.danger .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-info.error .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-info.error .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-info.hint .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-info.hint .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-info.important .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-info.important .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-info.tip .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-info.tip .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-info.warning .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-info.warning .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert.wy-alert-info .admonition-title, +html[data-theme="dark"] .wy-alert.wy-alert-info .rst-content .admonition-title, +html[data-theme="dark"] .wy-alert.wy-alert-info .wy-alert-title { + background-color: #004a7b; +} + +html[data-theme="dark"] .rst-content .admonition-todo, +html[data-theme="dark"] .rst-content .attention, +html[data-theme="dark"] .rst-content .caution, +html[data-theme="dark"] .rst-content .warning, +html[data-theme="dark"] .rst-content .wy-alert-warning.admonition, +html[data-theme="dark"] .rst-content .wy-alert-warning.danger, +html[data-theme="dark"] .rst-content .wy-alert-warning.error, +html[data-theme="dark"] .rst-content .wy-alert-warning.hint, +html[data-theme="dark"] .rst-content .wy-alert-warning.important, +html[data-theme="dark"] .rst-content .wy-alert-warning.note, +html[data-theme="dark"] .rst-content .wy-alert-warning.seealso, +html[data-theme="dark"] .rst-content .wy-alert-warning.tip, +html[data-theme="dark"] .wy-alert.wy-alert-warning { + background-color: #533500; +} + +html[data-theme="dark"] .rst-content .admonition-todo .admonition-title, +html[data-theme="dark"] .rst-content .admonition-todo .wy-alert-title, +html[data-theme="dark"] .rst-content .attention .admonition-title, +html[data-theme="dark"] .rst-content .attention .wy-alert-title, +html[data-theme="dark"] .rst-content .caution .admonition-title, +html[data-theme="dark"] .rst-content .caution .wy-alert-title, +html[data-theme="dark"] .rst-content .warning .admonition-title, +html[data-theme="dark"] .rst-content .warning .wy-alert-title, +html[data-theme="dark"] + .rst-content + .wy-alert-warning.admonition + .admonition-title, +html[data-theme="dark"] + .rst-content + .wy-alert-warning.admonition + .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-warning.danger .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-warning.danger .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-warning.error .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-warning.error .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-warning.hint .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-warning.hint .wy-alert-title, +html[data-theme="dark"] + .rst-content + .wy-alert-warning.important + .admonition-title, +html[data-theme="dark"] + .rst-content + .wy-alert-warning.important + .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-warning.note .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-warning.note .wy-alert-title, +html[data-theme="dark"] + .rst-content + .wy-alert-warning.seealso + 
.admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-warning.seealso .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-warning.tip .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-warning.tip .wy-alert-title, +html[data-theme="dark"] + .rst-content + .wy-alert.wy-alert-warning + .admonition-title, +html[data-theme="dark"] + .wy-alert.wy-alert-warning + .rst-content + .admonition-title, +html[data-theme="dark"] .wy-alert.wy-alert-warning .wy-alert-title { + background-color: #803b00; +} + +html[data-theme="dark"] .rst-content .danger, +html[data-theme="dark"] .rst-content .error, +html[data-theme="dark"] .rst-content .wy-alert-danger.admonition, +html[data-theme="dark"] .rst-content .wy-alert-danger.admonition-todo, +html[data-theme="dark"] .rst-content .wy-alert-danger.attention, +html[data-theme="dark"] .rst-content .wy-alert-danger.caution, +html[data-theme="dark"] .rst-content .wy-alert-danger.hint, +html[data-theme="dark"] .rst-content .wy-alert-danger.important, +html[data-theme="dark"] .rst-content .wy-alert-danger.note, +html[data-theme="dark"] .rst-content .wy-alert-danger.seealso, +html[data-theme="dark"] .rst-content .wy-alert-danger.tip, +html[data-theme="dark"] .rst-content .wy-alert-danger.warning, +html[data-theme="dark"] .wy-alert.wy-alert-danger { + background-color: #82231a; +} + +html[data-theme="dark"] .rst-content .danger .admonition-title, +html[data-theme="dark"] .rst-content .danger .wy-alert-title, +html[data-theme="dark"] .rst-content .error .admonition-title, +html[data-theme="dark"] .rst-content .error .wy-alert-title, +html[data-theme="dark"] + .rst-content + .wy-alert-danger.admonition-todo + .admonition-title, +html[data-theme="dark"] + .rst-content + .wy-alert-danger.admonition-todo + .wy-alert-title, +html[data-theme="dark"] + .rst-content + .wy-alert-danger.admonition + .admonition-title, +html[data-theme="dark"] + .rst-content + .wy-alert-danger.admonition + .wy-alert-title, +html[data-theme="dark"] + .rst-content + .wy-alert-danger.attention + .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-danger.attention .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-danger.caution .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-danger.caution .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-danger.hint .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-danger.hint .wy-alert-title, +html[data-theme="dark"] + .rst-content + .wy-alert-danger.important + .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-danger.important .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-danger.note .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-danger.note .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-danger.seealso .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-danger.seealso .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-danger.tip .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-danger.tip .wy-alert-title, +html[data-theme="dark"] .rst-content .wy-alert-danger.warning .admonition-title, +html[data-theme="dark"] .rst-content .wy-alert-danger.warning .wy-alert-title, +html[data-theme="dark"] + .rst-content + .wy-alert.wy-alert-danger + .admonition-title, +html[data-theme="dark"] + .wy-alert.wy-alert-danger + .rst-content + .admonition-title, +html[data-theme="dark"] .wy-alert.wy-alert-danger .wy-alert-title { + background-color: 
#b9372b; +} + +html[data-theme="dark"] .wy-nav-top { + background-color: #0b152d; +} + +html[data-theme="dark"] .rst-content table.docutils thead, +html[data-theme="dark"] .rst-content table.field-list thead, +html[data-theme="dark"] .wy-table thead { + color: var(--dark-text-color); +} + +html[data-theme="dark"] + .rst-content + table.docutils:not(.field-list) + tr:nth-child(2n-1) + td, +html[data-theme="dark"] .wy-table-backed, +html[data-theme="dark"] html[data-theme="dark"] .wy-table-odd td, +html[data-theme="dark"] .wy-table-striped tr:nth-child(2n-1) td { + background-color: #181818; +} + +html[data-theme="dark"] .rst-content table.docutils td, +html[data-theme="dark"] .wy-table-bordered-all td, +html[data-theme="dark"].writer-html5 .rst-content table.docutils th, +html[data-theme="dark"] .rst-content table.docutils, +html[data-theme="dark"] .wy-table-bordered-all { + border-color: #262626; +} + +html[data-theme="dark"] .rst-content table.docutils caption, +html[data-theme="dark"] .rst-content table.field-list caption, +html[data-theme="dark"] .wy-table caption { + color: var(--dark-text-color); +} + +html[data-theme="dark"] .wy-menu-vertical li.toctree-l3.current > a, +html[data-theme="dark"] + .wy-menu-vertical + li.toctree-l3.current + li.toctree-l4 + > a { + background-color: #18181a; +} + +html[data-theme="dark"] .guilabel { + background-color: #343434; + border-color: #4d4d4d; +} diff --git a/docs/_build/_static/dark_mode_css/general.css b/docs/_build/_static/dark_mode_css/general.css new file mode 100644 index 0000000..aa614f8 --- /dev/null +++ b/docs/_build/_static/dark_mode_css/general.css @@ -0,0 +1,68 @@ +input[type='color'], +input[type='date'], +input[type='datetime-local'], +input[type='datetime'], +input[type='email'], +input[type='month'], +input[type='number'], +input[type='password'], +input[type='search'], +input[type='tel'], +input[type='text'], +input[type='time'], +input[type='url'], +input[type='week'] { + box-shadow: none; +} + +.theme-switcher { + border-radius: 50%; + position: fixed; + right: 1.6em; + bottom: 1.4em; + z-index: 3; + border: none; + height: 2.2em; + width: 2.2em; + background-color: #fcfcfc; + font-size: 20px; + -webkit-box-shadow: 0px 3px 14px 4px rgba(0, 0, 0, 0.62); + box-shadow: 0px 3px 14px 4px rgba(0, 0, 0, 0.62); + color: #404040; + transition: all 0.3s ease-in-out; +} + +body, +.wy-nav-content-wrap, +.wy-nav-content, +.section, +.highlight, +.rst-content div[class^='highlight'], +.wy-nav-content a, +.btn-neutral, +.btn, +footer, +.wy-nav-side, +.wy-menu-vertical li, +.wy-menu-vertical a, +.wy-side-nav-search .wy-dropdown, +.wy-side-nav-search a, +.wy-side-nav-search input, +html.writer-html4 .rst-content dl:not(.docutils) > dt, +html.writer-html5 + .rst-content + dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) + > dt, +.rst-content code, +.rst-content tt, +html.writer-html4 .rst-content dl:not(.docutils) dl:not(.field-list) > dt, +html.writer-html5 + .rst-content + dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) + dl:not(.field-list) + > dt, +code, +.rst-content code.xref, +.rst-content tt.xref { + transition: all 0.2s ease-in-out; +} diff --git a/docs/_build/_static/dark_mode_js/default_dark.js b/docs/_build/_static/dark_mode_js/default_dark.js new file mode 100644 index 0000000..ea63e07 --- /dev/null +++ b/docs/_build/_static/dark_mode_js/default_dark.js @@ -0,0 +1,13 @@ +const loadTheme = () => { + let theme = localStorage.getItem('theme'); + + if 
(theme !== null) { + if (theme === 'dark') + document.documentElement.setAttribute('data-theme', 'dark'); + } else { + localStorage.setItem('theme', 'dark'); + document.documentElement.setAttribute('data-theme', 'dark'); + } +}; + +loadTheme(); diff --git a/docs/_build/_static/dark_mode_js/default_light.js b/docs/_build/_static/dark_mode_js/default_light.js new file mode 100644 index 0000000..2b19f92 --- /dev/null +++ b/docs/_build/_static/dark_mode_js/default_light.js @@ -0,0 +1,13 @@ +const loadTheme = () => { + let theme = localStorage.getItem('theme'); + + if (theme !== null) { + if (theme === 'dark') + document.documentElement.setAttribute('data-theme', 'dark'); + } else { + localStorage.setItem('theme', 'light'); + document.documentElement.setAttribute('data-theme', 'light'); + } +}; + +loadTheme(); diff --git a/docs/_build/_static/dark_mode_js/theme_switcher.js b/docs/_build/_static/dark_mode_js/theme_switcher.js new file mode 100644 index 0000000..8e26055 --- /dev/null +++ b/docs/_build/_static/dark_mode_js/theme_switcher.js @@ -0,0 +1,39 @@ +const createThemeSwitcher = () => { + let btn = document.createElement('BUTTON'); + btn.className = 'theme-switcher'; + btn.id = 'themeSwitcher'; + btn.innerHTML = + '<i class="fas fa-sun" id="themeSun"></i><i class="fas fa-moon" id="themeMoon"></i>'; + document.body.appendChild(btn); + + if (localStorage.getItem('theme') === 'dark') $('#themeMoon').hide(0); + else $('#themeSun').hide(0); +}; + +$(document).ready(() => { + createThemeSwitcher(); + $('#themeSwitcher').click(switchTheme); + + $('footer').html( + $('footer').html() + + 'Dark theme provided by MrDogeBro.' + ); +}); + +const switchTheme = () => { + if (localStorage.getItem('theme') === 'dark') { + localStorage.setItem('theme', 'light'); + document.documentElement.setAttribute('data-theme', 'light'); + + $('#themeSun').fadeOut(200, () => { + $('#themeMoon').fadeIn(200); + }); + } else { + localStorage.setItem('theme', 'dark'); + document.documentElement.setAttribute('data-theme', 'dark'); + + $('#themeMoon').fadeOut(200, () => { + $('#themeSun').fadeIn(200); + }); + } +}; diff --git a/docs/_build/_static/doctools.js b/docs/_build/_static/doctools.js new file mode 100644 index 0000000..d06a71d --- /dev/null +++ b/docs/_build/_static/doctools.js @@ -0,0 +1,156 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ?
0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/docs/_build/_static/documentation_options.js b/docs/_build/_static/documentation_options.js new file mode 100644 index 0000000..13d90ff --- /dev/null +++ b/docs/_build/_static/documentation_options.js @@ -0,0 +1,13 @@ +const 
DOCUMENTATION_OPTIONS = { + VERSION: '0.1.0', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/docs/_build/_static/file.png b/docs/_build/_static/file.png new file mode 100644 index 0000000..a858a41 Binary files /dev/null and b/docs/_build/_static/file.png differ diff --git a/docs/_build/_static/jquery.js b/docs/_build/_static/jquery.js new file mode 100644 index 0000000..c4c6022 --- /dev/null +++ b/docs/_build/_static/jquery.js @@ -0,0 +1,2 @@ +/*! jQuery v3.6.0 | (c) OpenJS Foundation and other contributors | jquery.org/license */ +!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(C,e){"use strict";var t=[],r=Object.getPrototypeOf,s=t.slice,g=t.flat?function(e){return t.flat.call(e)}:function(e){return t.concat.apply([],e)},u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},m=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType&&"function"!=typeof e.item},x=function(e){return null!=e&&e===e.window},E=C.document,c={type:!0,src:!0,nonce:!0,noModule:!0};function b(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.6.0",S=function(e,t){return new S.fn.init(e,t)};function p(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!m(e)&&!x(e)&&("array"===n||0===t||"number"==typeof t&&0+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp(F),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+F),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\[\\da-fA-F]{1,6}"+M+"?|\\\\([^\\r\\n\\f])","g"),ne=function(e,t){var n="0x"+e.slice(1)-65536;return t||(n<0?String.fromCharCode(n+65536):String.fromCharCode(n>>10|55296,1023&n|56320))},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(p.childNodes),p.childNodes),t[p.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return 
n;if(!r&&(T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!N[t+" "]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&(U.test(t)||z.test(t))){(f=ee.test(t)&&ye(e.parentNode)||e)===e&&d.scope||((s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=S)),o=(l=h(t)).length;while(o--)l[o]=(s?"#"+s:":scope")+" "+xe(l[o]);c=l.join(",")}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){N(t,!0)}finally{s===S&&e.removeAttribute("id")}}}return g(t.replace($,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[S]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e&&e.namespaceURI,n=e&&(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:p;return r!=C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),p!=C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.scope=ce(function(e){return a.appendChild(e).appendChild(C.createElement("div")),"undefined"!=typeof e.querySelectorAll&&!e.querySelectorAll(":scope fieldset div").length}),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=S,!C.getElementsByName||!C.getElementsByName(S).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var 
n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){var t;a.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+S+"-]").length||v.push("~="),(t=C.createElement("input")).setAttribute("name",""),e.appendChild(t),e.querySelectorAll("[name='']").length||v.push("\\["+M+"*name"+M+"*="+M+"*(?:''|\"\")"),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+S+"+*").length||v.push(".#.+[+~]"),e.querySelectorAll("\\\f"),v.push("[\\r\\n\\f]")}),ce(function(e){e.innerHTML="";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",F)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},j=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)==(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e==C||e.ownerDocument==p&&y(p,e)?-1:t==C||t.ownerDocument==p&&y(p,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e==C?-1:t==C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]==p?-1:s[r]==p?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if(T(e),d.matchesSelector&&E&&!N[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){N(t,!0)}return 0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return 
e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=m[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&m(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?S.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?S.grep(e,function(e){return e===n!==r}):"string"!=typeof n?S.grep(e,function(e){return-1)[^>]*|#([\w-]+))$/;(S.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||D,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:q.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof S?t[0]:t,S.merge(this,S.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),N.test(r[1])&&S.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(S):S.makeArray(e,this)}).prototype=S.fn,D=S(E);var L=/^(?:parents|prev(?:Until|All))/,H={children:!0,contents:!0,next:!0,prev:!0};function O(e,t){while((e=e[t])&&1!==e.nodeType);return e}S.fn.extend({has:function(e){var t=S(e,this),n=t.length;return this.filter(function(){for(var e=0;e\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i;ce=E.createDocumentFragment().appendChild(E.createElement("div")),(fe=E.createElement("input")).setAttribute("type","radio"),fe.setAttribute("checked","checked"),fe.setAttribute("name","t"),ce.appendChild(fe),y.checkClone=ce.cloneNode(!0).cloneNode(!0).lastChild.checked,ce.innerHTML="",y.noCloneChecked=!!ce.cloneNode(!0).lastChild.defaultValue,ce.innerHTML="",y.option=!!ce.lastChild;var ge={thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?S.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;n",""]);var me=/<|&#?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d\s*$/g;function je(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&S(e).children("tbody")[0]||e}function De(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function qe(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Le(e,t){var n,r,i,o,a,s;if(1===t.nodeType){if(Y.hasData(e)&&(s=Y.get(e).events))for(i in Y.remove(t,"handle events"),s)for(n=0,r=s[i].length;n").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var _t,zt=[],Ut=/(=)\?(?=&|$)|\?\?/;S.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=zt.pop()||S.expando+"_"+wt.guid++;return this[e]=!0,e}}),S.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Ut.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Ut.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Ut,"$1"+r):!1!==e.jsonp&&(e.url+=(Tt.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||S.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?S(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,zt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((_t=E.implementation.createHTMLDocument("").body).innerHTML="
",2===_t.childNodes.length),S.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=N.exec(e))?[t.createElement(i[1])]:(i=xe([e],t,o),o&&o.length&&S(o).remove(),S.merge([],i.childNodes)));var r,i,o},S.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(S.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},S.expr.pseudos.animated=function(t){return S.grep(S.timers,function(e){return t===e.elem}).length},S.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=S.css(e,"position"),c=S(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=S.css(e,"top"),u=S.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,S.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},S.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){S.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===S.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===S.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=S(e).offset()).top+=S.css(e,"borderTopWidth",!0),i.left+=S.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-S.css(r,"marginTop",!0),left:t.left-i.left-S.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===S.css(e,"position"))e=e.offsetParent;return e||re})}}),S.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;S.fn[t]=function(e){return $(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),S.each(["top","left"],function(e,n){S.cssHooks[n]=Fe(y.pixelPosition,function(e,t){if(t)return t=We(e,n),Pe.test(t)?S(e).position()[n]+"px":t})}),S.each({Height:"height",Width:"width"},function(a,s){S.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){S.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return $(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?S.css(e,t,i):S.style(e,t,n,i)},s,n?e:void 0,n)}})}),S.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){S.fn[t]=function(e){return this.on(t,e)}}),S.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},hover:function(e,t){return 
this.mouseenter(e).mouseleave(t||e)}}),S.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){S.fn[n]=function(e,t){return 0",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=y.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=y.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),y.elements=c+" "+a,j(b)}function f(a){var b=x[a[v]];return b||(b={},w++,a[v]=w,x[w]=b),b}function g(a,c,d){if(c||(c=b),q)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():u.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||t.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),q)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return y.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(y,b.frag)}function j(a){a||(a=b);var d=f(a);return!y.shivCSS||p||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),q||i(a,d),a}function k(a){for(var b,c=a.getElementsByTagName("*"),e=c.length,f=RegExp("^(?:"+d().join("|")+")$","i"),g=[];e--;)b=c[e],f.test(b.nodeName)&&g.push(b.applyElement(l(b)));return g}function l(a){for(var b,c=a.attributes,d=c.length,e=a.ownerDocument.createElement(A+":"+a.nodeName);d--;)b=c[d],b.specified&&e.setAttribute(b.nodeName,b.nodeValue);return e.style.cssText=a.style.cssText,e}function m(a){for(var b,c=a.split("{"),e=c.length,f=RegExp("(^|[\\s,>+~])("+d().join("|")+")(?=[[\\s,>+~#.:]|$)","gi"),g="$1"+A+"\\:$2";e--;)b=c[e]=c[e].split("}"),b[b.length-1]=b[b.length-1].replace(f,g),c[e]=b.join("}");return c.join("{")}function n(a){for(var b=a.length;b--;)a[b].removeNode()}function o(a){function b(){clearTimeout(g._removeSheetTimer),d&&d.removeNode(!0),d=null}var d,e,g=f(a),h=a.namespaces,i=a.parentWindow;return!B||a.printShived?a:("undefined"==typeof h[A]&&h.add(A),i.attachEvent("onbeforeprint",function(){b();for(var f,g,h,i=a.styleSheets,j=[],l=i.length,n=Array(l);l--;)n[l]=i[l];for(;h=n.pop();)if(!h.disabled&&z.test(h.media)){try{f=h.imports,g=f.length}catch(o){g=0}for(l=0;g>l;l++)n.push(f[l]);try{j.push(h.cssText)}catch(o){}}j=m(j.reverse().join("")),e=k(a),d=c(a,j)}),i.attachEvent("onafterprint",function(){n(e),clearTimeout(g._removeSheetTimer),g._removeSheetTimer=setTimeout(b,500)}),a.printShived=!0,a)}var p,q,r="3.7.3",s=a.html5||{},t=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,u=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,v="_html5shiv",w=0,x={};!function(){try{var a=b.createElement("a");a.innerHTML="",p="hidden"in a,q=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof 
a.createElement}()}catch(c){p=!0,q=!0}}();var y={elements:s.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:r,shivCSS:s.shivCSS!==!1,supportsUnknownElements:q,shivMethods:s.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=y,j(b);var z=/^$|\b(?:all|print)\b/,A="html5shiv",B=!q&&function(){var c=b.documentElement;return!("undefined"==typeof b.namespaces||"undefined"==typeof b.parentWindow||"undefined"==typeof c.applyElement||"undefined"==typeof c.removeNode||"undefined"==typeof a.attachEvent)}();y.type+=" print",y.shivPrint=o,o(b),"object"==typeof module&&module.exports&&(module.exports=y)}("undefined"!=typeof window?window:this,document); \ No newline at end of file diff --git a/docs/_build/_static/js/html5shiv.min.js b/docs/_build/_static/js/html5shiv.min.js new file mode 100644 index 0000000..cd1c674 --- /dev/null +++ b/docs/_build/_static/js/html5shiv.min.js @@ -0,0 +1,4 @@ +/** +* @preserve HTML5 Shiv 3.7.3 | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed +*/ +!function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=t.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=t.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),t.elements=c+" "+a,j(b)}function f(a){var b=s[a[q]];return b||(b={},r++,a[q]=r,s[r]=b),b}function g(a,c,d){if(c||(c=b),l)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():p.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||o.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),l)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return t.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(t,b.frag)}function j(a){a||(a=b);var d=f(a);return!t.shivCSS||k||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||i(a,d),a}var k,l,m="3.7.3-pre",n=a.html5||{},o=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,q="_html5shiv",r=0,s={};!function(){try{var a=b.createElement("a");a.innerHTML="",k="hidden"in a,l=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){k=!0,l=!0}}();var t={elements:n.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time 
video",version:m,shivCSS:n.shivCSS!==!1,supportsUnknownElements:l,shivMethods:n.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=t,j(b),"object"==typeof module&&module.exports&&(module.exports=t)}("undefined"!=typeof window?window:this,document); \ No newline at end of file diff --git a/docs/_build/_static/js/theme.js b/docs/_build/_static/js/theme.js new file mode 100644 index 0000000..1fddb6e --- /dev/null +++ b/docs/_build/_static/js/theme.js @@ -0,0 +1 @@ +!function(n){var e={};function t(i){if(e[i])return e[i].exports;var o=e[i]={i:i,l:!1,exports:{}};return n[i].call(o.exports,o,o.exports,t),o.l=!0,o.exports}t.m=n,t.c=e,t.d=function(n,e,i){t.o(n,e)||Object.defineProperty(n,e,{enumerable:!0,get:i})},t.r=function(n){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(n,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(n,"__esModule",{value:!0})},t.t=function(n,e){if(1&e&&(n=t(n)),8&e)return n;if(4&e&&"object"==typeof n&&n&&n.__esModule)return n;var i=Object.create(null);if(t.r(i),Object.defineProperty(i,"default",{enumerable:!0,value:n}),2&e&&"string"!=typeof n)for(var o in n)t.d(i,o,function(e){return n[e]}.bind(null,o));return i},t.n=function(n){var e=n&&n.__esModule?function(){return n.default}:function(){return n};return t.d(e,"a",e),e},t.o=function(n,e){return Object.prototype.hasOwnProperty.call(n,e)},t.p="",t(t.s=0)}([function(n,e,t){t(1),n.exports=t(3)},function(n,e,t){(function(){var e="undefined"!=typeof window?window.jQuery:t(2);n.exports.ThemeNav={navBar:null,win:null,winScroll:!1,winResize:!1,linkScroll:!1,winPosition:0,winHeight:null,docHeight:null,isRunning:!1,enable:function(n){var t=this;void 0===n&&(n=!0),t.isRunning||(t.isRunning=!0,e((function(e){t.init(e),t.reset(),t.win.on("hashchange",t.reset),n&&t.win.on("scroll",(function(){t.linkScroll||t.winScroll||(t.winScroll=!0,requestAnimationFrame((function(){t.onScroll()})))})),t.win.on("resize",(function(){t.winResize||(t.winResize=!0,requestAnimationFrame((function(){t.onResize()})))})),t.onResize()})))},enableSticky:function(){this.enable(!0)},init:function(n){n(document);var e=this;this.navBar=n("div.wy-side-scroll:first"),this.win=n(window),n(document).on("click","[data-toggle='wy-nav-top']",(function(){n("[data-toggle='wy-nav-shift']").toggleClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift")})).on("click",".wy-menu-vertical .current ul li a",(function(){var t=n(this);n("[data-toggle='wy-nav-shift']").removeClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift"),e.toggleCurrent(t),e.hashChange()})).on("click","[data-toggle='rst-current-version']",(function(){n("[data-toggle='rst-versions']").toggleClass("shift-up")})),n("table.docutils:not(.field-list,.footnote,.citation)").wrap("
"),n("table.docutils.footnote").wrap("
"),n("table.docutils.citation").wrap("
"),n(".wy-menu-vertical ul").not(".simple").siblings("a").each((function(){var t=n(this);expand=n(''),expand.on("click",(function(n){return e.toggleCurrent(t),n.stopPropagation(),!1})),t.prepend(expand)}))},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),t=e.find('[href="'+n+'"]');if(0===t.length){var i=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(t=e.find('[href="#'+i.attr("id")+'"]')).length&&(t=e.find('[href="#"]'))}if(t.length>0){$(".wy-menu-vertical .current").removeClass("current").attr("aria-expanded","false"),t.addClass("current").attr("aria-expanded","true"),t.closest("li.toctree-l1").parent().addClass("current").attr("aria-expanded","true");for(let n=1;n<=10;n++)t.closest("li.toctree-l"+n).addClass("current").attr("aria-expanded","true");t[0].scrollIntoView()}}catch(n){console.log("Error expanding nav for anchor",n)}},onScroll:function(){this.winScroll=!1;var n=this.win.scrollTop(),e=n+this.winHeight,t=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(t),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",(function(){this.linkScroll=!1}))},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current").attr("aria-expanded","false"),e.siblings().find("li.current").removeClass("current").attr("aria-expanded","false");var t=e.find("> ul li");t.length&&(t.removeClass("current").attr("aria-expanded","false"),e.toggleClass("current").attr("aria-expanded",(function(n,e){return"true"==e?"false":"true"})))}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:n.exports.ThemeNav,StickyNav:n.exports.ThemeNav}),function(){for(var n=0,e=["ms","moz","webkit","o"],t=0;t0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" 
+ v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if (re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + diff --git a/docs/_build/_static/minus.png b/docs/_build/_static/minus.png new file mode 100644 index 0000000..d96755f Binary files /dev/null and b/docs/_build/_static/minus.png differ diff --git a/docs/_build/_static/plus.png b/docs/_build/_static/plus.png new file mode 100644 index 0000000..7107cec Binary files /dev/null and b/docs/_build/_static/plus.png differ diff --git a/docs/_build/_static/pygments.css b/docs/_build/_static/pygments.css new file mode 100644 index 0000000..84ab303 --- /dev/null +++ b/docs/_build/_static/pygments.css @@ -0,0 +1,75 @@ +pre { line-height: 125%; } +td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; 
padding-right: 5px; } +span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.highlight .hll { background-color: #ffffcc } +.highlight { background: #f8f8f8; } +.highlight .c { color: #3D7B7B; font-style: italic } /* Comment */ +.highlight .err { border: 1px solid #FF0000 } /* Error */ +.highlight .k { color: #008000; font-weight: bold } /* Keyword */ +.highlight .o { color: #666666 } /* Operator */ +.highlight .ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */ +.highlight .cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #9C6500 } /* Comment.Preproc */ +.highlight .cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */ +.highlight .c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #3D7B7B; font-style: italic } /* Comment.Special */ +.highlight .gd { color: #A00000 } /* Generic.Deleted */ +.highlight .ge { font-style: italic } /* Generic.Emph */ +.highlight .ges { font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +.highlight .gr { color: #E40000 } /* Generic.Error */ +.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: #008400 } /* Generic.Inserted */ +.highlight .go { color: #717171 } /* Generic.Output */ +.highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */ +.highlight .gs { font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.highlight .gt { color: #0044DD } /* Generic.Traceback */ +.highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */ +.highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */ +.highlight .kp { color: #008000 } /* Keyword.Pseudo */ +.highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #B00040 } /* Keyword.Type */ +.highlight .m { color: #666666 } /* Literal.Number */ +.highlight .s { color: #BA2121 } /* Literal.String */ +.highlight .na { color: #687822 } /* Name.Attribute */ +.highlight .nb { color: #008000 } /* Name.Builtin */ +.highlight .nc { color: #0000FF; font-weight: bold } /* Name.Class */ +.highlight .no { color: #880000 } /* Name.Constant */ +.highlight .nd { color: #AA22FF } /* Name.Decorator */ +.highlight .ni { color: #717171; font-weight: bold } /* Name.Entity */ +.highlight .ne { color: #CB3F38; font-weight: bold } /* Name.Exception */ +.highlight .nf { color: #0000FF } /* Name.Function */ +.highlight .nl { color: #767600 } /* Name.Label */ +.highlight .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */ +.highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */ +.highlight .nv { color: #19177C } /* Name.Variable */ +.highlight .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */ +.highlight .w { color: #bbbbbb } /* Text.Whitespace */ +.highlight .mb { color: #666666 } /* Literal.Number.Bin */ +.highlight .mf { color: #666666 } /* Literal.Number.Float */ +.highlight .mh { color: #666666 } /* Literal.Number.Hex */ +.highlight .mi { color: #666666 } /* Literal.Number.Integer */ +.highlight .mo { color: #666666 } /* Literal.Number.Oct */ +.highlight .sa { color: #BA2121 } /* Literal.String.Affix */ +.highlight .sb { color: #BA2121 } /* Literal.String.Backtick */ +.highlight .sc { color: #BA2121 } /* Literal.String.Char */ +.highlight .dl { color: 
#BA2121 } /* Literal.String.Delimiter */ +.highlight .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */ +.highlight .s2 { color: #BA2121 } /* Literal.String.Double */ +.highlight .se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */ +.highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */ +.highlight .si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */ +.highlight .sx { color: #008000 } /* Literal.String.Other */ +.highlight .sr { color: #A45A77 } /* Literal.String.Regex */ +.highlight .s1 { color: #BA2121 } /* Literal.String.Single */ +.highlight .ss { color: #19177C } /* Literal.String.Symbol */ +.highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */ +.highlight .fm { color: #0000FF } /* Name.Function.Magic */ +.highlight .vc { color: #19177C } /* Name.Variable.Class */ +.highlight .vg { color: #19177C } /* Name.Variable.Global */ +.highlight .vi { color: #19177C } /* Name.Variable.Instance */ +.highlight .vm { color: #19177C } /* Name.Variable.Magic */ +.highlight .il { color: #666666 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git a/docs/_build/_static/searchtools.js b/docs/_build/_static/searchtools.js new file mode 100644 index 0000000..7918c3f --- /dev/null +++ b/docs/_build/_static/searchtools.js @@ -0,0 +1,574 @@ +/* + * searchtools.js + * ~~~~~~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for the full-text search. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +/** + * Simple result scoring code. + */ +if (typeof Scorer === "undefined") { + var Scorer = { + // Implement the following function to further tweak the score for each result + // The function takes a result array [docname, title, anchor, descr, score, filename] + // and returns the new score. + /* + score: result => { + const [docname, title, anchor, descr, score, filename] = result + return score + }, + */ + + // query matches the full name of an object + objNameMatch: 11, + // or matches in the last dotted part of the object name + objPartialMatch: 6, + // Additive scores depending on the priority of the object + objPrio: { + 0: 15, // used to be importantResults + 1: 5, // used to be objectResults + 2: -5, // used to be unimportantResults + }, + // Used when the priority is not in the mapping. 
+ objPrioDefault: 0, + + // query found in title + title: 15, + partialTitle: 7, + // query found in terms + term: 5, + partialTerm: 2, + }; +} + +const _removeChildren = (element) => { + while (element && element.lastChild) element.removeChild(element.lastChild); +}; + +/** + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping + */ +const _escapeRegExp = (string) => + string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string + +const _displayItem = (item, searchTerms, highlightTerms) => { + const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; + const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; + const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; + const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; + const contentRoot = document.documentElement.dataset.content_root; + + const [docName, title, anchor, descr, score, _filename] = item; + + let listItem = document.createElement("li"); + let requestUrl; + let linkUrl; + if (docBuilder === "dirhtml") { + // dirhtml builder + let dirname = docName + "/"; + if (dirname.match(/\/index\/$/)) + dirname = dirname.substring(0, dirname.length - 6); + else if (dirname === "index/") dirname = ""; + requestUrl = contentRoot + dirname; + linkUrl = requestUrl; + } else { + // normal html builders + requestUrl = contentRoot + docName + docFileSuffix; + linkUrl = docName + docLinkSuffix; + } + let linkEl = listItem.appendChild(document.createElement("a")); + linkEl.href = linkUrl + anchor; + linkEl.dataset.score = score; + linkEl.innerHTML = title; + if (descr) { + listItem.appendChild(document.createElement("span")).innerHTML = + " (" + descr + ")"; + // highlight search terms in the description + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + } + else if (showSearchSummary) + fetch(requestUrl) + .then((responseData) => responseData.text()) + .then((data) => { + if (data) + listItem.appendChild( + Search.makeSearchSummary(data, searchTerms) + ); + // highlight search terms in the summary + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + }); + Search.output.appendChild(listItem); +}; +const _finishSearch = (resultCount) => { + Search.stopPulse(); + Search.title.innerText = _("Search Results"); + if (!resultCount) + Search.status.innerText = Documentation.gettext( + "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." + ); + else + Search.status.innerText = _( + `Search finished, found ${resultCount} page(s) matching the search query.` + ); +}; +const _displayNextItem = ( + results, + resultCount, + searchTerms, + highlightTerms, +) => { + // results left, load the summary and display it + // this is intended to be dynamic (don't sub resultsCount) + if (results.length) { + _displayItem(results.pop(), searchTerms, highlightTerms); + setTimeout( + () => _displayNextItem(results, resultCount, searchTerms, highlightTerms), + 5 + ); + } + // search finished, update title and status message + else _finishSearch(resultCount); +}; + +/** + * Default splitQuery function. Can be overridden in ``sphinx.search`` with a + * custom function per language. 
+ * + * The regular expression works by splitting the string on consecutive characters + * that are not Unicode letters, numbers, underscores, or emoji characters. + * This is the same as ``\W+`` in Python, preserving the surrogate pair area. + */ +if (typeof splitQuery === "undefined") { + var splitQuery = (query) => query + .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) + .filter(term => term) // remove remaining empty strings +} + +/** + * Search Module + */ +const Search = { + _index: null, + _queued_query: null, + _pulse_status: -1, + + htmlToText: (htmlString) => { + const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); + htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() }); + const docContent = htmlElement.querySelector('[role="main"]'); + if (docContent !== undefined) return docContent.textContent; + console.warn( + "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template." + ); + return ""; + }, + + init: () => { + const query = new URLSearchParams(window.location.search).get("q"); + document + .querySelectorAll('input[name="q"]') + .forEach((el) => (el.value = query)); + if (query) Search.performSearch(query); + }, + + loadIndex: (url) => + (document.body.appendChild(document.createElement("script")).src = url), + + setIndex: (index) => { + Search._index = index; + if (Search._queued_query !== null) { + const query = Search._queued_query; + Search._queued_query = null; + Search.query(query); + } + }, + + hasIndex: () => Search._index !== null, + + deferQuery: (query) => (Search._queued_query = query), + + stopPulse: () => (Search._pulse_status = -1), + + startPulse: () => { + if (Search._pulse_status >= 0) return; + + const pulse = () => { + Search._pulse_status = (Search._pulse_status + 1) % 4; + Search.dots.innerText = ".".repeat(Search._pulse_status); + if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); + }; + pulse(); + }, + + /** + * perform a search for something (or wait until index is loaded) + */ + performSearch: (query) => { + // create the required interface elements + const searchText = document.createElement("h2"); + searchText.textContent = _("Searching"); + const searchSummary = document.createElement("p"); + searchSummary.classList.add("search-summary"); + searchSummary.innerText = ""; + const searchList = document.createElement("ul"); + searchList.classList.add("search"); + + const out = document.getElementById("search-results"); + Search.title = out.appendChild(searchText); + Search.dots = Search.title.appendChild(document.createElement("span")); + Search.status = out.appendChild(searchSummary); + Search.output = out.appendChild(searchList); + + const searchProgress = document.getElementById("search-progress"); + // Some themes don't use the search progress node + if (searchProgress) { + searchProgress.innerText = _("Preparing search..."); + } + Search.startPulse(); + + // index already loaded, the browser was quick! 
+ if (Search.hasIndex()) Search.query(query); + else Search.deferQuery(query); + }, + + /** + * execute search (requires search index to be loaded) + */ + query: (query) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + const allTitles = Search._index.alltitles; + const indexEntries = Search._index.indexentries; + + // stem the search terms and add them to the correct list + const stemmer = new Stemmer(); + const searchTerms = new Set(); + const excludedTerms = new Set(); + const highlightTerms = new Set(); + const objectTerms = new Set(splitQuery(query.toLowerCase().trim())); + splitQuery(query.trim()).forEach((queryTerm) => { + const queryTermLower = queryTerm.toLowerCase(); + + // maybe skip this "word" + // stopwords array is from language_data.js + if ( + stopwords.indexOf(queryTermLower) !== -1 || + queryTerm.match(/^\d+$/) + ) + return; + + // stem the word + let word = stemmer.stemWord(queryTermLower); + // select the correct list + if (word[0] === "-") excludedTerms.add(word.substr(1)); + else { + searchTerms.add(word); + highlightTerms.add(queryTermLower); + } + }); + + if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js + localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" ")) + } + + // console.debug("SEARCH: searching for:"); + // console.info("required: ", [...searchTerms]); + // console.info("excluded: ", [...excludedTerms]); + + // array of [docname, title, anchor, descr, score, filename] + let results = []; + _removeChildren(document.getElementById("search-progress")); + + const queryLower = query.toLowerCase(); + for (const [title, foundTitles] of Object.entries(allTitles)) { + if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) { + for (const [file, id] of foundTitles) { + let score = Math.round(100 * queryLower.length / title.length) + results.push([ + docNames[file], + titles[file] !== title ? `${titles[file]} > ${title}` : title, + id !== null ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // search for explicit entries in index directives + for (const [entry, foundEntries] of Object.entries(indexEntries)) { + if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { + for (const [file, id] of foundEntries) { + let score = Math.round(100 * queryLower.length / entry.length) + results.push([ + docNames[file], + titles[file], + id ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // lookup as object + objectTerms.forEach((term) => + results.push(...Search.performObjectSearch(term, objectTerms)) + ); + + // lookup as search terms in fulltext + results.push(...Search.performTermsSearch(searchTerms, excludedTerms)); + + // let the scorer override scores with a custom scoring function + if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item))); + + // now sort the results by score (in opposite order of appearance, since the + // display function below uses pop() to retrieve items) and then + // alphabetically + results.sort((a, b) => { + const leftScore = a[4]; + const rightScore = b[4]; + if (leftScore === rightScore) { + // same score: sort alphabetically + const leftTitle = a[1].toLowerCase(); + const rightTitle = b[1].toLowerCase(); + if (leftTitle === rightTitle) return 0; + return leftTitle > rightTitle ? -1 : 1; // inverted is intentional + } + return leftScore > rightScore ? 
1 : -1; + }); + + // remove duplicate search results + // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept + let seen = new Set(); + results = results.reverse().reduce((acc, result) => { + let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(','); + if (!seen.has(resultStr)) { + acc.push(result); + seen.add(resultStr); + } + return acc; + }, []); + + results = results.reverse(); + + // for debugging + //Search.lastresults = results.slice(); // a copy + // console.info("search results:", Search.lastresults); + + // print the results + _displayNextItem(results, results.length, searchTerms, highlightTerms); + }, + + /** + * search for object names + */ + performObjectSearch: (object, objectTerms) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const objects = Search._index.objects; + const objNames = Search._index.objnames; + const titles = Search._index.titles; + + const results = []; + + const objectSearchCallback = (prefix, match) => { + const name = match[4] + const fullname = (prefix ? prefix + "." : "") + name; + const fullnameLower = fullname.toLowerCase(); + if (fullnameLower.indexOf(object) < 0) return; + + let score = 0; + const parts = fullnameLower.split("."); + + // check for different match types: exact matches of full name or + // "last name" (i.e. last dotted part) + if (fullnameLower === object || parts.slice(-1)[0] === object) + score += Scorer.objNameMatch; + else if (parts.slice(-1)[0].indexOf(object) > -1) + score += Scorer.objPartialMatch; // matches in last name + + const objName = objNames[match[1]][2]; + const title = titles[match[0]]; + + // If more than one term searched for, we require other words to be + // found in the name/title/description + const otherTerms = new Set(objectTerms); + otherTerms.delete(object); + if (otherTerms.size > 0) { + const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase(); + if ( + [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0) + ) + return; + } + + let anchor = match[3]; + if (anchor === "") anchor = fullname; + else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname; + + const descr = objName + _(", in ") + title; + + // add custom score for some objects according to scorer + if (Scorer.objPrio.hasOwnProperty(match[2])) + score += Scorer.objPrio[match[2]]; + else score += Scorer.objPrioDefault; + + results.push([ + docNames[match[0]], + fullname, + "#" + anchor, + descr, + score, + filenames[match[0]], + ]); + }; + Object.keys(objects).forEach((prefix) => + objects[prefix].forEach((array) => + objectSearchCallback(prefix, array) + ) + ); + return results; + }, + + /** + * search for full-text terms in the index + */ + performTermsSearch: (searchTerms, excludedTerms) => { + // prepare search + const terms = Search._index.terms; + const titleTerms = Search._index.titleterms; + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + + const scoreMap = new Map(); + const fileMap = new Map(); + + // perform the search on the required terms + searchTerms.forEach((word) => { + const files = []; + const arr = [ + { files: terms[word], score: Scorer.term }, + { files: titleTerms[word], score: Scorer.title }, + ]; + // add support for partial matches + if (word.length > 2) { + const escapedWord = _escapeRegExp(word); + Object.keys(terms).forEach((term) => { + if (term.match(escapedWord) && 
!terms[word]) + arr.push({ files: terms[term], score: Scorer.partialTerm }); + }); + Object.keys(titleTerms).forEach((term) => { + if (term.match(escapedWord) && !titleTerms[word]) + arr.push({ files: titleTerms[word], score: Scorer.partialTitle }); + }); + } + + // no match but word was a required one + if (arr.every((record) => record.files === undefined)) return; + + // found search word in contents + arr.forEach((record) => { + if (record.files === undefined) return; + + let recordFiles = record.files; + if (recordFiles.length === undefined) recordFiles = [recordFiles]; + files.push(...recordFiles); + + // set score for the word in each file + recordFiles.forEach((file) => { + if (!scoreMap.has(file)) scoreMap.set(file, {}); + scoreMap.get(file)[word] = record.score; + }); + }); + + // create the mapping + files.forEach((file) => { + if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1) + fileMap.get(file).push(word); + else fileMap.set(file, [word]); + }); + }); + + // now check if the files don't contain excluded terms + const results = []; + for (const [file, wordList] of fileMap) { + // check if all requirements are matched + + // as search terms with length < 3 are discarded + const filteredTermCount = [...searchTerms].filter( + (term) => term.length > 2 + ).length; + if ( + wordList.length !== searchTerms.size && + wordList.length !== filteredTermCount + ) + continue; + + // ensure that none of the excluded terms is in the search result + if ( + [...excludedTerms].some( + (term) => + terms[term] === file || + titleTerms[term] === file || + (terms[term] || []).includes(file) || + (titleTerms[term] || []).includes(file) + ) + ) + break; + + // select one (max) score for the file. + const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w])); + // add result to the result list + results.push([ + docNames[file], + titles[file], + "", + null, + score, + filenames[file], + ]); + } + return results; + }, + + /** + * helper function to return a node containing the + * search summary for a given text. keywords is a list + * of stemmed words. + */ + makeSearchSummary: (htmlText, keywords) => { + const text = Search.htmlToText(htmlText); + if (text === "") return null; + + const textLower = text.toLowerCase(); + const actualStartPosition = [...keywords] + .map((k) => textLower.indexOf(k.toLowerCase())) + .filter((i) => i > -1) + .slice(-1)[0]; + const startWithContext = Math.max(actualStartPosition - 120, 0); + + const top = startWithContext === 0 ? "" : "..."; + const tail = startWithContext + 240 < text.length ? "..." : ""; + + let summary = document.createElement("p"); + summary.classList.add("context"); + summary.textContent = top + text.substr(startWithContext, 240).trim() + tail; + + return summary; + }, +}; + +_ready(Search.init); diff --git a/docs/_build/_static/sphinx_highlight.js b/docs/_build/_static/sphinx_highlight.js new file mode 100644 index 0000000..8a96c69 --- /dev/null +++ b/docs/_build/_static/sphinx_highlight.js @@ -0,0 +1,154 @@ +/* Highlighting utilities for Sphinx HTML documentation. */ +"use strict"; + +const SPHINX_HIGHLIGHT_ENABLED = true + +/** + * highlight a given string on a node by wrapping it in + * span elements with the given class name. 
+ */ +const _highlight = (node, addItems, text, className) => { + if (node.nodeType === Node.TEXT_NODE) { + const val = node.nodeValue; + const parent = node.parentNode; + const pos = val.toLowerCase().indexOf(text); + if ( + pos >= 0 && + !parent.classList.contains(className) && + !parent.classList.contains("nohighlight") + ) { + let span; + + const closestNode = parent.closest("body, svg, foreignObject"); + const isInSVG = closestNode && closestNode.matches("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.classList.add(className); + } + + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + const rest = document.createTextNode(val.substr(pos + text.length)); + parent.insertBefore( + span, + parent.insertBefore( + rest, + node.nextSibling + ) + ); + node.nodeValue = val.substr(0, pos); + /* There may be more occurrences of search term in this node. So call this + * function recursively on the remaining fragment. + */ + _highlight(rest, addItems, text, className); + + if (isInSVG) { + const rect = document.createElementNS( + "http://www.w3.org/2000/svg", + "rect" + ); + const bbox = parent.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute("class", className); + addItems.push({ parent: parent, target: rect }); + } + } + } else if (node.matches && !node.matches("button, select, textarea")) { + node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); + } +}; +const _highlightText = (thisNode, text, className) => { + let addItems = []; + _highlight(thisNode, addItems, text, className); + addItems.forEach((obj) => + obj.parent.insertAdjacentElement("beforebegin", obj.target) + ); +}; + +/** + * Small JavaScript module for the documentation. + */ +const SphinxHighlight = { + + /** + * highlight the search words provided in localstorage in the text + */ + highlightSearchWords: () => { + if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight + + // get and clear terms from localstorage + const url = new URL(window.location); + const highlight = + localStorage.getItem("sphinx_highlight_terms") + || url.searchParams.get("highlight") + || ""; + localStorage.removeItem("sphinx_highlight_terms") + url.searchParams.delete("highlight"); + window.history.replaceState({}, "", url); + + // get individual terms from highlight string + const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); + if (terms.length === 0) return; // nothing to do + + // There should never be more than one element matching "div.body" + const divBody = document.querySelectorAll("div.body"); + const body = divBody.length ? 
divBody[0] : document.querySelector("body"); + window.setTimeout(() => { + terms.forEach((term) => _highlightText(body, term, "highlighted")); + }, 10); + + const searchBox = document.getElementById("searchbox"); + if (searchBox === null) return; + searchBox.appendChild( + document + .createRange() + .createContextualFragment( + '" + ) + ); + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords: () => { + document + .querySelectorAll("#searchbox .highlight-link") + .forEach((el) => el.remove()); + document + .querySelectorAll("span.highlighted") + .forEach((el) => el.classList.remove("highlighted")); + localStorage.removeItem("sphinx_highlight_terms") + }, + + initEscapeListener: () => { + // only install a listener if it is really needed + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return; + if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) { + SphinxHighlight.hideSearchWords(); + event.preventDefault(); + } + }); + }, +}; + +_ready(() => { + /* Do not call highlightSearchWords() when we are on the search page. + * It will highlight words from the *previous* search query. + */ + if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords(); + SphinxHighlight.initEscapeListener(); +}); diff --git a/docs/_build/genindex.html b/docs/_build/genindex.html new file mode 100644 index 0000000..b541bf6 --- /dev/null +++ b/docs/_build/genindex.html @@ -0,0 +1,751 @@ + + + + + + Index — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+ [docs/_build/genindex.html — 751 lines of generated Sphinx output omitted: the alphabetical index page of the MEDfl 0.1.0 documentation (sections _, A, C, D, E, F, G, I, L, M, N, O, P, R, S, T, U, V), covering entries for the MEDfl, MEDfl.LearningManager, and MEDfl.NetManager modules, classes, and functions; the HTML markup was lost in extraction, leaving only text fragments and the standard "© Copyright 2023, MEDomics consortium" / Read the Docs footer.]
+ + + + \ No newline at end of file diff --git a/docs/_build/index.html b/docs/_build/index.html new file mode 100644 index 0000000..7d919b8 --- /dev/null +++ b/docs/_build/index.html @@ -0,0 +1,133 @@ + + + + + + + Welcome to MEDfl-package-docs’s documentation! — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ [docs/_build/index.html — generated landing page omitted: "Welcome to MEDfl-package-docs's documentation!" with a Contents toctree and an "Indices and tables" section; the HTML markup was lost in extraction.]
+ + + + \ No newline at end of file diff --git a/docs/_build/modules.html b/docs/_build/modules.html new file mode 100644 index 0000000..2370266 --- /dev/null +++ b/docs/_build/modules.html @@ -0,0 +1,157 @@ + + + + + + + MEDfl — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/_build/objects.inv b/docs/_build/objects.inv new file mode 100644 index 0000000..b513ae5 Binary files /dev/null and b/docs/_build/objects.inv differ diff --git a/docs/_build/py-modindex.html b/docs/_build/py-modindex.html new file mode 100644 index 0000000..ea25cdc --- /dev/null +++ b/docs/_build/py-modindex.html @@ -0,0 +1,214 @@ + + + + + + Python Module Index — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ [docs/_build/py-modindex.html — generated Python Module Index omitted: lists MEDfl, MEDfl.LearningManager (client, dynamicModal, federated_dataset, flpipeline, model, plot, server, strategy, utils), and MEDfl.NetManager (dataset, flsetup, net_helper, net_manager_queries, network, node); the HTML markup was lost in extraction, leaving only the table text and the standard footer.]
+ + + + \ No newline at end of file diff --git a/docs/_build/search.html b/docs/_build/search.html new file mode 100644 index 0000000..7d8bac2 --- /dev/null +++ b/docs/_build/search.html @@ -0,0 +1,129 @@ + + + + + + Search — MEDfl 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ [docs/_build/search.html — generated search page shell omitted: navigation, empty results container, and the standard "© Copyright 2023, MEDomics consortium" / Read the Docs footer; the HTML markup was lost in extraction and nothing substantive remains.]
+ + + + + + + + + \ No newline at end of file diff --git a/docs/_build/searchindex.js b/docs/_build/searchindex.js new file mode 100644 index 0000000..3d25299 --- /dev/null +++ b/docs/_build/searchindex.js @@ -0,0 +1 @@ +Search.setIndex({"docnames": ["MEDfl", "MEDfl.LearningManager", "MEDfl.NetManager", "index", "modules"], "filenames": ["MEDfl.rst", "MEDfl.LearningManager.rst", "MEDfl.NetManager.rst", "index.rst", "modules.rst"], "titles": ["MEDfl package", "MEDfl.LearningManager package", "MEDfl.NetManager package", "Welcome to MEDfl-package-docs\u2019s documentation!", "MEDfl"], "terms": {"learningmanag": [0, 4], "submodul": [0, 4], "client": [0, 4], "flowercli": [0, 1], "cid": [0, 1], "local_model": [0, 1], "trainload": [0, 1], "valload": [0, 1], "diff_priv": [0, 1], "__init__": [0, 1, 2], "context": [0, 1], "evalu": [0, 1], "fit": [0, 1], "get_paramet": [0, 1], "valid": [0, 1, 2], "dynamicmod": [0, 4], "dynamicmodel": [0, 1], "create_binary_classifi": [0, 1], "create_convolutional_neural_network": [0, 1], "create_linear_regressor": [0, 1], "create_logistic_regressor": [0, 1], "create_lstm_network": [0, 1], "create_model": [0, 1], "create_multiclass_classifi": [0, 1], "create_recurrent_neural_network": [0, 1], "federated_dataset": [0, 4], "federateddataset": [0, 1, 2], "creat": [0, 1, 2], "updat": [0, 1, 2], "flpipelin": [0, 2, 4], "name": [0, 1, 2], "descript": [0, 1, 2], "server": [0, 4], "auto_test": [0, 1], "delet": [0, 1, 2], "test_by_nod": [0, 1], "create_queri": [0, 1], "model": [0, 4], "optim": [0, 1], "criterion": [0, 1], "load_model": [0, 1], "save_model": [0, 1], "set_paramet": [0, 1], "train": [0, 1, 2], "plot": [0, 4], "accuracylossplott": [0, 1], "results_dict": [0, 1], "paramet": [0, 1, 2], "metric": [0, 1], "iter": [0, 1], "plot_accuracy_loss": [0, 1], "plot_classification_report": [0, 1], "plot_confusion_matrix_by_nod": [0, 1], "plot_global_confusion_matrix": [0, 1], "flowerserv": [0, 1], "global_model": [0, 1], "strategi": [0, 2, 4], "num_round": [0, 1], "num_client": [0, 1], "fed_dataset": [0, 1], "accuraci": [0, 1], "loss": [0, 1], "flower_cli": [0, 1], "client_fn": [0, 1], "run": [0, 1], "fraction_fit": [0, 1], "fraction_evalu": [0, 1], "min_fit_cli": [0, 1], "min_evaluate_cli": [0, 1], "min_available_cli": [0, 1], "initial_paramet": [0, 1], "util": [0, 2, 4], "custom_classification_report": [0, 1], "empty_db": [0, 1], "get_node_confusion_matrix": [0, 1], "get_pipeline_confusion_matrix": [0, 1], "get_pipeline_from_nam": [0, 1], "get_pipeline_result": [0, 1], "test": [0, 1, 2], "netmanag": [0, 4], "dataset": [0, 1, 4], "delete_dataset": [0, 2], "list_alldataset": [0, 2], "update_data": [0, 2], "upload_dataset": [0, 2], "flsetup": [0, 1, 4], "create_dataloader_from_nod": [0, 2], "create_federated_dataset": [0, 2], "create_nodes_from_master_dataset": [0, 2], "get_fldataset": [0, 2], "list_allsetup": [0, 2], "read_setup": [0, 2], "net_help": [0, 4], "get_feddataset_id_from_nam": [0, 2], "get_flpipeline_from_nam": [0, 2], "get_flsetupid_from_nam": [0, 2], "get_netid_from_nam": [0, 2], "get_nodeid_from_nam": [0, 2], "is_str": [0, 2], "master_table_exist": [0, 2], "process_data_after_read": [0, 2], "process_eicu": [0, 2], "net_manager_queri": [0, 4], "network": [0, 1, 4], "mtable_exist": [0, 2], "add_nod": [0, 2], "create_master_dataset": [0, 2], "create_network": [0, 2], "delete_network": [0, 2], "list_allnetwork": [0, 2], "list_allnod": [0, 2], "update_network": [0, 2], "use_network": [0, 2], "node": [0, 1, 4], "test_fract": [0, 2], "assign_dataset": [0, 2], 
"check_dataset_compat": [0, 2], "create_nod": [0, 2], "delete_nod": [0, 2], "get_dataset": [0, 2], "unassign_dataset": [0, 2], "update_nod": [0, 2], "class": [1, 2], "str": [1, 2], "dataload": [1, 2], "bool": [1, 2], "true": [1, 2], "sourc": [1, 2], "base": [1, 2], "numpycli": 1, "id": [1, 2], "type": [1, 2], "local": 1, "feder": [1, 2], "learn": [1, 2], "data": [1, 2], "flag": [1, 2], "indic": [1, 2], "whether": [1, 2], "us": [1, 2], "differenti": 1, "privaci": 1, "initi": [1, 2], "instanc": [1, 2], "config": 1, "return": [1, 2], "receiv": 1, "from": [1, 2], "configur": 1, "inform": [1, 2], "number": 1, "exampl": 1, "tupl": 1, "current": 1, "numpi": 1, "arrai": 1, "object": [1, 2], "variou": 1, "neural": 1, "static": [1, 2], "input_dim": 1, "hidden_dim": 1, "output_dim": 1, "activ": 1, "relu": 1, "dropout_r": 1, "0": [1, 2], "batch_norm": 1, "fals": [1, 2], "use_gpu": 1, "binari": 1, "classifi": 1, "customiz": 1, "architectur": [1, 2], "int": [1, 2], "dimens": 1, "input": [1, 2], "list": [1, 2], "hidden": 1, "layer": 1, "output": [1, 2], "option": [1, 2], "function": 1, "default": [1, 2], "i": [1, 2], "float": [1, 2], "dropout": 1, "rate": 1, "regular": 1, "appli": 1, "batch": [1, 2], "normal": 1, "gpu": 1, "acceler": 1, "pytorch": [1, 2], "torch": [1, 2], "nn": 1, "input_channel": 1, "output_channel": 1, "kernel_s": 1, "convolut": 1, "cnn": 1, "channel": 1, "size": [1, 2], "kernel": 1, "linear": 1, "regressor": 1, "logist": 1, "input_s": 1, "hidden_s": 1, "long": 1, "short": 1, "term": 1, "memori": 1, "lstm": 1, "model_typ": 1, "params_dict": [1, 2], "specif": 1, "dynam": 1, "given": [1, 2], "multiclass": 1, "svm": 1, "recurr": 1, "autoencod": 1, "dict": [1, 2], "dictionari": [1, 2], "contain": [1, 2], "creation": [1, 2], "rnn": 1, "train_nod": [1, 2], "test_nod": [1, 2], "testload": 1, "repres": [1, 2], "loader": 1, "flsetupid": [1, 2], "new": [1, 2], "databas": [1, 2], "The": [1, 2], "associ": [1, 2], "flpipeid": 1, "fedid": [1, 2], "flpipe": 1, "manag": 1, "pipelin": [1, 2], "A": [1, 2], "self": [1, 2], "none": [1, 2], "specifi": [1, 2], "test_frac": [1, 2], "1": [1, 2], "automat": 1, "all": [1, 2], "fraction": [1, 2], "classif": 1, "report": 1, "result": 1, "entri": 1, "string": [1, 2], "store": 1, "its": 1, "note": [1, 2], "thi": 1, "placehold": 1, "method": [1, 2], "need": 1, "implement": [1, 2], "your": 1, "setup": [1, 2], "node_nam": 1, "attribut": [1, 2], "rais": [1, 2], "typeerror": [1, 2], "If": [1, 2], "creation_d": 1, "callabl": 1, "val_load": 1, "devic": 1, "cpu": 1, "which": [1, 2], "perform": 1, "ndarrai": 1, "get": [1, 2], "np": 1, "model_nam": 1, "load": 1, "file": [1, 2], "save": 1, "except": 1, "an": [1, 2], "issu": 1, "dure": 1, "process": [1, 2], "set": 1, "train_load": 1, "epoch": 1, "privacy_engin": 1, "one": 1, "engin": [1, 2], "enabl": 1, "valu": [1, 2], "epsilon": 1, "experi": 1, "organ": 1, "uniqu": [1, 2], "rang": 1, "round": 1, "differ": 1, "pipeline_nam": 1, "comparison": 1, "between": 1, "confus": 1, "matrix": 1, "global": 1, "lr": 1, "001": 1, "adam": 1, "85": 1, "89": 1, "92": 1, "94": 1, "ellipsi": 1, "2": [1, 2], "15": 1, "08": 1, "01": 1, "sgd": 1, "88": 1, "91": 1, "93": 1, "95": 1, "18": 1, "13": 1, "09": 1, "07": 1, "82": 1, "87": 1, "25": 1, "12": 1, "should": 1, "have": 1, "len": 1, "num": 1, "delta": 1, "common": 1, "thing": 1, "aggreg": 1, "diff_privaci": 1, "client_resourc": 1, "num_cpu": 1, "num_gpu": 1, "central": 1, "flower": 1, "among": 1, "commun": 1, "particip": 1, "each": 1, "individu": 1, "server_round": 1, "ani": 1, "dtype": 1, 
"byte": 1, "fl": [1, 2], "scalar": 1, "simul": 1, "histori": 1, "fedavg": 1, "evaluation_method": 1, "minimum": 1, "avail": 1, "requir": 1, "start": 1, "parametr": 1, "distribut": 1, "y_true": 1, "y_pred": 1, "comput": 1, "custom": 1, "includ": [1, 2], "sensit": 1, "precis": 1, "npv": 1, "f1": 1, "score": 1, "posit": 1, "like": 1, "label": [1, 2], "predict": 1, "empti": [1, 2], "record": 1, "multipl": 1, "tabl": [1, 2], "reset": 1, "auto": 1, "increment": 1, "counter": 1, "pipeline_id": 1, "datafram": [1, 2], "panda": [1, 2], "test_load": 1, "path": 2, "csv": 2, "assum": 2, "pd": 2, "Not": 2, "yet": 2, "nodeid": 2, "upload": 2, "fill_strategi": 2, "mean": 2, "fit_encod": 2, "to_drop": 2, "train_batch_s": 2, "32": 2, "test_batch_s": 2, "split_frac": 2, "read": 2, "val_frac": 2, "featur": 2, "master": 2, "column_nam": 2, "column": 2, "masterdataset": 2, "retriev": 2, "": 2, "about": 2, "classmethod": 2, "feddataset": 2, "found": 2, "flpipelineid": 2, "netnam": 2, "netid": 2, "nodenam": 2, "data_df": 2, "row": 2, "x": 2, "check": 2, "convert": 2, "accordingli": 2, "seri": 2, "exist": 2, "otherwis": 2, "after": 2, "encod": 2, "drop": 2, "tensordataset": 2, "imput": 2, "miss": 2, "eicu": 2, "fill": 2, "replac": 2, "nan": 2, "unknown": 2, "integ": 2, "add": 2, "path_to_csv": 2, "d": 2, "esi": 2, "3c": 2, "pfe": 2, "last_year": 2, "code": 2, "notebook": 2, "eicu_test": 2, "insert": 2, "rtype": 2, "network_nam": 2, "els": 2, "sqlalchemi": 2, "connect": 2, "when": 2, "dataset_nam": 2, "assign": 2, "param": 2, "compat": 2, "belong": 2, "filter": 2, "unssign": 2, "index": 3, "modul": [3, 4], "search": 3, "page": 3, "packag": 4, "subpackag": 4, "content": 4}, "objects": {"": [[0, 0, 0, "-", "MEDfl"]], "MEDfl": [[1, 0, 0, "-", "LearningManager"], [2, 0, 0, "-", "NetManager"]], "MEDfl.LearningManager": [[1, 0, 0, "-", "client"], [1, 0, 0, "-", "dynamicModal"], [1, 0, 0, "-", "federated_dataset"], [1, 0, 0, "-", "flpipeline"], [1, 0, 0, "-", "model"], [1, 0, 0, "-", "plot"], [1, 0, 0, "-", "server"], [1, 0, 0, "-", "strategy"], [1, 0, 0, "-", "utils"]], "MEDfl.LearningManager.client": [[1, 1, 1, "", "FlowerClient"]], "MEDfl.LearningManager.client.FlowerClient": [[1, 2, 1, "", "__init__"], [1, 3, 1, "", "cid"], [1, 3, 1, "", "context"], [1, 3, 1, "", "diff_priv"], [1, 2, 1, "", "evaluate"], [1, 2, 1, "", "fit"], [1, 2, 1, "", "get_parameters"], [1, 3, 1, "", "local_model"], [1, 3, 1, "", "trainloader"], [1, 2, 1, "", "validate"], [1, 3, 1, "", "valloader"]], "MEDfl.LearningManager.dynamicModal": [[1, 1, 1, "", "DynamicModel"]], "MEDfl.LearningManager.dynamicModal.DynamicModel": [[1, 2, 1, "", "create_binary_classifier"], [1, 2, 1, "", "create_convolutional_neural_network"], [1, 2, 1, "", "create_linear_regressor"], [1, 2, 1, "", "create_logistic_regressor"], [1, 2, 1, "", "create_lstm_network"], [1, 2, 1, "", "create_model"], [1, 2, 1, "", "create_multiclass_classifier"], [1, 2, 1, "", "create_recurrent_neural_network"]], "MEDfl.LearningManager.federated_dataset": [[1, 1, 1, "", "FederatedDataset"]], "MEDfl.LearningManager.federated_dataset.FederatedDataset": [[1, 2, 1, "", "__init__"], [1, 2, 1, "", "create"], [1, 2, 1, "", "update"]], "MEDfl.LearningManager.flpipeline": [[1, 1, 1, "", "FLpipeline"], [1, 4, 1, "", "create_query"]], "MEDfl.LearningManager.flpipeline.FLpipeline": [[1, 2, 1, "", "auto_test"], [1, 2, 1, "", "create"], [1, 2, 1, "", "delete"], [1, 3, 1, "", "description"], [1, 3, 1, "", "name"], [1, 3, 1, "", "server"], [1, 2, 1, "", "test_by_node"], [1, 2, 1, "", "validate"]], 
"MEDfl.LearningManager.model": [[1, 1, 1, "", "Model"]], "MEDfl.LearningManager.model.Model": [[1, 2, 1, "", "__init__"], [1, 3, 1, "", "criterion"], [1, 2, 1, "", "evaluate"], [1, 2, 1, "", "get_parameters"], [1, 2, 1, "", "load_model"], [1, 3, 1, "", "model"], [1, 3, 1, "", "optimizer"], [1, 2, 1, "", "save_model"], [1, 2, 1, "", "set_parameters"], [1, 2, 1, "", "train"], [1, 2, 1, "", "validate"]], "MEDfl.LearningManager.plot": [[1, 1, 1, "", "AccuracyLossPlotter"], [1, 5, 1, "", "results_dict"]], "MEDfl.LearningManager.plot.AccuracyLossPlotter": [[1, 2, 1, "", "__init__"], [1, 3, 1, "", "iterations"], [1, 3, 1, "", "metrics"], [1, 3, 1, "", "parameters"], [1, 2, 1, "", "plot_accuracy_loss"], [1, 2, 1, "", "plot_classification_report"], [1, 2, 1, "", "plot_confusion_Matrix_by_node"], [1, 2, 1, "", "plot_global_confusion_matrix"], [1, 3, 1, "", "results_dict"]], "MEDfl.LearningManager.server": [[1, 1, 1, "", "FlowerServer"]], "MEDfl.LearningManager.server.FlowerServer": [[1, 2, 1, "", "__init__"], [1, 3, 1, "", "accuracies"], [1, 2, 1, "", "client_fn"], [1, 3, 1, "", "diff_priv"], [1, 2, 1, "", "evaluate"], [1, 3, 1, "", "fed_dataset"], [1, 3, 1, "", "flower_clients"], [1, 3, 1, "", "global_model"], [1, 3, 1, "", "losses"], [1, 3, 1, "", "num_clients"], [1, 3, 1, "", "num_rounds"], [1, 2, 1, "", "run"], [1, 3, 1, "", "strategy"], [1, 2, 1, "", "validate"]], "MEDfl.LearningManager.strategy": [[1, 1, 1, "", "Strategy"]], "MEDfl.LearningManager.strategy.Strategy": [[1, 2, 1, "", "__init__"], [1, 3, 1, "", "fraction_evaluate"], [1, 3, 1, "", "fraction_fit"], [1, 3, 1, "", "initial_parameters"], [1, 3, 1, "", "min_available_clients"], [1, 3, 1, "", "min_evaluate_clients"], [1, 3, 1, "", "min_fit_clients"], [1, 3, 1, "", "name"]], "MEDfl.LearningManager.utils": [[1, 4, 1, "", "custom_classification_report"], [1, 4, 1, "", "empty_db"], [1, 4, 1, "", "get_node_confusion_matrix"], [1, 4, 1, "", "get_pipeline_confusion_matrix"], [1, 4, 1, "", "get_pipeline_from_name"], [1, 4, 1, "", "get_pipeline_result"], [1, 4, 1, "", "test"]], "MEDfl.NetManager": [[2, 0, 0, "-", "dataset"], [2, 0, 0, "-", "flsetup"], [2, 0, 0, "-", "net_helper"], [2, 0, 0, "-", "net_manager_queries"], [2, 0, 0, "-", "network"], [2, 0, 0, "-", "node"]], "MEDfl.NetManager.dataset": [[2, 1, 1, "", "DataSet"]], "MEDfl.NetManager.dataset.DataSet": [[2, 2, 1, "", "__init__"], [2, 2, 1, "", "delete_dataset"], [2, 2, 1, "", "list_alldatasets"], [2, 2, 1, "", "update_data"], [2, 2, 1, "", "upload_dataset"], [2, 2, 1, "", "validate"]], "MEDfl.NetManager.flsetup": [[2, 1, 1, "", "FLsetup"]], "MEDfl.NetManager.flsetup.FLsetup": [[2, 2, 1, "", "__init__"], [2, 2, 1, "", "create"], [2, 2, 1, "", "create_dataloader_from_node"], [2, 2, 1, "", "create_federated_dataset"], [2, 2, 1, "", "create_nodes_from_master_dataset"], [2, 2, 1, "", "delete"], [2, 2, 1, "", "get_flDataSet"], [2, 2, 1, "", "list_allsetups"], [2, 2, 1, "", "read_setup"], [2, 2, 1, "", "validate"]], "MEDfl.NetManager.net_helper": [[2, 4, 1, "", "get_feddataset_id_from_name"], [2, 4, 1, "", "get_flpipeline_from_name"], [2, 4, 1, "", "get_flsetupid_from_name"], [2, 4, 1, "", "get_netid_from_name"], [2, 4, 1, "", "get_nodeid_from_name"], [2, 4, 1, "", "is_str"], [2, 4, 1, "", "master_table_exists"], [2, 4, 1, "", "process_data_after_reading"], [2, 4, 1, "", "process_eicu"]], "MEDfl.NetManager.network": [[2, 1, 1, "", "Network"]], "MEDfl.NetManager.network.Network": [[2, 2, 1, "", "__init__"], [2, 2, 1, "", "add_node"], [2, 2, 1, "", "create_master_dataset"], [2, 2, 1, "", 
"create_network"], [2, 2, 1, "", "delete_network"], [2, 2, 1, "", "list_allnetworks"], [2, 2, 1, "", "list_allnodes"], [2, 3, 1, "", "mtable_exists"], [2, 3, 1, "", "name"], [2, 2, 1, "", "update_network"], [2, 2, 1, "", "use_network"], [2, 2, 1, "", "validate"]], "MEDfl.NetManager.node": [[2, 1, 1, "", "Node"]], "MEDfl.NetManager.node.Node": [[2, 2, 1, "", "__init__"], [2, 2, 1, "", "assign_dataset"], [2, 2, 1, "", "check_dataset_compatibility"], [2, 2, 1, "", "create_node"], [2, 2, 1, "", "delete_node"], [2, 2, 1, "", "get_dataset"], [2, 2, 1, "", "list_alldatasets"], [2, 2, 1, "", "list_allnodes"], [2, 3, 1, "", "name"], [2, 3, 1, "", "test_fraction"], [2, 3, 1, "", "train"], [2, 2, 1, "", "unassign_dataset"], [2, 2, 1, "", "update_node"], [2, 2, 1, "", "upload_dataset"], [2, 2, 1, "", "validate"]]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:method", "3": "py:attribute", "4": "py:function", "5": "py:data"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "method", "Python method"], "3": ["py", "attribute", "Python attribute"], "4": ["py", "function", "Python function"], "5": ["py", "data", "Python data"]}, "titleterms": {"MEDfl": [0, 1, 2, 3, 4], "packag": [0, 1, 2, 3], "subpackag": 0, "modul": [0, 1, 2], "content": [0, 1, 2, 3], "learningmanag": 1, "submodul": [1, 2], "client": 1, "dynamicmod": 1, "federated_dataset": 1, "flpipelin": 1, "model": 1, "plot": 1, "server": 1, "strategi": 1, "util": 1, "netmanag": 2, "dataset": 2, "flsetup": 2, "net_help": 2, "net_manager_queri": 2, "network": 2, "node": 2, "welcom": 3, "doc": 3, "": 3, "document": 3, "indic": 3, "tabl": 3}, "envversion": {"sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.intersphinx": 1, "sphinx.ext.todo": 2, "sphinx.ext.viewcode": 1, "sphinx": 60}, "alltitles": {"MEDfl package": [[0, "MEDfl-package"]], "Subpackages": [[0, "subpackages"]], "Module contents": [[0, "module-MEDfl"], [1, "module-MEDfl.LearningManager"], [2, "module-MEDfl.NetManager"]], "MEDfl.LearningManager package": [[1, "MEDfl-learningmanager-package"]], "Submodules": [[1, "submodules"], [2, "submodules"]], "MEDfl.LearningManager.client module": [[1, "module-MEDfl.LearningManager.client"]], "MEDfl.LearningManager.dynamicModal module": [[1, "module-MEDfl.LearningManager.dynamicModal"]], "MEDfl.LearningManager.federated_dataset module": [[1, "module-MEDfl.LearningManager.federated_dataset"]], "MEDfl.LearningManager.flpipeline module": [[1, "module-MEDfl.LearningManager.flpipeline"]], "MEDfl.LearningManager.model module": [[1, "module-MEDfl.LearningManager.model"]], "MEDfl.LearningManager.plot module": [[1, "module-MEDfl.LearningManager.plot"]], "MEDfl.LearningManager.server module": [[1, "module-MEDfl.LearningManager.server"]], "MEDfl.LearningManager.strategy module": [[1, "module-MEDfl.LearningManager.strategy"]], "MEDfl.LearningManager.utils module": [[1, "module-MEDfl.LearningManager.utils"]], "MEDfl.NetManager package": [[2, "MEDfl-netmanager-package"]], "MEDfl.NetManager.dataset module": [[2, "module-MEDfl.NetManager.dataset"]], "MEDfl.NetManager.flsetup module": [[2, "module-MEDfl.NetManager.flsetup"]], "MEDfl.NetManager.net_helper module": [[2, "module-MEDfl.NetManager.net_helper"]], "MEDfl.NetManager.net_manager_queries module": [[2, 
"module-MEDfl.NetManager.net_manager_queries"]], "MEDfl.NetManager.network module": [[2, "module-MEDfl.NetManager.network"]], "MEDfl.NetManager.node module": [[2, "module-MEDfl.NetManager.node"]], "Welcome to MEDfl-package-docs\u2019s documentation!": [[3, "welcome-to-MEDfl-package-docs-s-documentation"]], "Contents:": [[3, null]], "Indices and tables": [[3, "indices-and-tables"]], "MEDfl": [[4, "MEDfl"]]}, "indexentries": {"MEDfl": [[0, "module-MEDfl"]], "module": [[0, "module-MEDfl"], [1, "module-MEDfl.LearningManager"], [1, "module-MEDfl.LearningManager.client"], [1, "module-MEDfl.LearningManager.dynamicModal"], [1, "module-MEDfl.LearningManager.federated_dataset"], [1, "module-MEDfl.LearningManager.flpipeline"], [1, "module-MEDfl.LearningManager.model"], [1, "module-MEDfl.LearningManager.plot"], [1, "module-MEDfl.LearningManager.server"], [1, "module-MEDfl.LearningManager.strategy"], [1, "module-MEDfl.LearningManager.utils"], [2, "module-MEDfl.NetManager"], [2, "module-MEDfl.NetManager.dataset"], [2, "module-MEDfl.NetManager.flsetup"], [2, "module-MEDfl.NetManager.net_helper"], [2, "module-MEDfl.NetManager.net_manager_queries"], [2, "module-MEDfl.NetManager.network"], [2, "module-MEDfl.NetManager.node"]], "accuracylossplotter (class in MEDfl.learningmanager.plot)": [[1, "MEDfl.LearningManager.plot.AccuracyLossPlotter"]], "dynamicmodel (class in MEDfl.learningmanager.dynamicmodal)": [[1, "MEDfl.LearningManager.dynamicModal.DynamicModel"]], "flpipeline (class in MEDfl.learningmanager.flpipeline)": [[1, "MEDfl.LearningManager.flpipeline.FLpipeline"]], "federateddataset (class in MEDfl.learningmanager.federated_dataset)": [[1, "MEDfl.LearningManager.federated_dataset.FederatedDataset"]], "flowerclient (class in MEDfl.learningmanager.client)": [[1, "MEDfl.LearningManager.client.FlowerClient"]], "flowerserver (class in MEDfl.learningmanager.server)": [[1, "MEDfl.LearningManager.server.FlowerServer"]], "MEDfl.learningmanager": [[1, "module-MEDfl.LearningManager"]], "MEDfl.learningmanager.client": [[1, "module-MEDfl.LearningManager.client"]], "MEDfl.learningmanager.dynamicmodal": [[1, "module-MEDfl.LearningManager.dynamicModal"]], "MEDfl.learningmanager.federated_dataset": [[1, "module-MEDfl.LearningManager.federated_dataset"]], "MEDfl.learningmanager.flpipeline": [[1, "module-MEDfl.LearningManager.flpipeline"]], "MEDfl.learningmanager.model": [[1, "module-MEDfl.LearningManager.model"]], "MEDfl.learningmanager.plot": [[1, "module-MEDfl.LearningManager.plot"]], "MEDfl.learningmanager.server": [[1, "module-MEDfl.LearningManager.server"]], "MEDfl.learningmanager.strategy": [[1, "module-MEDfl.LearningManager.strategy"]], "MEDfl.learningmanager.utils": [[1, "module-MEDfl.LearningManager.utils"]], "model (class in MEDfl.learningmanager.model)": [[1, "MEDfl.LearningManager.model.Model"]], "strategy (class in MEDfl.learningmanager.strategy)": [[1, "MEDfl.LearningManager.strategy.Strategy"]], "__init__() (MEDfl.learningmanager.client.flowerclient method)": [[1, "MEDfl.LearningManager.client.FlowerClient.__init__"]], "__init__() (MEDfl.learningmanager.federated_dataset.federateddataset method)": [[1, "MEDfl.LearningManager.federated_dataset.FederatedDataset.__init__"]], "__init__() (MEDfl.learningmanager.model.model method)": [[1, "MEDfl.LearningManager.model.Model.__init__"]], "__init__() (MEDfl.learningmanager.plot.accuracylossplotter method)": [[1, "MEDfl.LearningManager.plot.AccuracyLossPlotter.__init__"]], "__init__() (MEDfl.learningmanager.server.flowerserver method)": [[1, 
"MEDfl.LearningManager.server.FlowerServer.__init__"]], "__init__() (MEDfl.learningmanager.strategy.strategy method)": [[1, "MEDfl.LearningManager.strategy.Strategy.__init__"]], "accuracies (MEDfl.learningmanager.server.flowerserver attribute)": [[1, "MEDfl.LearningManager.server.FlowerServer.accuracies"]], "auto_test() (MEDfl.learningmanager.flpipeline.flpipeline method)": [[1, "MEDfl.LearningManager.flpipeline.FLpipeline.auto_test"]], "cid (MEDfl.learningmanager.client.flowerclient attribute)": [[1, "MEDfl.LearningManager.client.FlowerClient.cid"]], "client_fn() (MEDfl.learningmanager.server.flowerserver method)": [[1, "MEDfl.LearningManager.server.FlowerServer.client_fn"]], "context (MEDfl.learningmanager.client.flowerclient attribute)": [[1, "MEDfl.LearningManager.client.FlowerClient.context"]], "create() (MEDfl.learningmanager.federated_dataset.federateddataset method)": [[1, "MEDfl.LearningManager.federated_dataset.FederatedDataset.create"]], "create() (MEDfl.learningmanager.flpipeline.flpipeline method)": [[1, "MEDfl.LearningManager.flpipeline.FLpipeline.create"]], "create_binary_classifier() (MEDfl.learningmanager.dynamicmodal.dynamicmodel static method)": [[1, "MEDfl.LearningManager.dynamicModal.DynamicModel.create_binary_classifier"]], "create_convolutional_neural_network() (MEDfl.learningmanager.dynamicmodal.dynamicmodel static method)": [[1, "MEDfl.LearningManager.dynamicModal.DynamicModel.create_convolutional_neural_network"]], "create_linear_regressor() (MEDfl.learningmanager.dynamicmodal.dynamicmodel static method)": [[1, "MEDfl.LearningManager.dynamicModal.DynamicModel.create_linear_regressor"]], "create_logistic_regressor() (MEDfl.learningmanager.dynamicmodal.dynamicmodel static method)": [[1, "MEDfl.LearningManager.dynamicModal.DynamicModel.create_logistic_regressor"]], "create_lstm_network() (MEDfl.learningmanager.dynamicmodal.dynamicmodel static method)": [[1, "MEDfl.LearningManager.dynamicModal.DynamicModel.create_lstm_network"]], "create_model() (MEDfl.learningmanager.dynamicmodal.dynamicmodel method)": [[1, "MEDfl.LearningManager.dynamicModal.DynamicModel.create_model"]], "create_multiclass_classifier() (MEDfl.learningmanager.dynamicmodal.dynamicmodel static method)": [[1, "MEDfl.LearningManager.dynamicModal.DynamicModel.create_multiclass_classifier"]], "create_query() (in module MEDfl.learningmanager.flpipeline)": [[1, "MEDfl.LearningManager.flpipeline.create_query"]], "create_recurrent_neural_network() (MEDfl.learningmanager.dynamicmodal.dynamicmodel static method)": [[1, "MEDfl.LearningManager.dynamicModal.DynamicModel.create_recurrent_neural_network"]], "criterion (MEDfl.learningmanager.model.model attribute)": [[1, "MEDfl.LearningManager.model.Model.criterion"]], "custom_classification_report() (in module MEDfl.learningmanager.utils)": [[1, "MEDfl.LearningManager.utils.custom_classification_report"]], "delete() (MEDfl.learningmanager.flpipeline.flpipeline method)": [[1, "MEDfl.LearningManager.flpipeline.FLpipeline.delete"]], "description (MEDfl.learningmanager.flpipeline.flpipeline attribute)": [[1, "MEDfl.LearningManager.flpipeline.FLpipeline.description"]], "diff_priv (MEDfl.learningmanager.client.flowerclient attribute)": [[1, "MEDfl.LearningManager.client.FlowerClient.diff_priv"]], "diff_priv (MEDfl.learningmanager.server.flowerserver attribute)": [[1, "MEDfl.LearningManager.server.FlowerServer.diff_priv"]], "empty_db() (in module MEDfl.learningmanager.utils)": [[1, "MEDfl.LearningManager.utils.empty_db"]], "evaluate() 
(MEDfl.learningmanager.client.flowerclient method)": [[1, "MEDfl.LearningManager.client.FlowerClient.evaluate"]], "evaluate() (MEDfl.learningmanager.model.model method)": [[1, "MEDfl.LearningManager.model.Model.evaluate"]], "evaluate() (MEDfl.learningmanager.server.flowerserver method)": [[1, "MEDfl.LearningManager.server.FlowerServer.evaluate"]], "fed_dataset (MEDfl.learningmanager.server.flowerserver attribute)": [[1, "MEDfl.LearningManager.server.FlowerServer.fed_dataset"]], "fit() (MEDfl.learningmanager.client.flowerclient method)": [[1, "MEDfl.LearningManager.client.FlowerClient.fit"]], "flower_clients (MEDfl.learningmanager.server.flowerserver attribute)": [[1, "MEDfl.LearningManager.server.FlowerServer.flower_clients"]], "fraction_evaluate (MEDfl.learningmanager.strategy.strategy attribute)": [[1, "MEDfl.LearningManager.strategy.Strategy.fraction_evaluate"]], "fraction_fit (MEDfl.learningmanager.strategy.strategy attribute)": [[1, "MEDfl.LearningManager.strategy.Strategy.fraction_fit"]], "get_node_confusion_matrix() (in module MEDfl.learningmanager.utils)": [[1, "MEDfl.LearningManager.utils.get_node_confusion_matrix"]], "get_parameters() (MEDfl.learningmanager.client.flowerclient method)": [[1, "MEDfl.LearningManager.client.FlowerClient.get_parameters"]], "get_parameters() (MEDfl.learningmanager.model.model method)": [[1, "MEDfl.LearningManager.model.Model.get_parameters"]], "get_pipeline_confusion_matrix() (in module MEDfl.learningmanager.utils)": [[1, "MEDfl.LearningManager.utils.get_pipeline_confusion_matrix"]], "get_pipeline_from_name() (in module MEDfl.learningmanager.utils)": [[1, "MEDfl.LearningManager.utils.get_pipeline_from_name"]], "get_pipeline_result() (in module MEDfl.learningmanager.utils)": [[1, "MEDfl.LearningManager.utils.get_pipeline_result"]], "global_model (MEDfl.learningmanager.server.flowerserver attribute)": [[1, "MEDfl.LearningManager.server.FlowerServer.global_model"]], "initial_parameters (MEDfl.learningmanager.strategy.strategy attribute)": [[1, "MEDfl.LearningManager.strategy.Strategy.initial_parameters"]], "iterations (MEDfl.learningmanager.plot.accuracylossplotter attribute)": [[1, "MEDfl.LearningManager.plot.AccuracyLossPlotter.iterations"]], "load_model() (MEDfl.learningmanager.model.model static method)": [[1, "MEDfl.LearningManager.model.Model.load_model"]], "local_model (MEDfl.learningmanager.client.flowerclient attribute)": [[1, "MEDfl.LearningManager.client.FlowerClient.local_model"]], "losses (MEDfl.learningmanager.server.flowerserver attribute)": [[1, "MEDfl.LearningManager.server.FlowerServer.losses"]], "metrics (MEDfl.learningmanager.plot.accuracylossplotter attribute)": [[1, "MEDfl.LearningManager.plot.AccuracyLossPlotter.metrics"]], "min_available_clients (MEDfl.learningmanager.strategy.strategy attribute)": [[1, "MEDfl.LearningManager.strategy.Strategy.min_available_clients"]], "min_evaluate_clients (MEDfl.learningmanager.strategy.strategy attribute)": [[1, "MEDfl.LearningManager.strategy.Strategy.min_evaluate_clients"]], "min_fit_clients (MEDfl.learningmanager.strategy.strategy attribute)": [[1, "MEDfl.LearningManager.strategy.Strategy.min_fit_clients"]], "model (MEDfl.learningmanager.model.model attribute)": [[1, "MEDfl.LearningManager.model.Model.model"]], "name (MEDfl.learningmanager.flpipeline.flpipeline attribute)": [[1, "MEDfl.LearningManager.flpipeline.FLpipeline.name"]], "name (MEDfl.learningmanager.strategy.strategy attribute)": [[1, "MEDfl.LearningManager.strategy.Strategy.name"]], "num_clients 
(MEDfl.learningmanager.server.flowerserver attribute)": [[1, "MEDfl.LearningManager.server.FlowerServer.num_clients"]], "num_rounds (MEDfl.learningmanager.server.flowerserver attribute)": [[1, "MEDfl.LearningManager.server.FlowerServer.num_rounds"]], "optimizer (MEDfl.learningmanager.model.model attribute)": [[1, "MEDfl.LearningManager.model.Model.optimizer"]], "parameters (MEDfl.learningmanager.plot.accuracylossplotter attribute)": [[1, "MEDfl.LearningManager.plot.AccuracyLossPlotter.parameters"]], "plot_accuracy_loss() (MEDfl.learningmanager.plot.accuracylossplotter method)": [[1, "MEDfl.LearningManager.plot.AccuracyLossPlotter.plot_accuracy_loss"]], "plot_classification_report() (MEDfl.learningmanager.plot.accuracylossplotter static method)": [[1, "MEDfl.LearningManager.plot.AccuracyLossPlotter.plot_classification_report"]], "plot_confusion_matrix_by_node() (MEDfl.learningmanager.plot.accuracylossplotter static method)": [[1, "MEDfl.LearningManager.plot.AccuracyLossPlotter.plot_confusion_Matrix_by_node"]], "plot_global_confusion_matrix() (MEDfl.learningmanager.plot.accuracylossplotter static method)": [[1, "MEDfl.LearningManager.plot.AccuracyLossPlotter.plot_global_confusion_matrix"]], "results_dict (MEDfl.learningmanager.plot.accuracylossplotter attribute)": [[1, "MEDfl.LearningManager.plot.AccuracyLossPlotter.results_dict"]], "results_dict (in module MEDfl.learningmanager.plot)": [[1, "MEDfl.LearningManager.plot.results_dict"]], "run() (MEDfl.learningmanager.server.flowerserver method)": [[1, "MEDfl.LearningManager.server.FlowerServer.run"]], "save_model() (MEDfl.learningmanager.model.model static method)": [[1, "MEDfl.LearningManager.model.Model.save_model"]], "server (MEDfl.learningmanager.flpipeline.flpipeline attribute)": [[1, "MEDfl.LearningManager.flpipeline.FLpipeline.server"]], "set_parameters() (MEDfl.learningmanager.model.model method)": [[1, "MEDfl.LearningManager.model.Model.set_parameters"]], "strategy (MEDfl.learningmanager.server.flowerserver attribute)": [[1, "MEDfl.LearningManager.server.FlowerServer.strategy"]], "test() (in module MEDfl.learningmanager.utils)": [[1, "MEDfl.LearningManager.utils.test"]], "test_by_node() (MEDfl.learningmanager.flpipeline.flpipeline method)": [[1, "MEDfl.LearningManager.flpipeline.FLpipeline.test_by_node"]], "train() (MEDfl.learningmanager.model.model method)": [[1, "MEDfl.LearningManager.model.Model.train"]], "trainloader (MEDfl.learningmanager.client.flowerclient attribute)": [[1, "MEDfl.LearningManager.client.FlowerClient.trainloader"]], "update() (MEDfl.learningmanager.federated_dataset.federateddataset method)": [[1, "MEDfl.LearningManager.federated_dataset.FederatedDataset.update"]], "validate() (MEDfl.learningmanager.client.flowerclient method)": [[1, "MEDfl.LearningManager.client.FlowerClient.validate"]], "validate() (MEDfl.learningmanager.flpipeline.flpipeline method)": [[1, "MEDfl.LearningManager.flpipeline.FLpipeline.validate"]], "validate() (MEDfl.learningmanager.model.model method)": [[1, "MEDfl.LearningManager.model.Model.validate"]], "validate() (MEDfl.learningmanager.server.flowerserver method)": [[1, "MEDfl.LearningManager.server.FlowerServer.validate"]], "valloader (MEDfl.learningmanager.client.flowerclient attribute)": [[1, "MEDfl.LearningManager.client.FlowerClient.valloader"]], "dataset (class in MEDfl.netmanager.dataset)": [[2, "MEDfl.NetManager.dataset.DataSet"]], "flsetup (class in MEDfl.netmanager.flsetup)": [[2, "MEDfl.NetManager.flsetup.FLsetup"]], "MEDfl.netmanager": [[2, "module-MEDfl.NetManager"]], 
"MEDfl.netmanager.dataset": [[2, "module-MEDfl.NetManager.dataset"]], "MEDfl.netmanager.flsetup": [[2, "module-MEDfl.NetManager.flsetup"]], "MEDfl.netmanager.net_helper": [[2, "module-MEDfl.NetManager.net_helper"]], "MEDfl.netmanager.net_manager_queries": [[2, "module-MEDfl.NetManager.net_manager_queries"]], "MEDfl.netmanager.network": [[2, "module-MEDfl.NetManager.network"]], "MEDfl.netmanager.node": [[2, "module-MEDfl.NetManager.node"]], "network (class in MEDfl.netmanager.network)": [[2, "MEDfl.NetManager.network.Network"]], "node (class in MEDfl.netmanager.node)": [[2, "MEDfl.NetManager.node.Node"]], "__init__() (MEDfl.netmanager.dataset.dataset method)": [[2, "MEDfl.NetManager.dataset.DataSet.__init__"]], "__init__() (MEDfl.netmanager.flsetup.flsetup method)": [[2, "MEDfl.NetManager.flsetup.FLsetup.__init__"]], "__init__() (MEDfl.netmanager.network.network method)": [[2, "MEDfl.NetManager.network.Network.__init__"]], "__init__() (MEDfl.netmanager.node.node method)": [[2, "MEDfl.NetManager.node.Node.__init__"]], "add_node() (MEDfl.netmanager.network.network method)": [[2, "MEDfl.NetManager.network.Network.add_node"]], "assign_dataset() (MEDfl.netmanager.node.node method)": [[2, "MEDfl.NetManager.node.Node.assign_dataset"]], "check_dataset_compatibility() (MEDfl.netmanager.node.node method)": [[2, "MEDfl.NetManager.node.Node.check_dataset_compatibility"]], "create() (MEDfl.netmanager.flsetup.flsetup method)": [[2, "MEDfl.NetManager.flsetup.FLsetup.create"]], "create_dataloader_from_node() (MEDfl.netmanager.flsetup.flsetup method)": [[2, "MEDfl.NetManager.flsetup.FLsetup.create_dataloader_from_node"]], "create_federated_dataset() (MEDfl.netmanager.flsetup.flsetup method)": [[2, "MEDfl.NetManager.flsetup.FLsetup.create_federated_dataset"]], "create_master_dataset() (MEDfl.netmanager.network.network method)": [[2, "MEDfl.NetManager.network.Network.create_master_dataset"]], "create_network() (MEDfl.netmanager.network.network method)": [[2, "MEDfl.NetManager.network.Network.create_network"]], "create_node() (MEDfl.netmanager.node.node method)": [[2, "MEDfl.NetManager.node.Node.create_node"]], "create_nodes_from_master_dataset() (MEDfl.netmanager.flsetup.flsetup method)": [[2, "MEDfl.NetManager.flsetup.FLsetup.create_nodes_from_master_dataset"]], "delete() (MEDfl.netmanager.flsetup.flsetup method)": [[2, "MEDfl.NetManager.flsetup.FLsetup.delete"]], "delete_dataset() (MEDfl.netmanager.dataset.dataset method)": [[2, "MEDfl.NetManager.dataset.DataSet.delete_dataset"]], "delete_network() (MEDfl.netmanager.network.network method)": [[2, "MEDfl.NetManager.network.Network.delete_network"]], "delete_node() (MEDfl.netmanager.node.node method)": [[2, "MEDfl.NetManager.node.Node.delete_node"]], "get_dataset() (MEDfl.netmanager.node.node method)": [[2, "MEDfl.NetManager.node.Node.get_dataset"]], "get_feddataset_id_from_name() (in module MEDfl.netmanager.net_helper)": [[2, "MEDfl.NetManager.net_helper.get_feddataset_id_from_name"]], "get_fldataset() (MEDfl.netmanager.flsetup.flsetup method)": [[2, "MEDfl.NetManager.flsetup.FLsetup.get_flDataSet"]], "get_flpipeline_from_name() (in module MEDfl.netmanager.net_helper)": [[2, "MEDfl.NetManager.net_helper.get_flpipeline_from_name"]], "get_flsetupid_from_name() (in module MEDfl.netmanager.net_helper)": [[2, "MEDfl.NetManager.net_helper.get_flsetupid_from_name"]], "get_netid_from_name() (in module MEDfl.netmanager.net_helper)": [[2, "MEDfl.NetManager.net_helper.get_netid_from_name"]], "get_nodeid_from_name() (in module MEDfl.netmanager.net_helper)": [[2, 
"MEDfl.NetManager.net_helper.get_nodeid_from_name"]], "is_str() (in module MEDfl.netmanager.net_helper)": [[2, "MEDfl.NetManager.net_helper.is_str"]], "list_alldatasets() (MEDfl.netmanager.dataset.dataset static method)": [[2, "MEDfl.NetManager.dataset.DataSet.list_alldatasets"]], "list_alldatasets() (MEDfl.netmanager.node.node method)": [[2, "MEDfl.NetManager.node.Node.list_alldatasets"]], "list_allnetworks() (MEDfl.netmanager.network.network static method)": [[2, "MEDfl.NetManager.network.Network.list_allnetworks"]], "list_allnodes() (MEDfl.netmanager.network.network method)": [[2, "MEDfl.NetManager.network.Network.list_allnodes"]], "list_allnodes() (MEDfl.netmanager.node.node static method)": [[2, "MEDfl.NetManager.node.Node.list_allnodes"]], "list_allsetups() (MEDfl.netmanager.flsetup.flsetup static method)": [[2, "MEDfl.NetManager.flsetup.FLsetup.list_allsetups"]], "master_table_exists() (in module MEDfl.netmanager.net_helper)": [[2, "MEDfl.NetManager.net_helper.master_table_exists"]], "mtable_exists (MEDfl.netmanager.network.network attribute)": [[2, "MEDfl.NetManager.network.Network.mtable_exists"]], "name (MEDfl.netmanager.network.network attribute)": [[2, "MEDfl.NetManager.network.Network.name"]], "name (MEDfl.netmanager.node.node attribute)": [[2, "MEDfl.NetManager.node.Node.name"]], "process_data_after_reading() (in module MEDfl.netmanager.net_helper)": [[2, "MEDfl.NetManager.net_helper.process_data_after_reading"]], "process_eicu() (in module MEDfl.netmanager.net_helper)": [[2, "MEDfl.NetManager.net_helper.process_eicu"]], "read_setup() (MEDfl.netmanager.flsetup.flsetup class method)": [[2, "MEDfl.NetManager.flsetup.FLsetup.read_setup"]], "test_fraction (MEDfl.netmanager.node.node attribute)": [[2, "MEDfl.NetManager.node.Node.test_fraction"]], "train (MEDfl.netmanager.node.node attribute)": [[2, "MEDfl.NetManager.node.Node.train"]], "unassign_dataset() (MEDfl.netmanager.node.node method)": [[2, "MEDfl.NetManager.node.Node.unassign_dataset"]], "update_data() (MEDfl.netmanager.dataset.dataset method)": [[2, "MEDfl.NetManager.dataset.DataSet.update_data"]], "update_network() (MEDfl.netmanager.network.network method)": [[2, "MEDfl.NetManager.network.Network.update_network"]], "update_node() (MEDfl.netmanager.node.node method)": [[2, "MEDfl.NetManager.node.Node.update_node"]], "upload_dataset() (MEDfl.netmanager.dataset.dataset method)": [[2, "MEDfl.NetManager.dataset.DataSet.upload_dataset"]], "upload_dataset() (MEDfl.netmanager.node.node method)": [[2, "MEDfl.NetManager.node.Node.upload_dataset"]], "use_network() (MEDfl.netmanager.network.network method)": [[2, "MEDfl.NetManager.network.Network.use_network"]], "validate() (MEDfl.netmanager.dataset.dataset method)": [[2, "MEDfl.NetManager.dataset.DataSet.validate"]], "validate() (MEDfl.netmanager.flsetup.flsetup method)": [[2, "MEDfl.NetManager.flsetup.FLsetup.validate"]], "validate() (MEDfl.netmanager.network.network method)": [[2, "MEDfl.NetManager.network.Network.validate"]], "validate() (MEDfl.netmanager.node.node method)": [[2, "MEDfl.NetManager.node.Node.validate"]]}}) \ No newline at end of file diff --git a/docs/build/doctrees/Documentation.doctree b/docs/build/doctrees/Documentation.doctree new file mode 100644 index 0000000..fd54d3d Binary files /dev/null and b/docs/build/doctrees/Documentation.doctree differ diff --git a/docs/build/doctrees/Installation.doctree b/docs/build/doctrees/Installation.doctree new file mode 100644 index 0000000..d59cd87 Binary files /dev/null and b/docs/build/doctrees/Installation.doctree 
differ diff --git a/docs/build/doctrees/MEDfl.LearningManager.doctree b/docs/build/doctrees/MEDfl.LearningManager.doctree new file mode 100644 index 0000000..ae2f6b7 Binary files /dev/null and b/docs/build/doctrees/MEDfl.LearningManager.doctree differ diff --git a/docs/build/doctrees/MEDfl.NetManager.doctree b/docs/build/doctrees/MEDfl.NetManager.doctree new file mode 100644 index 0000000..6e41105 Binary files /dev/null and b/docs/build/doctrees/MEDfl.NetManager.doctree differ diff --git a/docs/build/doctrees/Medfl.doctree b/docs/build/doctrees/Medfl.doctree new file mode 100644 index 0000000..26c344e Binary files /dev/null and b/docs/build/doctrees/Medfl.doctree differ diff --git a/docs/build/doctrees/configuration_file.doctree b/docs/build/doctrees/configuration_file.doctree new file mode 100644 index 0000000..e3dd957 Binary files /dev/null and b/docs/build/doctrees/configuration_file.doctree differ diff --git a/docs/build/doctrees/environment.pickle b/docs/build/doctrees/environment.pickle new file mode 100644 index 0000000..47ac046 Binary files /dev/null and b/docs/build/doctrees/environment.pickle differ diff --git a/docs/build/doctrees/index.doctree b/docs/build/doctrees/index.doctree new file mode 100644 index 0000000..b51c61a Binary files /dev/null and b/docs/build/doctrees/index.doctree differ diff --git a/docs/build/doctrees/modules.doctree b/docs/build/doctrees/modules.doctree new file mode 100644 index 0000000..108c3f7 Binary files /dev/null and b/docs/build/doctrees/modules.doctree differ diff --git a/docs/build/doctrees/tutorials.doctree b/docs/build/doctrees/tutorials.doctree new file mode 100644 index 0000000..7dddcd9 Binary files /dev/null and b/docs/build/doctrees/tutorials.doctree differ diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 0000000..6247f7e --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/source/Documentation.rst b/docs/source/Documentation.rst new file mode 100644 index 0000000..421297c --- /dev/null +++ b/docs/source/Documentation.rst @@ -0,0 +1,17 @@ +Documentation +============= + +We used Sphinx to create the documentation for this project. You can generate and host it locally by compiling the documentation source code using: + +.. code-block:: bash + + cd docs + make clean + make html + +Then open it locally using: + +.. code-block:: bash + + cd _build/html + python -m http.server \ No newline at end of file diff --git a/docs/source/Installation.rst b/docs/source/Installation.rst new file mode 100644 index 0000000..fec02a0 --- /dev/null +++ b/docs/source/Installation.rst @@ -0,0 +1,28 @@ +Installation +============ + +Python installation +------------------- +The MEDfl package requires ``python 3.9`` or more to be run. 
If you don't have it installed on your machine, check out the following link `Python `_ +It also requires MySQL database + +Package Installation +-------------------- + +For now, you can install the ``MEDfl`` package as follows: + +.. code-block:: bash + + git clone https://github.com/MEDomics-UdeS/MEDfl.git + cd MEDfl + pip install -e . + + +MySQL DB Configuration +---------------------- + +MEDfl requires a MySQL DB connection, and this is in order to allow users to work with their own tabular datasets. We have created a bash script to install and configure a MySQL DB with phpMyAdmin monitoring system. Run the following command, then change your credentials in the MEDfl/scripts/base.py file: + +.. code-block:: bash + + sudo bash MEDfl/scripts/setup_mysql.sh diff --git a/docs/source/MEDfl.LearningManager.rst b/docs/source/MEDfl.LearningManager.rst new file mode 100644 index 0000000..4f9513e --- /dev/null +++ b/docs/source/MEDfl.LearningManager.rst @@ -0,0 +1,85 @@ +MEDfl.LearningManager package +============================= + +Submodules +---------- + +MEDfl.LearningManager.client module +----------------------------------- + +.. automodule:: MEDfl.LearningManager.client + :members: + :undoc-members: + :show-inheritance: + +MEDfl.LearningManager.dynamicModal module +----------------------------------------- + +.. automodule:: MEDfl.LearningManager.dynamicModal + :members: + :undoc-members: + :show-inheritance: + +MEDfl.LearningManager.federated\_dataset module +----------------------------------------------- + +.. automodule:: MEDfl.LearningManager.federated_dataset + :members: + :undoc-members: + :show-inheritance: + +MEDfl.LearningManager.flpipeline module +--------------------------------------- + +.. automodule:: MEDfl.LearningManager.flpipeline + :members: + :undoc-members: + :show-inheritance: + +MEDfl.LearningManager.model module +---------------------------------- + +.. automodule:: MEDfl.LearningManager.model + :members: + :undoc-members: + :show-inheritance: + +MEDfl.LearningManager.plot module +--------------------------------- + +.. automodule:: MEDfl.LearningManager.plot + :members: + :undoc-members: + :show-inheritance: + +MEDfl.LearningManager.server module +----------------------------------- + +.. automodule:: MEDfl.LearningManager.server + :members: + :undoc-members: + :show-inheritance: + +MEDfl.LearningManager.strategy module +------------------------------------- + +.. automodule:: MEDfl.LearningManager.strategy + :members: + :undoc-members: + :show-inheritance: + +MEDfl.LearningManager.utils module +---------------------------------- + +.. automodule:: MEDfl.LearningManager.utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: MEDfl.LearningManager + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/MEDfl.NetManager.rst b/docs/source/MEDfl.NetManager.rst new file mode 100644 index 0000000..d36ef24 --- /dev/null +++ b/docs/source/MEDfl.NetManager.rst @@ -0,0 +1,61 @@ +MEDfl.NetManager package +======================== + +Submodules +---------- + +MEDfl.NetManager.dataset module +------------------------------- + +.. automodule:: MEDfl.NetManager.dataset + :members: + :undoc-members: + :show-inheritance: + +MEDfl.NetManager.flsetup module +------------------------------- + +.. automodule:: MEDfl.NetManager.flsetup + :members: + :undoc-members: + :show-inheritance: + +MEDfl.NetManager.net\_helper module +----------------------------------- + +.. 
automodule:: MEDfl.NetManager.net_helper + :members: + :undoc-members: + :show-inheritance: + +MEDfl.NetManager.net\_manager\_queries module +--------------------------------------------- + +.. automodule:: MEDfl.NetManager.net_manager_queries + :members: + :undoc-members: + :show-inheritance: + +MEDfl.NetManager.network module +------------------------------- + +.. automodule:: MEDfl.NetManager.network + :members: + :undoc-members: + :show-inheritance: + +MEDfl.NetManager.node module +---------------------------- + +.. automodule:: MEDfl.NetManager.node + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: MEDfl.NetManager + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/Medfl.rst b/docs/source/Medfl.rst new file mode 100644 index 0000000..95b5d99 --- /dev/null +++ b/docs/source/Medfl.rst @@ -0,0 +1,19 @@ +MEDfl package +============= + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + MEDfl.LearningManager + MEDfl.NetManager + +Module contents +--------------- + +.. automodule:: MEDfl + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/_static/custom.css b/docs/source/_static/custom.css new file mode 100644 index 0000000..b111464 --- /dev/null +++ b/docs/source/_static/custom.css @@ -0,0 +1,8 @@ +.logo img { + max-width: 100px !important; /* Adjust the width as needed */ + max-height: 100px !important; /* Adjust the height as needed */ +} + +.wy-nav-content { + max-width: 1200px !important; +} \ No newline at end of file diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 0000000..1bb56b3 --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,172 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import importlib +import os +import sys + +import sphinx.builders.html +import sphinx.builders.latex +import sphinx.builders.texinfo +import sphinx.builders.text +import sphinx.ext.autodoc + +print(sys.executable) + +sys.path.insert(0, os.path.abspath('..')) +sys.path.insert(0, os.path.abspath('../..')) + + + +# -- Project information ----------------------------------------------------- + +project = 'MEDfl' +copyright = '2023, MEDomics consortium' +author = 'MEDomics consortium' + +# The full version, including alpha/beta/rc tags +release = '0.1.0' + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. 
+extensions = ['sphinx.ext.autodoc', + 'sphinx.ext.doctest', + 'sphinx.ext.intersphinx', + 'sphinx.ext.todo', + 'sphinx.ext.coverage', + 'sphinx.ext.mathjax', + 'sphinx.ext.autosectionlabel', + 'sphinx.ext.ifconfig', + 'sphinx_rtd_dark_mode', + 'sphinx-jsonschema', + 'sphinx.ext.viewcode', + 'sphinx.ext.githubpages', + 'sphinx.ext.napoleon'] + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_init_with_doc = True +napoleon_include_private_with_doc = True +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True + +#Todo settings +todo_include_todos=True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# user starts in light mode +default_dark_mode = False + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# + +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "collapse_navigation": True, + "display_version": True, + "sticky_navigation": True, # Set to False to disable the sticky nav while scrolling. + "logo_only": True, # if we have a html_logo below, this shows /only/ the logo with no title text +} + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +html_logo = "figures/MEDomics.png" + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". 
+html_static_path = ['_static'] + +html_css_files = ['css/custom.css'] + +# assign master document +master_doc = 'index' + +def setup(app): + app.add_css_file('custom.css') + +# PATCH ``sphinx-jsonschema`` +# to render the extra ``required``, ``range`` and ``options`` schema properties +# +def _patched_sphinx_jsonschema_simpletype(self, schema): + """Render the *extra* ``required``, ``range`` and ``options`` schema properties for every object.""" + rows = _original_sphinx_jsonschema_simpletype(self, schema) + + if "required" in schema: + required = schema["required"] + if required not in ["true", "false"]: + raise Exception("The required argument must be one of true, false") + rows.append(self._line(self._cell("required"), self._cell(required))) + del schema["required"] + + if "range" in schema: + range = schema["range"] + rows.append(self._line(self._cell("range"), self._cell(range))) + del schema["range"] + + # if "options" in schema: + # rows.append(self._line(self._cell("options"), self._cell(""))) + # for option in schema["options"]: + # rows.append(self._line(self._cell(""), self._cell(f"``{option}``"), self._cell("test"))) + # + # del schema["options"] + + if "options" in schema: + key = "options" + rows.append(self._line(self._cell(key))) + + for prop in schema[key].keys(): + # insert spaces around the regexp OR operator + # allowing the regexp to be split over multiple lines. + # proplist = prop.split('|') + # dispprop = self._escape(' | '.join(proplist)) + dispprop = prop + bold = '``' + label = self._cell(bold + dispprop + bold) + + if isinstance(schema[key][prop], dict): + obj = schema[key][prop] + rows.extend(self._dispatch(obj, label)[0]) + else: + rows.append(self._line(label, self._cell(schema[key][prop]))) + del schema[key] + + return rows + + +sjs_wide_format = importlib.import_module("sphinx-jsonschema.wide_format") +_original_sphinx_jsonschema_simpletype = sjs_wide_format.WideFormat._simpletype # type: ignore +sjs_wide_format.WideFormat._simpletype = _patched_sphinx_jsonschema_simpletype # type: ignore \ No newline at end of file diff --git a/docs/source/configuration_file.rst b/docs/source/configuration_file.rst new file mode 100644 index 0000000..1d66390 --- /dev/null +++ b/docs/source/configuration_file.rst @@ -0,0 +1,61 @@ +Configuration File +================== + +Project base URL +---------------- +In the file ``MEDfl/global_params.yaml``, you need to update the base URL parameter according to the local path of your project. + +.. code-block:: yaml + + base_url: 'PATH_TO_PROJECT/MEDfl' + + +MySQL Configuration +------------------- +In the file ``MEDfl/scripts/db_config.ini``, you can specify the SQL connection parameters. + +.. code-block:: ini + + [mysql] + host = localhost + port = 3306 + user = your_username + password = your_password + database = MEDfl + +Also, set the same credentials in the file ``MEDfl/scripts/create_db.py``: + +.. code-block:: python + + mydb = mysql.connector.connect(host="localhost", user="your_username", password="your_password") + +Learning Parameters +------------------- +In the file ``MEDfl/MEDfl/LearningManager/params.yaml``, you can modify the parameters used to create your model. + +.. code-block:: yaml + + task: BinaryClassification + optimizer: SGD + train_batch_size: 32 + test_batch_size: 1 + train_epochs: 6 + lr: 0.001 + diff_privacy: True + MAX_GRAD_NORM: 1.0 + EPSILON: 20.0 + DELTA: 1e-5 + num_rounds: 3 + min_evalclient: 2 + +DataSets +-------- +In the file ``MEDfl/MEDfl/LearningManager/params.yaml``, you can specify the path to the CSV files of the dataset you want to use. 
+ +.. code-block:: yaml + + path_to_master_csv: 'PATH TO YOUR MASTER CSV' + path_to_test_csv: 'PATH TO YOUR TEST CSV' + +.. note:: + The ``path_to_master_csv`` is the CSV file used to create the **MasterDataSet** diff --git a/docs/source/figures/Images/FL_gen.png b/docs/source/figures/Images/FL_gen.png new file mode 100644 index 0000000..dd642dc Binary files /dev/null and b/docs/source/figures/Images/FL_gen.png differ diff --git a/docs/source/figures/Images/FTL_comp.png b/docs/source/figures/Images/FTL_comp.png new file mode 100644 index 0000000..9eb52b0 Binary files /dev/null and b/docs/source/figures/Images/FTL_comp.png differ diff --git a/docs/source/figures/Images/FlDatasetDiagramm.png b/docs/source/figures/Images/FlDatasetDiagramm.png new file mode 100644 index 0000000..03ea033 Binary files /dev/null and b/docs/source/figures/Images/FlDatasetDiagramm.png differ diff --git a/docs/source/figures/Images/GDP.png b/docs/source/figures/Images/GDP.png new file mode 100644 index 0000000..0c4797a Binary files /dev/null and b/docs/source/figures/Images/GDP.png differ diff --git a/docs/source/figures/Images/LDP.png b/docs/source/figures/Images/LDP.png new file mode 100644 index 0000000..b231b5b Binary files /dev/null and b/docs/source/figures/Images/LDP.png differ diff --git a/docs/source/figures/Images/LearningManager.png b/docs/source/figures/Images/LearningManager.png new file mode 100644 index 0000000..6d31965 Binary files /dev/null and b/docs/source/figures/Images/LearningManager.png differ diff --git a/docs/source/figures/Images/MEDfl_Diagramm.png b/docs/source/figures/Images/MEDfl_Diagramm.png new file mode 100644 index 0000000..c1db88a Binary files /dev/null and b/docs/source/figures/Images/MEDfl_Diagramm.png differ diff --git a/docs/source/figures/Images/Modelinit.png b/docs/source/figures/Images/Modelinit.png new file mode 100644 index 0000000..ca60d88 Binary files /dev/null and b/docs/source/figures/Images/Modelinit.png differ diff --git a/docs/source/figures/Images/NetManager_Diagramm.png b/docs/source/figures/Images/NetManager_Diagramm.png new file mode 100644 index 0000000..0e0c7e4 Binary files /dev/null and b/docs/source/figures/Images/NetManager_Diagramm.png differ diff --git a/docs/source/figures/Images/NetworkCreation.png b/docs/source/figures/Images/NetworkCreation.png new file mode 100644 index 0000000..acfca0d Binary files /dev/null and b/docs/source/figures/Images/NetworkCreation.png differ diff --git a/docs/source/figures/Images/createServer.png b/docs/source/figures/Images/createServer.png new file mode 100644 index 0000000..9ea6ea9 Binary files /dev/null and b/docs/source/figures/Images/createServer.png differ diff --git a/docs/source/figures/Images/learningresults.png b/docs/source/figures/Images/learningresults.png new file mode 100644 index 0000000..099011a Binary files /dev/null and b/docs/source/figures/Images/learningresults.png differ diff --git a/docs/source/figures/Images/logos/mysqllogo.png b/docs/source/figures/Images/logos/mysqllogo.png new file mode 100644 index 0000000..55d8d19 Binary files /dev/null and b/docs/source/figures/Images/logos/mysqllogo.png differ diff --git a/docs/source/figures/Images/logos/phpmyadmin.png b/docs/source/figures/Images/logos/phpmyadmin.png new file mode 100644 index 0000000..22ce57a Binary files /dev/null and b/docs/source/figures/Images/logos/phpmyadmin.png differ diff --git a/docs/source/figures/Images/logos/wampLogo.png b/docs/source/figures/Images/logos/wampLogo.png new file mode 100644 index 0000000..512fa8f Binary files 
/dev/null and b/docs/source/figures/Images/logos/wampLogo.png differ diff --git a/docs/source/figures/Images/logos/xampplogo.png b/docs/source/figures/Images/logos/xampplogo.png new file mode 100644 index 0000000..485e829 Binary files /dev/null and b/docs/source/figures/Images/logos/xampplogo.png differ diff --git a/docs/source/figures/Images/modeltest.png b/docs/source/figures/Images/modeltest.png new file mode 100644 index 0000000..72bcd47 Binary files /dev/null and b/docs/source/figures/Images/modeltest.png differ diff --git a/docs/source/figures/Images/netMan.png b/docs/source/figures/Images/netMan.png new file mode 100644 index 0000000..7a99fec Binary files /dev/null and b/docs/source/figures/Images/netMan.png differ diff --git a/docs/source/figures/Images/nodecreation.png b/docs/source/figures/Images/nodecreation.png new file mode 100644 index 0000000..bd165b6 Binary files /dev/null and b/docs/source/figures/Images/nodecreation.png differ diff --git a/docs/source/figures/Images/pipeline.png b/docs/source/figures/Images/pipeline.png new file mode 100644 index 0000000..1a4d396 Binary files /dev/null and b/docs/source/figures/Images/pipeline.png differ diff --git a/docs/source/figures/Images/strategy.png b/docs/source/figures/Images/strategy.png new file mode 100644 index 0000000..b1d78c3 Binary files /dev/null and b/docs/source/figures/Images/strategy.png differ diff --git a/docs/source/figures/MEDomics.png b/docs/source/figures/MEDomics.png new file mode 100644 index 0000000..4456fda Binary files /dev/null and b/docs/source/figures/MEDomics.png differ diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 0000000..5e89ea7 --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,38 @@ +.. MEDfl-package-docs documentation master file, created by + sphinx-quickstart on Wed Jan 10 21:46:56 2024. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to the MEDfl documentation! +============================================== +This Python package is an open-source tool designed for simulating federated learning and incorporating differential privacy. It empowers researchers and developers to effortlessly create, execute, and assess federated learning pipelines while seamlessly working with various tabular datasets. + +.. image:: figures/Images/MEDfl_Diagramm.png + +.. toctree:: + :maxdepth: 1 + :caption: Getting started + + Installation + Documentation + configuration_file + + +.. toctree:: + :maxdepth: 1 + :caption: Tutorials + + tutorials + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + modules + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/source/modules.rst b/docs/source/modules.rst new file mode 100644 index 0000000..e332f8f --- /dev/null +++ b/docs/source/modules.rst @@ -0,0 +1,7 @@ +MEDfl +===== + +.. toctree:: + :maxdepth: 4 + + MEDfl diff --git a/docs/source/tutorials.rst b/docs/source/tutorials.rst new file mode 100644 index 0000000..78f286a --- /dev/null +++ b/docs/source/tutorials.rst @@ -0,0 +1,211 @@ +Introduction +============ + +``MEDfl`` is a new Python package that hybridizes ``Flower`` and ``Opacus``, +while drawing inspiration from the ``PySyft`` framework. Its main strength is that +both users and developers benefit from the features of all three frameworks.
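+
+To give a concrete feel for this hybridization, the snippet below is a minimal, generic sketch, and **not** MEDfl's internal implementation: a Flower ``NumPyClient`` whose local training loop is made differentially private by wrapping the model, optimizer and data loader with Opacus's ``PrivacyEngine``. The model, data loader and hyper-parameter values (``noise_multiplier``, ``max_grad_norm``, the SGD learning rate) are illustrative assumptions only.
+
+.. code-block:: python
+
+   import flwr as fl
+   import torch
+   from opacus import PrivacyEngine
+
+   class DPClient(fl.client.NumPyClient):
+       """Toy Flower client whose local training step is made private with Opacus."""
+
+       def __init__(self, model, train_loader):
+           optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
+           # Opacus wraps model/optimizer/loader to add per-sample clipping and noise (DP-SGD)
+           self.privacy_engine = PrivacyEngine()
+           self.model, self.optimizer, self.train_loader = self.privacy_engine.make_private(
+               module=model,
+               optimizer=optimizer,
+               data_loader=train_loader,
+               noise_multiplier=1.0,   # illustrative values only
+               max_grad_norm=1.0,
+           )
+           self.criterion = torch.nn.BCEWithLogitsLoss()
+
+       def get_parameters(self, config):
+           return [p.detach().cpu().numpy() for p in self.model.parameters()]
+
+       def fit(self, parameters, config):
+           # Load the global weights, run one private local epoch, return the update
+           for p, new in zip(self.model.parameters(), parameters):
+               p.data = torch.tensor(new, dtype=p.dtype)
+           for X, y in self.train_loader:  # assumes (float features, float targets)
+               self.optimizer.zero_grad()
+               loss = self.criterion(self.model(X).squeeze(), y)
+               loss.backward()
+               self.optimizer.step()
+           return self.get_parameters(config), len(self.train_loader.dataset), {}
+
+A client written this way can then join a federation through the usual Flower entry point (for example ``fl.client.start_numpy_client``), and every update it sends has been produced by DP-SGD.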
+ +Real world scenario +------------------- + +**Martin** is an AI researcher whose main interest is the application of AI to the healthcare domain. +Recently, he received a call from a prestigious institute asking him to conduct a study on the realization of a new project. The project's goal is to design and develop a federated learning system between a set of hospitals, training and using a deep learning model while preserving the privacy of all patient records. + +After a first analysis of the project statement, Martin realized that the project involves Federated Learning and Differential Privacy, alongside a good storage and monitoring system. + +Fortunately, Martin knows ``MEDfl``, and he is confident that it is designed for exactly such tasks and will do the desired work. +With its two main sub-packages, ``NetManager`` and ``LearningManager``, ``MEDfl`` lets us design FL experiments and simulate real-world scenarios, using different options to build different FL architectures (or setups) as well as different FL pipelines. + + + +Database management +=================== + +In this tutorial, we'll demonstrate how to initialize your database and establish its connection to MEDfl. Subsequently, we'll explore the step-by-step process of storing various pieces of information. + +We chose `MySQL `_ as the database system because of its robust features, reliability, and widespread adoption in the industry. Its strong support for structured query language (SQL) and its scalability make it an ideal choice for managing the diverse data sets and configurations within MEDfl. + +.. image:: figures/Images/logos/mysqllogo.png + :width: 150px + :height: 50px + +Before beginning, ensure that you have installed MySQL and one of the servers, such as `WAMP `_ or `XAMPP `_, and have them running. + +To visualize your database, you can open `PHPMyAdmin `_, a web-based tool that allows for convenient management and visualization of your database. + +.. image:: figures/Images/logos/wampLogo.png + :width: 120px + :height: 50px + +.. image:: figures/Images/logos/xampplogo.png + :width: 160px + :height: 50px + +.. image:: figures/Images/logos/phpmyadmin.png + :width: 150px + :height: 50px + +You can also find this tutorial in the repository: `Database management tutorial `_ + +NetManager +========== + +The ``NetManager`` module within ``MEDfl`` is responsible for the generation of federated learning networks. It relies on a CSV file containing a DataSet as input. Leveraging this DataSet file, the module creates the various nodes of the network, assigns a dataset to each node, and generates the federated dataset for each node. These federated datasets are then passed to the next sub-package, the `Learning Manager`. + +.. image:: figures/Images/MEDfl_Diagramm.png + +The NetManager workflow involves five primary steps: + +1. **Network creation** +2. **DataSets storage** +3. **Nodes Creation** +4. **FLsetup Creation** +5. **Federated DataSet Creation** + +.. image:: figures/Images/NetManager_Diagramm.png + :width: 70% + :align: center + +Transfer Learning +================= + +This notebook demonstrates the process of integrating `Transfer Learning `_ using the ``MEDfl`` package. The primary objective of incorporating transfer learning with the package is to harness the capabilities of `Federated-Transfer Learning `_ in training models across different hospitals. 
In real-world scenarios, one of the clients or the aggregating server might possess a `pre-trained model `_. Leveraging this pre-trained model offers advantages such as enhancing performance and reducing training time. + +In some instances, a client may lack sufficient data to train a model entirely from scratch, hindering its ability to achieve optimal performance. Transfer learning then becomes a viable strategy to maximize the benefits from each client, allowing the integration of previously learned knowledge to enhance model training and performance. + +.. image:: figures/Images/FTL_comp.png + :width: 600px + :height: 400px + +Learning Manager +================ + +.. image:: figures/Images/MEDfl_Diagramm.png + +The ``Learning Manager``, serving as the second sub-package of ``MEDfl``, extends the work initiated by the ``NetManager``. It takes the federated dataset as its input and, through its processes, generates a **Trained model**. Additionally, it **stores relevant Results** within the database. + +The federated module of MEDfl, which is based on the `Flower `_ framework, is well suited to this task. It mainly follows the Flower paradigm, but with many customizations to work with the SQL database and to stay consistent with the `NetManager`. + +.. image:: figures/Images/LearningManager.png + :width: 50% + + +Learning Results +================ + +The testing process in ``MEDfl`` is organized into two main phases: + +.. list-table:: Test Phases + :widths: 20 30 50 + :header-rows: 1 + + * - Test + - Used DataSet + - Purpose + * - **Local Test** + - Test Loaders + - Analyze and compare model results + * - **Independent Tests** + - HoldOut Dataset + - Validate the final model with an external dataset + + +.. image:: figures/Images/learningresults.png + :width: 50% + +Model choice +============ + +The ``DynamicModel`` class in the ``MEDfl`` package enables dynamic creation of diverse neural network architectures. By allowing users to specify key parameters in a ``params_dict`` (such as input dimensions, hidden layer sizes, and output dimensions), the class adapts to various machine learning tasks. + +.. list-table:: DynamicModel parameter examples + :widths: 5 30 65 + :header-rows: 1 + + * - N + - Machine Learning Task + - Parameters + * - 1 + - **Binary Classification** + - + + .. code-block:: python + + params = {'input_dim': 30, + 'hidden_dims': [64, 32], + 'output_dim': 1, + 'activation': 'relu', + 'dropout_rate': 0.2, + 'batch_norm': True, + 'use_gpu': False} + * - 2 + - **Multiclass Classification** + - + + .. code-block:: python + + params = {'input_dim': 30, + 'hidden_dims': [64, 32], + 'output_dim': 3, + 'activation': 'relu', + 'dropout_rate': 0.2, + 'batch_norm': True, + 'use_gpu': False} + * - 3 + - **Linear Regression** + - + + .. code-block:: python + + params = {'input_dim': 30, + 'output_dim': 1, + 'use_gpu': False} + * - 4 + - **Logistic Regression** + - + + .. code-block:: python + + params = {'input_dim': 30, + 'use_gpu': False} + * - 5 + - **CNN** + - + + .. code-block:: python + + params = {'input_channels': 3, + 'output_channels': 16, + 'kernel_size': 3, + 'use_gpu': False} + * - 6 + - **RNN** + - + + .. code-block:: python + + params = {'input_size': 10, + 'hidden_size': 32, + 'use_gpu': False} + * - 7 + - **LSTM Network** + - + + .. code-block:: python + + params = {'input_size': 10, + 'hidden_size': 32, + 'use_gpu': False} + * - 8 + - **Autoencoder** + - + + .. 
code-block:: python + + params = {'input_size': 30, + 'encoder_hidden_size': 16, + 'use_gpu': False} + +As an integral part of the ``MEDfl`` package, the ``DynamicModel`` class aligns with the package's goal of delivering accessible, efficient, and adaptable tools for medical-focused machine learning applications, enabling users to seamlessly generate and models for diverse medical classification and regression tasks. + + diff --git a/notebooks/eicu_test.csv b/notebooks/eicu_test.csv deleted file mode 100644 index b5bf045..0000000 --- a/notebooks/eicu_test.csv +++ /dev/null @@ -1,400 +0,0 @@ -id,site_hospital,site_region,age,pao2fio2,uo,admissiontype,bicarbonate,bilirubin,bun,chron_dis,gcs,hr,potassium,sbp,sodium,tempc,wbc,event_death -stay147985,site73,Midwest,16,0,4,6,3,0,6,0,5,0,0,5,1,0,0,1 -stay156248,site73,Midwest,7,0,0,6,0,0,0,0,0,0,0,5,0,0,0,0 -stay156308,site60,Midwest,18,0,0,6,0,0,6,0,0,0,3,5,1,0,0,0 -stay157820,site73,Midwest,12,0,11,6,3,0,10,0,0,0,0,0,1,0,0,0 -stay159036,site73,Midwest,18,0,0,6,0,0,6,0,0,4,0,5,0,0,3,0 -stay161900,site63,Midwest,7,0,0,6,3,0,0,0,26,4,0,5,0,0,3,1 -stay161964,site60,Midwest,12,0,0,6,0,0,0,0,0,4,0,0,0,0,3,0 -stay167204,site56,Midwest,15,0,0,6,0,0,0,0,0,0,0,13,0,0,3,0 -stay167957,site63,Midwest,0,0,0,6,0,0,0,0,7,2,3,5,1,0,0,1 -stay177509,site73,Midwest,12,0,0,6,0,0,6,0,0,0,0,5,0,3,3,0 -stay177873,site73,Midwest,7,0,0,6,6,9,6,0,5,4,0,5,0,0,0,0 -stay178652,site59,Midwest,0,0,0,6,0,9,0,0,0,4,3,5,0,3,0,0 -stay180870,site71,Midwest,18,0,0,6,3,0,6,0,0,0,0,13,0,0,0,1 -stay181437,site63,Midwest,15,0,0,8,0,0,6,0,0,2,0,0,0,0,0,0 -stay182413,site58,Midwest,12,0,11,6,0,0,6,9,0,0,0,5,0,0,3,0 -stay186957,site73,Midwest,7,0,0,6,0,0,6,0,0,0,0,5,0,0,0,0 -stay187667,site73,Midwest,15,0,0,6,6,0,10,0,0,4,3,5,0,0,0,0 -stay187743,site63,Midwest,15,0,0,6,0,0,6,0,5,2,0,5,0,3,0,0 -stay189842,site66,Midwest,18,0,0,6,3,0,0,0,0,2,0,5,1,0,0,0 -stay205928,site67,Midwest,7,0,0,6,3,0,6,0,0,0,0,5,0,0,0,0 -stay213451,site73,Midwest,12,0,0,6,6,0,6,0,13,4,3,5,1,3,3,0 -stay215734,site73,Midwest,7,0,0,6,3,0,6,0,0,4,3,0,0,3,0,0 -stay218304,site58,Midwest,16,0,0,6,0,0,6,0,7,2,3,0,0,0,0,0 -stay219737,site73,Midwest,7,0,4,6,0,0,0,0,0,4,0,2,0,3,3,0 -stay220660,site58,Midwest,12,0,11,6,0,0,6,0,0,2,0,5,0,0,3,0 -stay222966,site73,Midwest,12,0,0,6,3,0,6,0,0,0,0,5,0,3,3,0 -stay223446,site59,Midwest,12,0,11,6,0,0,0,9,0,7,0,5,0,0,3,0 -stay223811,site73,Midwest,0,0,0,6,3,0,0,0,0,7,0,13,0,3,0,0 -stay227824,site73,Midwest,7,0,11,6,0,0,0,0,0,0,3,5,0,3,0,0 -stay228974,site73,Midwest,16,0,0,6,0,0,6,0,5,2,0,5,0,0,0,0 -stay229005,site73,Midwest,18,0,0,6,0,0,6,0,7,2,3,13,0,0,0,0 -stay233773,site60,Midwest,16,0,0,6,0,0,6,0,0,2,0,13,0,0,3,0 -stay235513,site69,Midwest,0,0,0,6,0,0,0,0,0,4,0,13,0,3,3,0 -stay237269,site71,Midwest,18,0,0,6,3,0,6,0,0,2,3,5,0,0,0,0 -stay239061,site66,Midwest,7,0,0,6,0,0,0,0,0,0,0,2,0,0,0,0 -stay239314,site73,Midwest,12,0,11,6,0,0,6,0,0,2,0,5,0,0,0,0 -stay241289,site73,Midwest,18,0,0,6,6,0,6,0,5,2,0,5,0,0,0,1 -stay246348,site79,Midwest,7,0,0,6,0,0,0,10,5,0,0,5,0,0,0,0 -stay251523,site95,Midwest,18,,4,6,0,0,0,0,0,0,0,5,0,0,0,0 -stay252446,site79,Midwest,7,9,4,6,0,0,6,0,0,0,0,0,0,0,3,0 -stay254141,site79,Midwest,0,,11,6,3,0,6,0,0,4,0,5,0,0,12,0 -stay259621,site79,Midwest,18,9,11,6,6,0,0,0,13,4,0,0,0,0,3,1 -stay260689,site79,Midwest,12,0,0,6,3,0,10,0,7,4,0,0,0,0,3,0 -stay260860,site95,Midwest,7,0,0,6,0,0,0,0,0,2,0,5,0,0,0,0 -stay263047,site92,Midwest,12,,0,6,0,0,0,0,5,0,0,0,0,0,0,0 -stay264276,site92,Midwest,0,11,11,6,0,0,0,0,13,2,0,5,0,0,3,0 
-stay264540,site79,Midwest,7,9,0,6,0,0,0,0,13,0,3,5,0,0,3,0 -stay266168,site85,NULL,12,,0,6,0,0,6,0,0,2,3,5,0,0,0,0 -stay269081,site102,NULL,16,0,4,6,3,0,6,0,5,0,0,5,0,3,0,1 -stay271391,site102,NULL,0,9,0,6,3,0,0,0,5,0,0,5,0,0,0,0 -stay277066,site95,Midwest,0,,0,6,0,0,0,0,0,0,0,5,0,3,0,0 -stay279355,site79,Midwest,16,9,0,6,0,0,0,0,7,2,0,5,0,0,0,0 -stay281219,site79,Midwest,16,,0,6,3,0,6,0,0,0,0,0,0,0,0,0 -stay282527,site102,NULL,0,11,4,6,6,0,0,0,13,4,3,13,0,3,12,0 -stay292016,site92,Midwest,7,0,0,6,0,0,0,0,0,0,0,0,0,0,0,0 -stay294603,site79,Midwest,12,9,0,6,3,0,0,0,5,2,0,5,0,0,3,0 -stay294762,site95,Midwest,7,0,4,6,3,0,0,0,0,0,0,0,0,0,0,0 -stay295624,site95,Midwest,16,0,0,6,0,0,0,0,0,2,0,0,5,0,0,0 -stay297044,site92,Midwest,12,0,0,6,0,0,0,9,0,4,3,0,0,0,12,1 -stay297859,site84,Midwest,12,11,0,6,0,0,6,0,7,0,0,5,1,0,3,0 -stay302880,site95,Midwest,7,,0,6,0,0,0,0,0,2,0,5,0,0,0,0 -stay304989,site79,Midwest,7,0,4,6,0,0,0,0,7,4,0,5,0,0,0,0 -stay305674,site95,Midwest,12,0,11,6,3,0,0,0,0,2,0,5,0,0,0,0 -stay308218,site92,Midwest,15,,4,6,3,0,6,0,0,0,0,5,0,3,3,0 -stay308237,site79,Midwest,7,0,0,6,3,0,6,0,0,0,3,5,0,0,0,0 -stay315114,site125,South,18,0,0,6,0,0,0,0,0,2,0,0,0,0,0,0 -stay316957,site122,South,7,,0,6,0,0,0,9,0,2,3,13,0,0,12,0 -stay318153,site122,South,15,0,11,6,6,0,6,0,7,0,3,13,0,0,0,1 -stay332259,site122,South,7,9,11,6,6,0,10,10,0,0,3,5,0,0,0,0 -stay332661,site125,South,7,6,4,6,3,0,0,0,5,0,0,0,0,0,3,0 -stay336740,site112,South,12,,11,6,0,0,0,0,0,0,0,5,0,0,3,0 -stay337427,site110,South,15,0,11,6,6,0,6,0,0,0,0,5,0,0,3,1 -stay339428,site120,South,18,0,0,6,0,0,0,0,0,4,0,5,0,0,3,0 -stay341174,site122,South,7,0,0,6,3,0,0,0,5,4,0,5,0,3,0,0 -stay342749,site122,South,16,0,11,6,0,0,6,0,7,0,0,13,0,0,0,0 -stay343178,site125,South,7,0,0,6,3,9,6,0,0,7,0,5,0,0,3,0 -stay344919,site122,South,18,,11,6,0,0,0,0,0,0,0,0,0,0,3,0 -stay346306,site122,South,12,0,0,6,3,0,0,9,0,4,3,5,0,0,0,0 -stay355077,site122,South,12,0,4,6,0,0,0,0,0,0,0,5,0,0,0,0 -stay355410,site110,South,15,0,0,6,3,0,0,0,0,0,3,5,5,0,0,0 -stay357122,site122,South,7,0,0,6,0,0,6,0,13,2,3,0,0,0,0,0 -stay368692,site122,South,18,,4,6,0,0,0,0,5,4,0,0,0,0,0,0 -stay373125,site122,South,12,0,11,0,0,4,0,0,0,2,0,5,0,0,0,0 -stay382391,site122,South,7,0,4,6,3,0,0,0,0,0,0,5,1,0,3,0 -stay394170,site122,South,15,,0,6,0,0,6,0,0,0,0,5,0,3,0,0 -stay394324,site112,South,16,0,4,6,3,0,6,0,0,2,0,5,0,0,0,0 -stay396703,site122,South,0,0,0,6,0,0,0,0,0,0,0,0,0,0,0,0 -stay397142,site122,South,0,0,0,6,0,0,0,0,0,2,0,5,0,0,3,0 -stay403303,site122,South,15,0,11,6,3,0,6,0,0,0,0,5,0,0,0,0 -stay411293,site122,South,18,6,4,6,0,0,6,10,0,0,0,5,0,0,0,1 -stay412911,site122,South,18,9,11,6,6,0,6,9,0,4,0,13,0,0,12,1 -stay423253,site112,South,16,6,0,6,0,0,0,0,5,0,0,5,0,0,0,0 -stay427732,site143,NULL,15,11,0,6,6,0,6,0,0,4,3,13,1,0,12,1 -stay429163,site142,South,12,11,0,6,0,0,6,0,0,2,0,0,0,0,0,0 -stay442854,site144,South,18,9,0,6,0,0,6,0,0,2,3,5,0,0,0,0 -stay443192,site141,South,7,,4,6,6,0,0,0,0,4,0,5,0,0,3,0 -stay447632,site141,South,12,,4,6,3,4,0,9,0,4,3,5,0,0,0,1 -stay450744,site141,South,12,9,0,8,3,0,6,0,0,4,0,5,5,0,0,0 -stay458489,site142,South,18,,0,6,0,0,0,10,0,2,0,5,0,0,3,0 -stay460583,site142,South,12,6,0,6,6,0,0,0,13,4,0,5,1,0,0,0 -stay462690,site141,South,18,,0,6,0,0,6,0,0,0,0,5,0,0,3,0 -stay477532,site144,South,7,11,11,6,6,0,0,17,0,4,0,13,0,0,0,0 -stay488368,site141,South,18,0,11,6,6,0,6,0,0,0,3,5,1,0,0,0 -stay491382,site140,South,18,0,4,6,3,0,6,0,26,2,3,5,1,0,3,0 -stay494366,site140,South,18,,0,6,0,0,6,0,0,0,0,5,0,0,3,0 -stay496613,site144,South,15,0,11,6,3,0,6,0,0,7,0,5,0,0,3,0 
-stay498353,site142,South,7,,0,6,0,0,0,0,0,0,0,5,0,0,12,0 -stay500823,site142,South,12,6,11,6,3,0,6,0,0,4,3,13,0,0,3,0 -stay505156,site144,South,18,,4,6,6,0,10,0,0,0,3,5,0,0,3,0 -stay513669,site141,South,0,,11,6,0,0,0,0,0,4,3,5,0,0,0,0 -stay522307,site142,South,7,0,0,6,3,0,10,0,0,4,3,13,0,0,3,0 -stay522954,site144,South,7,,11,6,0,0,0,0,0,0,0,13,0,0,0,0 -stay526988,site141,South,15,0,0,6,3,0,6,0,7,4,3,5,0,3,0,1 -stay532881,site140,South,16,,4,6,0,0,6,0,0,2,0,5,1,0,0,0 -stay546496,site175,West,12,0,0,6,0,0,0,0,0,0,0,0,0,0,0,0 -stay555710,site148,West,15,0,0,6,0,0,6,0,0,4,0,5,1,0,3,0 -stay561748,site167,West,18,0,11,6,0,9,0,0,0,0,0,5,0,0,0,0 -stay567865,site157,West,15,0,0,6,0,0,6,0,7,2,0,13,0,3,0,0 -stay570953,site175,West,18,0,0,6,6,0,10,0,26,2,3,5,0,0,0,0 -stay571553,site155,West,12,0,0,6,0,0,0,0,0,0,0,5,0,0,0,0 -stay574735,site167,West,0,0,0,6,0,0,6,0,0,0,0,13,0,0,0,0 -stay577746,site154,West,0,0,0,6,3,0,0,0,26,4,0,0,0,0,3,0 -stay580326,site171,West,16,0,4,0,6,0,6,0,13,4,3,5,5,0,3,0 -stay581427,site148,West,18,0,0,0,3,0,0,0,0,4,0,5,0,0,0,1 -stay583687,site154,West,18,0,0,6,6,0,6,0,0,4,3,5,0,0,0,0 -stay586393,site171,West,7,11,0,6,0,0,6,0,0,0,0,5,0,0,0,0 -stay589042,site154,West,15,0,0,6,6,0,6,0,0,4,0,13,1,0,3,1 -stay591957,site167,West,7,0,4,6,6,9,6,0,0,4,3,0,0,0,3,0 -stay602482,site165,West,7,0,0,6,0,0,0,0,7,0,0,5,0,0,0,0 -stay602996,site157,West,16,0,0,6,0,0,6,0,0,2,0,5,0,0,3,0 -stay609431,site146,West,16,0,0,0,0,0,0,0,26,0,3,5,1,0,3,0 -stay611252,site167,West,7,0,11,6,6,0,6,0,0,2,3,13,1,0,0,0 -stay616559,site167,West,15,0,0,6,0,0,6,0,26,0,0,13,0,0,3,0 -stay620641,site176,NULL,15,0,0,6,0,0,6,0,0,2,3,5,0,0,0,0 -stay624235,site157,West,12,0,0,6,0,0,6,0,0,0,0,13,0,0,0,1 -stay625315,site148,West,16,0,0,6,0,0,0,0,0,4,0,2,0,0,3,1 -stay640412,site157,West,7,0,11,6,3,9,0,0,0,4,0,5,0,0,0,1 -stay672543,site148,West,0,0,0,6,6,0,6,0,26,4,0,5,1,0,3,0 -stay679788,site167,West,7,0,0,6,0,0,0,0,0,4,0,0,5,3,3,0 -stay680703,site157,West,7,0,0,6,0,0,10,0,0,4,3,5,0,0,0,0 -stay682783,site175,West,16,0,0,6,3,0,6,0,0,2,0,5,1,0,3,0 -stay690168,site154,West,15,0,0,6,6,0,10,0,0,0,0,0,0,0,0,0 -stay691052,site167,West,7,0,0,6,0,0,6,0,0,4,0,5,0,0,0,0 -stay693897,site146,West,15,0,11,6,3,0,6,0,0,4,0,13,0,0,3,0 -stay695514,site167,West,12,0,4,6,0,0,6,0,0,4,0,13,0,0,0,1 -stay700930,site171,West,7,0,0,6,0,0,0,0,0,0,3,5,0,0,0,0 -stay709956,site167,West,12,0,0,6,3,0,6,0,0,0,0,0,0,3,0,0 -stay713512,site165,West,18,0,4,6,3,0,0,0,0,4,0,5,0,0,0,0 -stay715085,site167,West,7,0,11,6,0,0,0,0,0,2,0,5,0,0,3,0 -stay722936,site148,West,12,0,0,6,0,0,6,0,5,4,0,5,0,0,3,1 -stay725182,site154,West,12,0,0,6,0,0,0,0,5,4,0,13,1,0,0,0 -stay731227,site167,West,0,0,11,6,6,0,6,0,26,2,0,5,0,0,3,0 -stay735476,site176,NULL,12,0,11,6,3,0,6,0,0,4,0,5,0,3,3,0 -stay739214,site157,West,18,0,0,6,0,0,0,0,0,2,0,5,1,3,0,1 -stay739992,site155,West,0,0,0,6,3,0,0,0,0,4,0,5,0,0,0,0 -stay744506,site157,West,0,0,4,6,3,0,6,0,0,2,0,13,0,0,3,0 -stay745736,site165,West,7,0,0,6,3,0,0,0,13,4,0,5,1,0,3,0 -stay757240,site167,West,7,0,4,6,3,9,0,0,5,2,0,5,0,0,0,0 -stay764013,site167,West,7,0,0,6,0,0,0,0,0,2,0,13,0,0,3,0 -stay765251,site167,West,7,0,0,6,0,0,6,0,0,0,0,5,0,0,0,0 -stay769926,site165,West,7,0,0,6,6,0,6,0,0,2,0,13,0,0,0,0 -stay774726,site146,West,12,0,4,6,3,0,10,0,13,0,0,5,0,3,0,0 -stay774810,site157,West,7,0,0,6,6,9,6,0,0,0,0,5,5,0,3,0 -stay780005,site167,West,7,0,11,6,0,9,6,0,0,4,0,13,0,3,0,0 -stay784068,site157,West,12,0,0,6,0,0,0,0,5,0,0,5,1,0,0,1 -stay797135,site146,West,12,0,0,6,0,0,0,0,0,4,0,5,0,0,0,0 
-stay804391,site165,West,12,0,0,6,3,0,10,0,0,2,3,5,5,0,0,0 -stay806349,site155,West,18,0,0,6,6,0,6,0,0,2,3,5,0,0,0,1 -stay807744,site148,West,12,0,0,6,0,0,0,0,0,2,0,5,0,0,0,0 -stay807997,site165,West,7,0,0,6,0,0,0,0,26,2,3,5,0,3,3,0 -stay813288,site157,West,7,0,4,0,3,0,6,0,0,0,0,5,0,0,3,0 -stay816230,site165,West,7,0,11,6,6,9,6,0,13,4,3,5,5,0,0,1 -stay819599,site167,West,7,0,0,6,3,0,6,0,7,2,0,5,0,0,3,0 -stay822457,site175,West,7,0,11,6,0,0,0,0,0,2,0,5,0,0,0,0 -stay827595,site154,West,16,0,0,6,3,0,6,0,0,4,0,13,0,0,0,1 -stay838098,site171,West,0,0,0,6,0,0,0,0,0,0,0,0,0,3,0,0 -stay841207,site167,West,18,0,0,6,3,0,6,0,0,2,0,5,1,0,0,0 -stay844203,site175,West,12,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 -stay851378,site175,West,7,0,4,6,6,0,0,0,0,4,3,13,1,3,3,0 -stay857208,site171,West,18,0,4,6,3,0,6,0,0,0,0,5,0,0,3,0 -stay858523,site154,West,18,0,0,6,0,0,0,0,0,2,0,5,0,0,0,0 -stay863986,site176,NULL,12,0,0,6,0,0,0,0,0,2,0,5,0,3,0,0 -stay868530,site165,West,7,0,0,6,3,0,6,0,0,4,0,13,1,0,12,0 -stay881156,site158,West,7,0,0,6,0,0,6,0,0,0,0,5,0,0,0,0 -stay881849,site165,West,0,0,0,6,3,0,0,0,0,4,0,5,0,0,0,0 -stay885624,site148,West,12,0,11,6,3,0,6,0,0,4,0,13,0,0,0,0 -stay896643,site157,West,15,0,0,6,0,0,6,0,0,2,0,5,0,3,0,0 -stay921087,site167,West,12,0,0,6,0,0,6,0,0,7,0,13,0,0,3,1 -stay923906,site167,West,0,0,0,0,3,0,6,0,5,4,3,0,0,0,3,0 -stay924944,site152,West,12,0,0,6,0,0,6,0,0,4,0,5,1,0,3,0 -stay925012,site157,West,18,0,11,0,6,0,6,0,0,4,0,13,1,0,3,1 -stay925855,site154,West,16,0,4,6,0,0,0,0,0,4,0,5,0,0,3,0 -stay928821,site175,West,18,0,0,6,0,0,0,0,0,4,0,0,0,0,0,0 -stay946791,site176,NULL,0,0,0,6,0,0,6,0,0,4,0,0,0,3,0,0 -stay955846,site157,West,15,0,0,6,0,0,6,0,5,0,0,0,0,0,0,0 -stay955930,site165,West,18,0,0,6,3,0,0,0,0,2,0,5,5,0,3,1 -stay960117,site180,South,7,9,0,6,3,0,0,0,26,4,0,5,1,0,3,0 -stay960321,site182,South,18,0,0,6,3,0,6,0,0,0,0,5,1,0,3,0 -stay960564,site181,South,12,0,0,6,0,0,10,0,7,4,0,5,1,0,3,0 -stay961149,site183,South,18,,0,6,0,0,6,0,0,2,0,0,0,0,3,0 -stay962287,site183,South,7,,0,6,3,9,6,0,0,2,0,5,0,0,0,0 -stay962787,site183,South,18,,0,6,0,0,0,0,0,0,0,5,1,0,0,0 -stay962806,site184,South,7,9,11,6,0,0,10,0,0,4,0,5,0,3,3,0 -stay963512,site183,South,15,9,11,6,0,0,6,0,26,4,0,13,1,0,0,0 -stay963996,site180,South,7,11,4,6,0,0,6,0,0,4,0,5,0,0,0,0 -stay964489,site181,South,18,6,4,6,3,0,0,0,13,2,3,5,1,0,0,0 -stay966033,site182,South,12,0,11,6,0,0,10,0,26,2,3,5,1,0,0,0 -stay966487,site180,South,7,,0,6,0,0,6,0,0,2,0,5,0,0,3,0 -stay967973,site184,South,7,0,0,6,0,0,0,0,0,0,0,5,1,3,0,0 -stay969204,site184,South,15,0,0,6,0,0,0,0,0,2,0,5,0,0,0,0 -stay969367,site182,South,7,11,0,6,6,0,0,0,0,4,0,2,0,3,0,0 -stay970044,site184,South,18,,0,6,3,0,10,0,0,2,0,13,1,0,3,0 -stay970782,site180,South,7,6,0,6,0,0,0,0,26,0,3,2,0,0,3,0 -stay970859,site184,South,7,,4,6,0,0,10,0,0,2,0,0,0,0,0,0 -stay972795,site183,South,12,,0,6,0,0,6,0,0,0,3,5,0,0,3,0 -stay974086,site181,South,15,0,4,6,3,0,10,0,5,2,3,13,0,0,0,0 -stay974580,site183,South,12,6,4,8,0,0,0,0,13,0,0,5,0,0,3,0 -stay976944,site181,South,18,0,0,6,0,0,6,0,7,2,3,13,0,0,0,0 -stay977664,site184,South,15,,0,6,3,0,6,0,13,2,3,13,1,0,0,0 -stay977708,site183,South,7,,0,6,0,0,0,0,0,2,0,5,0,0,0,0 -stay978991,site181,South,18,0,0,6,0,0,0,0,0,7,0,13,0,3,3,0 -stay980062,site181,South,7,9,0,6,6,0,6,17,26,4,3,5,1,0,0,1 -stay980146,site183,South,7,0,4,6,0,0,6,0,0,0,0,5,1,0,0,0 -stay980949,site188,South,15,0,0,6,0,0,0,0,0,2,0,5,0,0,0,0 -stay982655,site188,South,15,,11,6,3,0,0,0,0,2,3,5,0,0,0,0 -stay986282,site188,South,12,0,0,6,3,0,10,0,5,4,3,5,0,0,3,0 
-stay993578,site188,South,18,0,4,6,3,0,6,0,0,2,3,5,0,0,0,0 -stay998738,site188,South,12,,4,6,3,0,0,0,0,2,3,13,0,0,0,0 -stay1000187,site188,South,7,0,0,6,6,0,0,0,0,2,3,13,5,0,3,0 -stay1000586,site188,South,12,0,0,6,0,0,6,0,5,4,0,5,1,0,0,1 -stay1003266,site188,South,12,9,0,6,0,0,0,0,0,0,0,0,0,0,0,0 -stay1005624,site188,South,7,9,4,6,0,0,6,10,13,0,3,0,0,0,3,0 -stay1013789,site188,South,7,,0,6,0,0,0,0,5,0,0,5,0,0,0,0 -stay1023366,site188,South,0,,0,6,6,0,0,0,5,4,0,5,0,0,0,0 -stay1026742,site188,South,7,,11,6,3,0,6,0,0,0,0,5,0,0,0,0 -stay1029620,site188,South,0,,0,6,0,0,0,0,0,0,0,13,0,0,0,0 -stay1031993,site188,South,18,,11,6,0,0,0,10,5,4,0,5,0,0,12,0 -stay1033429,site188,South,7,11,11,6,6,0,6,0,5,0,0,5,0,0,0,1 -stay1033634,site188,South,7,,0,6,3,0,6,0,0,0,0,5,0,0,0,0 -stay1043096,site188,South,7,,0,6,6,0,0,0,0,0,0,0,0,0,3,0 -stay1044410,site188,South,12,,0,6,6,0,10,0,0,0,3,5,1,0,0,0 -stay1045002,site188,South,12,9,4,6,6,4,6,0,0,0,0,13,0,0,0,0 -stay1046202,site188,South,18,,4,6,0,0,0,0,0,0,0,0,0,0,0,0 -stay1050983,site188,South,7,,0,6,0,0,0,0,0,4,0,5,0,3,0,0 -stay1055742,site198,South,0,0,0,6,3,0,0,0,0,0,3,5,0,0,0,0 -stay1056509,site198,South,0,,0,6,0,0,6,0,0,4,3,5,0,0,3,0 -stay1057548,site198,South,7,0,11,6,0,0,0,0,13,2,3,0,0,3,0,0 -stay1058422,site197,South,7,0,11,6,6,0,0,0,0,0,3,5,0,0,0,0 -stay1062049,site197,South,18,0,4,6,3,0,0,0,0,0,0,5,0,3,0,0 -stay1062653,site197,South,7,,11,6,3,0,10,0,13,4,3,5,1,0,3,1 -stay1068643,site197,South,15,,4,6,0,0,0,0,5,2,0,5,0,3,3,0 -stay1069370,site197,South,16,,4,6,3,0,6,0,0,0,0,5,0,0,0,0 -stay1069749,site198,South,15,,0,6,0,0,6,0,0,0,0,5,1,0,0,0 -stay1072810,site196,South,18,,4,6,0,0,0,0,0,2,0,0,0,0,0,0 -stay1074108,site198,South,12,0,11,6,3,9,6,0,26,2,0,5,0,0,0,1 -stay1076226,site200,Northeast,7,,0,6,3,0,6,0,5,2,0,5,0,3,3,0 -stay1076606,site199,Northeast,0,0,0,6,0,0,0,0,0,4,0,0,0,0,0,0 -stay1077358,site202,Northeast,16,0,4,6,0,0,6,0,5,4,0,5,0,3,3,1 -stay1080052,site199,Northeast,16,0,4,6,0,0,0,0,0,4,0,5,0,0,3,0 -stay1081466,site205,Northeast,12,,0,6,0,0,0,0,0,2,0,0,0,3,0,0 -stay1081963,site202,Northeast,12,0,11,6,0,0,0,0,26,2,3,13,0,0,3,0 -stay1082373,site205,Northeast,18,,0,6,3,0,6,0,0,0,0,0,0,0,0,0 -stay1082905,site202,Northeast,15,,0,6,0,0,6,0,0,2,0,5,0,0,0,1 -stay1084031,site199,Northeast,16,0,11,6,6,0,6,0,0,2,0,5,0,0,0,0 -stay1084784,site206,Northeast,12,0,11,6,3,0,10,0,0,0,3,13,0,0,0,0 -stay1088435,site199,Northeast,12,0,0,8,6,0,6,0,0,4,3,5,0,0,12,0 -stay1089444,site199,Northeast,15,0,0,6,6,0,6,0,0,4,3,13,1,0,3,1 -stay1090327,site202,Northeast,12,0,0,6,0,0,6,0,5,0,0,5,0,0,0,0 -stay1092001,site203,NULL,12,,11,6,3,0,6,0,0,0,3,5,0,0,0,1 -stay1093040,site202,Northeast,18,0,0,6,0,0,6,0,26,4,0,13,0,0,0,0 -stay1094828,site199,Northeast,18,,0,6,0,0,6,0,5,2,0,5,0,0,0,0 -stay1094849,site202,Northeast,12,0,0,6,6,0,10,0,5,2,3,5,1,0,0,1 -stay1095353,site199,Northeast,16,0,4,6,3,0,0,0,0,4,3,5,0,0,3,1 -stay1096439,site199,Northeast,7,0,0,6,3,0,0,0,26,2,0,5,0,0,0,0 -stay1097030,site199,Northeast,12,0,0,6,3,0,0,0,5,4,3,5,0,3,0,0 -stay1099147,site200,Northeast,18,0,11,6,0,0,6,0,5,4,3,5,1,0,0,0 -stay1099210,site199,Northeast,7,0,0,6,0,0,0,0,0,4,0,5,0,0,0,0 -stay1102875,site199,Northeast,12,,4,6,3,0,6,0,13,0,3,13,1,0,0,0 -stay1103257,site200,Northeast,12,,4,6,6,0,10,0,7,4,3,13,1,0,3,0 -stay1104609,site201,NULL,15,,0,6,3,0,6,0,0,0,0,5,0,0,0,0 -stay1105897,site200,Northeast,7,,0,6,6,0,6,0,0,4,0,5,0,0,0,0 -stay1105997,site206,Northeast,12,0,0,6,6,4,6,0,0,4,3,5,0,0,0,0 -stay1106035,site199,Northeast,7,0,4,6,0,0,0,0,5,2,3,0,0,0,0,0 
-stay1106553,site206,Northeast,12,0,4,6,0,4,10,0,0,4,0,5,5,0,0,1 -stay1107467,site202,Northeast,7,,0,6,0,0,0,0,0,2,0,13,0,0,0,0 -stay1107541,site202,Northeast,12,0,4,6,0,0,0,0,0,4,0,5,1,0,0,0 -stay1108165,site200,Northeast,7,,0,6,3,0,6,0,0,0,0,0,0,0,0,0 -stay1109131,site202,Northeast,16,0,4,6,6,0,6,0,0,2,3,13,0,0,3,0 -stay1109457,site206,Northeast,12,0,4,6,3,0,6,0,5,4,0,5,0,0,0,1 -stay1113145,site206,Northeast,16,0,0,6,0,0,10,0,0,4,3,13,0,0,0,1 -stay1113208,site199,Northeast,12,0,0,6,0,0,6,0,0,2,0,5,0,0,0,1 -stay1114724,site200,Northeast,16,0,4,6,3,0,0,0,0,2,3,0,0,0,0,0 -stay1115772,site199,Northeast,12,,0,6,0,0,0,0,0,2,0,0,0,3,0,0 -stay1117239,site199,Northeast,7,0,0,6,0,0,0,0,0,0,3,0,0,0,0,0 -stay1119233,site199,Northeast,18,0,11,6,6,0,6,0,26,0,3,13,0,0,0,0 -stay1119288,site199,Northeast,7,0,0,6,0,0,0,0,5,4,0,13,0,0,3,0 -stay1121252,site199,Northeast,0,0,0,6,3,0,0,0,5,4,0,5,0,0,3,0 -stay1121985,site206,Northeast,12,,0,6,6,0,6,0,0,0,3,5,1,0,0,1 -stay1123474,site199,Northeast,16,0,0,6,3,0,0,0,0,0,0,5,0,0,0,0 -stay1126845,site199,Northeast,16,0,4,6,6,0,6,0,0,4,0,13,0,0,3,1 -stay1131430,site206,Northeast,7,0,11,6,3,0,10,0,13,0,3,5,0,0,3,1 -stay1134760,site206,Northeast,12,0,0,6,0,4,6,0,0,2,3,5,0,0,3,0 -stay1135234,site206,Northeast,16,0,0,8,3,0,6,0,5,4,0,5,0,0,0,0 -stay1149332,site207,South,7,0,4,6,0,0,0,0,0,4,0,0,0,0,3,0 -stay1160463,site210,South,16,0,4,6,0,0,6,0,0,0,0,13,0,0,0,1 -stay1160625,site208,South,15,0,0,6,0,0,0,0,0,0,0,5,0,0,0,0 -stay1175784,site208,South,7,0,4,6,6,0,0,0,0,0,0,5,0,0,0,1 -stay1176770,site209,NULL,7,0,0,8,0,0,0,0,0,0,0,0,0,0,0,0 -stay1177158,site208,South,16,0,4,6,3,0,6,0,0,0,3,5,0,0,0,0 -stay1181979,site208,South,15,0,0,6,0,0,0,0,0,2,3,5,0,0,0,0 -stay1183831,site210,South,7,9,0,6,0,0,0,0,0,4,0,5,0,0,0,0 -stay1189164,site208,South,12,0,4,6,3,0,6,0,0,4,0,5,0,3,3,0 -stay1203933,site208,South,7,0,4,6,0,0,0,0,0,2,0,5,0,0,0,0 -stay1207963,site208,South,15,0,0,6,0,0,6,0,0,0,0,0,0,0,0,0 -stay1220951,site217,South,18,0,0,6,0,0,6,0,0,0,3,5,0,0,0,0 -stay1222431,site212,NULL,16,0,0,6,3,0,10,0,0,0,3,5,1,0,3,0 -stay1229669,site217,South,7,0,0,6,6,9,6,0,5,0,0,5,0,0,0,1 -stay1244394,site215,South,7,0,0,6,3,0,0,0,0,0,0,5,0,0,0,0 -stay1256137,site217,South,7,0,0,6,0,0,0,0,0,4,0,5,0,0,0,0 -stay1259594,site217,South,7,0,0,6,0,0,0,0,0,0,0,5,0,0,3,0 -stay1260715,site220,South,0,0,4,6,6,0,10,0,0,0,3,5,0,0,0,0 -stay1262366,site220,South,12,0,4,6,0,0,0,0,0,2,0,5,0,0,12,0 -stay1271652,site215,South,7,0,11,6,0,0,0,0,0,0,0,5,0,0,0,0 -stay1277372,site217,South,7,0,0,6,3,0,6,0,0,0,0,5,0,0,0,0 -stay1281012,site220,South,16,0,0,6,6,0,10,0,0,0,3,5,1,0,12,0 -stay1285656,site217,South,16,0,4,6,3,0,6,0,0,0,0,5,0,0,3,0 -stay1293281,site217,South,18,0,0,6,0,0,0,0,0,0,3,5,0,0,0,1 -stay1308488,site224,South,18,0,11,6,3,0,0,0,0,0,3,5,0,0,0,0 -stay1310476,site215,South,12,0,0,6,3,0,6,0,5,4,3,5,0,3,3,0 -stay1311054,site215,South,16,0,11,6,6,0,6,0,0,4,0,5,0,0,0,1 -stay1313593,site220,South,7,0,4,0,3,4,6,0,0,4,0,5,0,0,0,1 -stay1318102,site227,West,16,0,0,6,0,0,0,0,5,4,0,5,0,0,0,1 -stay1318897,site226,West,18,0,0,6,0,0,6,0,0,0,3,5,0,0,0,1 -stay1329108,site227,West,12,,0,6,0,0,0,17,0,2,0,13,0,0,0,0 -stay1330371,site227,West,7,,11,6,6,0,0,0,5,2,3,5,0,0,3,0 -stay1331036,site227,West,7,,0,6,0,0,6,0,0,0,0,5,0,0,12,0 -stay1337567,site226,West,18,0,0,6,0,0,0,0,0,0,0,5,0,0,0,0 -stay1341158,site227,West,18,,0,6,0,0,6,0,0,0,0,5,0,0,0,0 -stay1348181,site227,West,18,0,0,6,0,0,0,0,26,2,0,2,0,0,0,0 -stay1350990,site226,West,7,,4,6,3,0,0,0,0,4,0,5,1,3,0,0 -stay1356914,site227,West,12,0,0,6,6,0,6,0,0,4,0,13,0,0,12,1 
-stay1358786,site226,West,18,,0,6,3,0,10,0,7,0,3,5,1,0,3,1 -stay1359679,site227,West,16,0,11,6,0,0,6,0,0,7,3,13,0,0,3,1 -stay1361409,site226,West,18,,0,6,3,0,6,0,0,7,0,5,0,3,0,1 -stay1361818,site227,West,7,,0,6,0,0,0,0,0,0,3,5,0,3,0,0 -stay1365966,site226,West,12,,0,6,0,4,0,0,0,0,0,5,0,0,0,0 -stay1439565,site244,South,18,,4,6,3,0,10,0,0,2,3,5,1,0,3,0 -stay1453647,site243,South,12,0,0,6,0,0,0,0,0,11,3,13,0,0,3,0 -stay1459389,site243,South,12,0,4,6,0,0,0,0,0,2,0,0,0,0,0,0 -stay1462129,site243,South,18,,0,6,3,0,0,0,0,0,0,5,0,0,0,0 -stay1468150,site243,South,12,,0,6,6,0,10,0,0,0,3,5,0,0,0,0 -stay1473337,site243,South,7,,0,6,3,0,0,0,0,4,3,13,0,0,0,0 -stay1485125,site243,South,15,0,0,6,3,0,10,0,0,2,3,13,1,0,0,0 -stay1487482,site243,South,7,0,0,8,0,0,6,0,0,0,0,5,0,0,3,0 -stay1488007,site244,South,7,,0,6,0,0,0,0,0,7,0,5,0,0,0,0 -stay1498863,site243,South,12,,0,6,3,0,6,0,0,11,0,5,0,0,3,0 -stay1504052,site243,South,18,0,11,6,0,0,6,0,0,4,0,13,0,3,0,0 -stay1507386,site243,South,0,,0,6,0,0,0,0,5,4,3,5,0,0,0,0 -stay1510497,site243,South,18,0,0,6,0,0,0,0,0,2,3,5,1,3,0,1 -stay1514662,site243,South,7,0,4,6,3,0,0,0,0,7,0,5,0,3,3,0 -stay1521920,site243,South,18,,0,6,0,0,6,0,0,2,3,5,0,0,0,0 -stay1526298,site245,South,0,0,0,6,3,0,10,0,0,4,3,0,0,0,0,0 -stay1526419,site244,South,18,,4,6,0,0,6,0,0,2,0,5,0,0,0,0 -stay1526491,site243,South,16,0,11,6,0,0,6,0,5,7,3,13,0,0,3,1 -stay1527002,site243,South,18,6,0,6,3,0,6,0,26,2,0,0,0,0,0,0 -stay1541186,site243,South,16,0,11,6,6,0,10,0,0,11,0,13,0,0,3,1 -stay1543563,site245,South,16,,0,6,0,0,6,0,0,0,0,5,0,0,0,0 -stay1543788,site243,South,7,,0,6,0,0,6,0,0,0,0,5,0,0,0,0 -stay1548654,site243,South,7,,0,6,0,0,0,0,0,4,0,5,0,0,0,0 -stay1553410,site243,South,15,,0,6,3,0,6,0,0,4,0,5,0,0,0,0 -stay1554803,site252,Midwest,7,,0,6,0,0,0,0,0,4,0,5,0,0,3,0 -stay1555466,site252,Midwest,7,0,4,6,0,0,0,0,0,4,0,5,0,0,0,0 -stay1558821,site250,Midwest,7,,4,6,0,0,0,0,0,0,0,5,0,0,0,0 -stay1561473,site252,Midwest,16,0,0,6,6,0,6,0,26,4,0,5,0,0,3,1 -stay1562038,site252,Midwest,0,0,11,6,6,9,6,0,0,4,0,5,1,0,0,0 -stay1564327,site252,Midwest,7,,4,6,0,0,0,0,0,4,0,5,0,0,0,0 -stay1564928,site252,Midwest,7,11,4,6,0,0,6,0,0,2,3,5,0,0,0,0 -stay1565223,site248,Midwest,7,6,4,6,3,0,6,0,7,2,0,0,0,0,0,0 -stay1565448,site252,Midwest,18,0,11,6,3,0,0,0,0,4,0,13,5,0,0,0 -stay1565572,site248,Midwest,18,0,4,6,6,0,10,0,7,7,0,5,1,0,0,1 -stay1566492,site248,Midwest,16,0,4,6,6,0,10,0,0,0,3,13,0,0,3,0 -stay1567111,site248,Midwest,16,0,11,6,0,0,6,0,0,4,3,5,5,0,3,0 -stay1567156,site248,Midwest,18,,11,6,3,0,6,0,0,4,0,13,0,0,3,1 -stay1567325,site248,Midwest,15,0,4,6,0,0,0,0,13,4,0,5,0,0,0,0 -stay1569276,site252,Midwest,7,0,11,6,0,0,0,0,0,0,3,0,0,0,0,0 -stay1570132,site252,Midwest,12,0,0,6,6,0,6,0,5,0,3,13,1,0,3,1 -stay1570602,site252,Midwest,15,0,0,6,0,0,0,0,26,4,0,0,1,0,0,0 -stay1573042,site252,Midwest,16,11,0,6,6,9,0,0,26,2,0,13,0,0,0,1 -stay1573336,site252,Midwest,7,0,0,6,0,0,0,0,26,2,0,0,1,0,3,1 -stay1573601,site253,Midwest,16,0,0,6,0,0,0,0,0,4,0,0,0,0,0,1 -stay1575432,site250,Midwest,18,,0,6,0,0,6,0,0,2,0,13,0,0,0,0 -stay1575797,site252,Midwest,0,0,4,6,3,0,6,0,0,4,3,5,0,0,3,0 -stay1576142,site252,Midwest,7,0,0,6,0,0,0,0,5,2,0,13,0,0,0,0 -stay1577746,site252,Midwest,15,0,0,0,3,0,0,0,0,0,3,5,0,0,3,0 -stay1578613,site248,Midwest,7,0,0,6,0,0,0,0,7,0,3,5,0,3,0,0 -stay1580497,site253,Midwest,18,0,0,6,3,0,6,0,0,0,0,5,0,0,0,1 -stay1580669,site251,Midwest,18,,0,6,0,0,10,0,0,4,3,13,0,0,0,0 -stay1582836,site248,Midwest,0,0,0,6,0,0,10,0,0,4,3,5,0,0,3,0 -stay1582988,site248,Midwest,7,,0,6,0,0,6,0,0,4,0,13,5,0,3,0 diff --git 
a/notebooks/eicu_test_1.csv b/notebooks/eicu_test_1.csv deleted file mode 100644 index 25a99cc..0000000 --- a/notebooks/eicu_test_1.csv +++ /dev/null @@ -1,150 +0,0 @@ -id,site_hospital,site_region,age,pao2fio2,uo,admissiontype,bicarbonate,bilirubin,bun,chron_dis,gcs,hr,potassium,sbp,sodium,tempc,wbc,event_death -stay147985,site73,Midwest,16,0,4,6,3,0,6,0,5,0,0,5,1,0,0,1 -stay156248,site73,Midwest,7,0,0,6,0,0,0,0,0,0,0,5,0,0,0,0 -stay156308,site60,Midwest,18,0,0,6,0,0,6,0,0,0,3,5,1,0,0,0 -stay157820,site73,Midwest,12,0,11,6,3,0,10,0,0,0,0,0,1,0,0,0 -stay159036,site73,Midwest,18,0,0,6,0,0,6,0,0,4,0,5,0,0,3,0 -stay161900,site63,Midwest,7,0,0,6,3,0,0,0,26,4,0,5,0,0,3,1 -stay161964,site60,Midwest,12,0,0,6,0,0,0,0,0,4,0,0,0,0,3,0 -stay167204,site56,Midwest,15,0,0,6,0,0,0,0,0,0,0,13,0,0,3,0 -stay167957,site63,Midwest,0,0,0,6,0,0,0,0,7,2,3,5,1,0,0,1 -stay177509,site73,Midwest,12,0,0,6,0,0,6,0,0,0,0,5,0,3,3,0 -stay177873,site73,Midwest,7,0,0,6,6,9,6,0,5,4,0,5,0,0,0,0 -stay178652,site59,Midwest,0,0,0,6,0,9,0,0,0,4,3,5,0,3,0,0 -stay180870,site71,Midwest,18,0,0,6,3,0,6,0,0,0,0,13,0,0,0,1 -stay181437,site63,Midwest,15,0,0,8,0,0,6,0,0,2,0,0,0,0,0,0 -stay182413,site58,Midwest,12,0,11,6,0,0,6,9,0,0,0,5,0,0,3,0 -stay186957,site73,Midwest,7,0,0,6,0,0,6,0,0,0,0,5,0,0,0,0 -stay187667,site73,Midwest,15,0,0,6,6,0,10,0,0,4,3,5,0,0,0,0 -stay187743,site63,Midwest,15,0,0,6,0,0,6,0,5,2,0,5,0,3,0,0 -stay189842,site66,Midwest,18,0,0,6,3,0,0,0,0,2,0,5,1,0,0,0 -stay205928,site67,Midwest,7,0,0,6,3,0,6,0,0,0,0,5,0,0,0,0 -stay213451,site73,Midwest,12,0,0,6,6,0,6,0,13,4,3,5,1,3,3,0 -stay215734,site73,Midwest,7,0,0,6,3,0,6,0,0,4,3,0,0,3,0,0 -stay218304,site58,Midwest,16,0,0,6,0,0,6,0,7,2,3,0,0,0,0,0 -stay219737,site73,Midwest,7,0,4,6,0,0,0,0,0,4,0,2,0,3,3,0 -stay220660,site58,Midwest,12,0,11,6,0,0,6,0,0,2,0,5,0,0,3,0 -stay222966,site73,Midwest,12,0,0,6,3,0,6,0,0,0,0,5,0,3,3,0 -stay223446,site59,Midwest,12,0,11,6,0,0,0,9,0,7,0,5,0,0,3,0 -stay223811,site73,Midwest,0,0,0,6,3,0,0,0,0,7,0,13,0,3,0,0 -stay227824,site73,Midwest,7,0,11,6,0,0,0,0,0,0,3,5,0,3,0,0 -stay228974,site73,Midwest,16,0,0,6,0,0,6,0,5,2,0,5,0,0,0,0 -stay229005,site73,Midwest,18,0,0,6,0,0,6,0,7,2,3,13,0,0,0,0 -stay233773,site60,Midwest,16,0,0,6,0,0,6,0,0,2,0,13,0,0,3,0 -stay235513,site69,Midwest,0,0,0,6,0,0,0,0,0,4,0,13,0,3,3,0 -stay237269,site71,Midwest,18,0,0,6,3,0,6,0,0,2,3,5,0,0,0,0 -stay239061,site66,Midwest,7,0,0,6,0,0,0,0,0,0,0,2,0,0,0,0 -stay239314,site73,Midwest,12,0,11,6,0,0,6,0,0,2,0,5,0,0,0,0 -stay241289,site73,Midwest,18,0,0,6,6,0,6,0,5,2,0,5,0,0,0,1 -stay246348,site79,Midwest,7,0,0,6,0,0,0,10,5,0,0,5,0,0,0,0 -stay251523,site95,Midwest,18,,4,6,0,0,0,0,0,0,0,5,0,0,0,0 -stay252446,site79,Midwest,7,9,4,6,0,0,6,0,0,0,0,0,0,0,3,0 -stay254141,site79,Midwest,0,,11,6,3,0,6,0,0,4,0,5,0,0,12,0 -stay259621,site79,Midwest,18,9,11,6,6,0,0,0,13,4,0,0,0,0,3,1 -stay260689,site79,Midwest,12,0,0,6,3,0,10,0,7,4,0,0,0,0,3,0 -stay260860,site95,Midwest,7,0,0,6,0,0,0,0,0,2,0,5,0,0,0,0 -stay263047,site92,Midwest,12,,0,6,0,0,0,0,5,0,0,0,0,0,0,0 -stay264276,site92,Midwest,0,11,11,6,0,0,0,0,13,2,0,5,0,0,3,0 -stay264540,site79,Midwest,7,9,0,6,0,0,0,0,13,0,3,5,0,0,3,0 -stay266168,site85,NULL,12,,0,6,0,0,6,0,0,2,3,5,0,0,0,0 -stay269081,site102,NULL,16,0,4,6,3,0,6,0,5,0,0,5,0,3,0,1 -stay271391,site102,NULL,0,9,0,6,3,0,0,0,5,0,0,5,0,0,0,0 -stay277066,site95,Midwest,0,,0,6,0,0,0,0,0,0,0,5,0,3,0,0 -stay279355,site79,Midwest,16,9,0,6,0,0,0,0,7,2,0,5,0,0,0,0 -stay281219,site79,Midwest,16,,0,6,3,0,6,0,0,0,0,0,0,0,0,0 -stay282527,site102,NULL,0,11,4,6,6,0,0,0,13,4,3,13,0,3,12,0 
-stay292016,site92,Midwest,7,0,0,6,0,0,0,0,0,0,0,0,0,0,0,0 -stay294603,site79,Midwest,12,9,0,6,3,0,0,0,5,2,0,5,0,0,3,0 -stay294762,site95,Midwest,7,0,4,6,3,0,0,0,0,0,0,0,0,0,0,0 -stay295624,site95,Midwest,16,0,0,6,0,0,0,0,0,2,0,0,5,0,0,0 -stay297044,site92,Midwest,12,0,0,6,0,0,0,9,0,4,3,0,0,0,12,1 -stay297859,site84,Midwest,12,11,0,6,0,0,6,0,7,0,0,5,1,0,3,0 -stay302880,site95,Midwest,7,,0,6,0,0,0,0,0,2,0,5,0,0,0,0 -stay304989,site79,Midwest,7,0,4,6,0,0,0,0,7,4,0,5,0,0,0,0 -stay305674,site95,Midwest,12,0,11,6,3,0,0,0,0,2,0,5,0,0,0,0 -stay308218,site92,Midwest,15,,4,6,3,0,6,0,0,0,0,5,0,3,3,0 -stay308237,site79,Midwest,7,0,0,6,3,0,6,0,0,0,3,5,0,0,0,0 -stay315114,site125,South,18,0,0,6,0,0,0,0,0,2,0,0,0,0,0,0 -stay316957,site122,South,7,,0,6,0,0,0,9,0,2,3,13,0,0,12,0 -stay318153,site122,South,15,0,11,6,6,0,6,0,7,0,3,13,0,0,0,1 -stay332259,site122,South,7,9,11,6,6,0,10,10,0,0,3,5,0,0,0,0 -stay332661,site125,South,7,6,4,6,3,0,0,0,5,0,0,0,0,0,3,0 -stay336740,site112,South,12,,11,6,0,0,0,0,0,0,0,5,0,0,3,0 -stay337427,site110,South,15,0,11,6,6,0,6,0,0,0,0,5,0,0,3,1 -stay339428,site120,South,18,0,0,6,0,0,0,0,0,4,0,5,0,0,3,0 -stay341174,site122,South,7,0,0,6,3,0,0,0,5,4,0,5,0,3,0,0 -stay342749,site122,South,16,0,11,6,0,0,6,0,7,0,0,13,0,0,0,0 -stay343178,site125,South,7,0,0,6,3,9,6,0,0,7,0,5,0,0,3,0 -stay344919,site122,South,18,,11,6,0,0,0,0,0,0,0,0,0,0,3,0 -stay346306,site122,South,12,0,0,6,3,0,0,9,0,4,3,5,0,0,0,0 -stay355077,site122,South,12,0,4,6,0,0,0,0,0,0,0,5,0,0,0,0 -stay355410,site110,South,15,0,0,6,3,0,0,0,0,0,3,5,5,0,0,0 -stay357122,site122,South,7,0,0,6,0,0,6,0,13,2,3,0,0,0,0,0 -stay368692,site122,South,18,,4,6,0,0,0,0,5,4,0,0,0,0,0,0 -stay373125,site122,South,12,0,11,0,0,4,0,0,0,2,0,5,0,0,0,0 -stay382391,site122,South,7,0,4,6,3,0,0,0,0,0,0,5,1,0,3,0 -stay394170,site122,South,15,,0,6,0,0,6,0,0,0,0,5,0,3,0,0 -stay394324,site112,South,16,0,4,6,3,0,6,0,0,2,0,5,0,0,0,0 -stay396703,site122,South,0,0,0,6,0,0,0,0,0,0,0,0,0,0,0,0 -stay397142,site122,South,0,0,0,6,0,0,0,0,0,2,0,5,0,0,3,0 -stay403303,site122,South,15,0,11,6,3,0,6,0,0,0,0,5,0,0,0,0 -stay411293,site122,South,18,6,4,6,0,0,6,10,0,0,0,5,0,0,0,1 -stay412911,site122,South,18,9,11,6,6,0,6,9,0,4,0,13,0,0,12,1 -stay423253,site112,South,16,6,0,6,0,0,0,0,5,0,0,5,0,0,0,0 -stay427732,site143,NULL,15,11,0,6,6,0,6,0,0,4,3,13,1,0,12,1 -stay429163,site142,South,12,11,0,6,0,0,6,0,0,2,0,0,0,0,0,0 -stay442854,site144,South,18,9,0,6,0,0,6,0,0,2,3,5,0,0,0,0 -stay443192,site141,South,7,,4,6,6,0,0,0,0,4,0,5,0,0,3,0 -stay447632,site141,South,12,,4,6,3,4,0,9,0,4,3,5,0,0,0,1 -stay450744,site141,South,12,9,0,8,3,0,6,0,0,4,0,5,5,0,0,0 -stay458489,site142,South,18,,0,6,0,0,0,10,0,2,0,5,0,0,3,0 -stay460583,site142,South,12,6,0,6,6,0,0,0,13,4,0,5,1,0,0,0 -stay462690,site141,South,18,,0,6,0,0,6,0,0,0,0,5,0,0,3,0 -stay477532,site144,South,7,11,11,6,6,0,0,17,0,4,0,13,0,0,0,0 -stay488368,site141,South,18,0,11,6,6,0,6,0,0,0,3,5,1,0,0,0 -stay491382,site140,South,18,0,4,6,3,0,6,0,26,2,3,5,1,0,3,0 -stay494366,site140,South,18,,0,6,0,0,6,0,0,0,0,5,0,0,3,0 -stay496613,site144,South,15,0,11,6,3,0,6,0,0,7,0,5,0,0,3,0 -stay498353,site142,South,7,,0,6,0,0,0,0,0,0,0,5,0,0,12,0 -stay500823,site142,South,12,6,11,6,3,0,6,0,0,4,3,13,0,0,3,0 -stay505156,site144,South,18,,4,6,6,0,10,0,0,0,3,5,0,0,3,0 -stay513669,site141,South,0,,11,6,0,0,0,0,0,4,3,5,0,0,0,0 -stay522307,site142,South,7,0,0,6,3,0,10,0,0,4,3,13,0,0,3,0 -stay522954,site144,South,7,,11,6,0,0,0,0,0,0,0,13,0,0,0,0 -stay526988,site141,South,15,0,0,6,3,0,6,0,7,4,3,5,0,3,0,1 
-stay532881,site140,South,16,,4,6,0,0,6,0,0,2,0,5,1,0,0,0 -stay546496,site175,West,12,0,0,6,0,0,0,0,0,0,0,0,0,0,0,0 -stay555710,site148,West,15,0,0,6,0,0,6,0,0,4,0,5,1,0,3,0 -stay561748,site167,West,18,0,11,6,0,9,0,0,0,0,0,5,0,0,0,0 -stay567865,site157,West,15,0,0,6,0,0,6,0,7,2,0,13,0,3,0,0 -stay570953,site175,West,18,0,0,6,6,0,10,0,26,2,3,5,0,0,0,0 -stay571553,site155,West,12,0,0,6,0,0,0,0,0,0,0,5,0,0,0,0 -stay574735,site167,West,0,0,0,6,0,0,6,0,0,0,0,13,0,0,0,0 -stay577746,site154,West,0,0,0,6,3,0,0,0,26,4,0,0,0,0,3,0 -stay580326,site171,West,16,0,4,0,6,0,6,0,13,4,3,5,5,0,3,0 -stay581427,site148,West,18,0,0,0,3,0,0,0,0,4,0,5,0,0,0,1 -stay583687,site154,West,18,0,0,6,6,0,6,0,0,4,3,5,0,0,0,0 -stay586393,site171,West,7,11,0,6,0,0,6,0,0,0,0,5,0,0,0,0 -stay589042,site154,West,15,0,0,6,6,0,6,0,0,4,0,13,1,0,3,1 -stay591957,site167,West,7,0,4,6,6,9,6,0,0,4,3,0,0,0,3,0 -stay602482,site165,West,7,0,0,6,0,0,0,0,7,0,0,5,0,0,0,0 -stay602996,site157,West,16,0,0,6,0,0,6,0,0,2,0,5,0,0,3,0 -stay609431,site146,West,16,0,0,0,0,0,0,0,26,0,3,5,1,0,3,0 -stay611252,site167,West,7,0,11,6,6,0,6,0,0,2,3,13,1,0,0,0 -stay616559,site167,West,15,0,0,6,0,0,6,0,26,0,0,13,0,0,3,0 -stay620641,site176,NULL,15,0,0,6,0,0,6,0,0,2,3,5,0,0,0,0 -stay624235,site157,West,12,0,0,6,0,0,6,0,0,0,0,13,0,0,0,1 -stay625315,site148,West,16,0,0,6,0,0,0,0,0,4,0,2,0,0,3,1 -stay640412,site157,West,7,0,11,6,3,9,0,0,0,4,0,5,0,0,0,1 -stay672543,site148,West,0,0,0,6,6,0,6,0,26,4,0,5,1,0,3,0 -stay679788,site167,West,7,0,0,6,0,0,0,0,0,4,0,0,5,3,3,0 -stay680703,site157,West,7,0,0,6,0,0,10,0,0,4,3,5,0,0,0,0 -stay682783,site175,West,16,0,0,6,3,0,6,0,0,2,0,5,1,0,3,0 -stay690168,site154,West,15,0,0,6,6,0,10,0,0,0,0,0,0,0,0,0 -stay691052,site167,West,7,0,0,6,0,0,6,0,0,4,0,5,0,0,0,0 -stay693897,site146,West,15,0,11,6,3,0,6,0,0,4,0,13,0,0,3,0 -stay695514,site167,West,12,0,4,6,0,0,6,0,0,4,0,13,0,0,0,1 -stay700930,site171,West,7,0,0,6,0,0,0,0,0,0,3,5,0,0,0,0 -stay709956,site167,West,12,0,0,6,3,0,6,0,0,0,0,0,0,3,0,0 -stay713512,site165,West,18,0,4,6,3,0,0,0,0,4,0,5,0,0,0,0 -stay715085,site167,West,7,0,11,6,0,0,0,0,0,2,0,5,0,0,3,0 \ No newline at end of file diff --git a/notebooks/eicu_test_2.csv b/notebooks/eicu_test_2.csv deleted file mode 100644 index 867cd0d..0000000 --- a/notebooks/eicu_test_2.csv +++ /dev/null @@ -1,150 +0,0 @@ -id,site_hospital,site_region,age,pao2fio2,uo,admissiontype,bicarbonate,bilirubin,bun,chron_dis,gcs,hr,potassium,sbp,sodium,tempc,wbc,event_death -stay722936,site148,West,12,0,0,6,0,0,6,0,5,4,0,5,0,0,3,1 -stay725182,site154,West,12,0,0,6,0,0,0,0,5,4,0,13,1,0,0,0 -stay731227,site167,West,0,0,11,6,6,0,6,0,26,2,0,5,0,0,3,0 -stay735476,site176,NULL,12,0,11,6,3,0,6,0,0,4,0,5,0,3,3,0 -stay739214,site157,West,18,0,0,6,0,0,0,0,0,2,0,5,1,3,0,1 -stay739992,site155,West,0,0,0,6,3,0,0,0,0,4,0,5,0,0,0,0 -stay744506,site157,West,0,0,4,6,3,0,6,0,0,2,0,13,0,0,3,0 -stay745736,site165,West,7,0,0,6,3,0,0,0,13,4,0,5,1,0,3,0 -stay757240,site167,West,7,0,4,6,3,9,0,0,5,2,0,5,0,0,0,0 -stay764013,site167,West,7,0,0,6,0,0,0,0,0,2,0,13,0,0,3,0 -stay765251,site167,West,7,0,0,6,0,0,6,0,0,0,0,5,0,0,0,0 -stay769926,site165,West,7,0,0,6,6,0,6,0,0,2,0,13,0,0,0,0 -stay774726,site146,West,12,0,4,6,3,0,10,0,13,0,0,5,0,3,0,0 -stay774810,site157,West,7,0,0,6,6,9,6,0,0,0,0,5,5,0,3,0 -stay780005,site167,West,7,0,11,6,0,9,6,0,0,4,0,13,0,3,0,0 -stay784068,site157,West,12,0,0,6,0,0,0,0,5,0,0,5,1,0,0,1 -stay797135,site146,West,12,0,0,6,0,0,0,0,0,4,0,5,0,0,0,0 -stay804391,site165,West,12,0,0,6,3,0,10,0,0,2,3,5,5,0,0,0 
-stay806349,site155,West,18,0,0,6,6,0,6,0,0,2,3,5,0,0,0,1 -stay807744,site148,West,12,0,0,6,0,0,0,0,0,2,0,5,0,0,0,0 -stay807997,site165,West,7,0,0,6,0,0,0,0,26,2,3,5,0,3,3,0 -stay813288,site157,West,7,0,4,0,3,0,6,0,0,0,0,5,0,0,3,0 -stay816230,site165,West,7,0,11,6,6,9,6,0,13,4,3,5,5,0,0,1 -stay819599,site167,West,7,0,0,6,3,0,6,0,7,2,0,5,0,0,3,0 -stay822457,site175,West,7,0,11,6,0,0,0,0,0,2,0,5,0,0,0,0 -stay827595,site154,West,16,0,0,6,3,0,6,0,0,4,0,13,0,0,0,1 -stay838098,site171,West,0,0,0,6,0,0,0,0,0,0,0,0,0,3,0,0 -stay841207,site167,West,18,0,0,6,3,0,6,0,0,2,0,5,1,0,0,0 -stay844203,site175,West,12,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 -stay851378,site175,West,7,0,4,6,6,0,0,0,0,4,3,13,1,3,3,0 -stay857208,site171,West,18,0,4,6,3,0,6,0,0,0,0,5,0,0,3,0 -stay858523,site154,West,18,0,0,6,0,0,0,0,0,2,0,5,0,0,0,0 -stay863986,site176,NULL,12,0,0,6,0,0,0,0,0,2,0,5,0,3,0,0 -stay868530,site165,West,7,0,0,6,3,0,6,0,0,4,0,13,1,0,12,0 -stay881156,site158,West,7,0,0,6,0,0,6,0,0,0,0,5,0,0,0,0 -stay881849,site165,West,0,0,0,6,3,0,0,0,0,4,0,5,0,0,0,0 -stay885624,site148,West,12,0,11,6,3,0,6,0,0,4,0,13,0,0,0,0 -stay896643,site157,West,15,0,0,6,0,0,6,0,0,2,0,5,0,3,0,0 -stay921087,site167,West,12,0,0,6,0,0,6,0,0,7,0,13,0,0,3,1 -stay923906,site167,West,0,0,0,0,3,0,6,0,5,4,3,0,0,0,3,0 -stay924944,site152,West,12,0,0,6,0,0,6,0,0,4,0,5,1,0,3,0 -stay925012,site157,West,18,0,11,0,6,0,6,0,0,4,0,13,1,0,3,1 -stay925855,site154,West,16,0,4,6,0,0,0,0,0,4,0,5,0,0,3,0 -stay928821,site175,West,18,0,0,6,0,0,0,0,0,4,0,0,0,0,0,0 -stay946791,site176,NULL,0,0,0,6,0,0,6,0,0,4,0,0,0,3,0,0 -stay955846,site157,West,15,0,0,6,0,0,6,0,5,0,0,0,0,0,0,0 -stay955930,site165,West,18,0,0,6,3,0,0,0,0,2,0,5,5,0,3,1 -stay960117,site180,South,7,9,0,6,3,0,0,0,26,4,0,5,1,0,3,0 -stay960321,site182,South,18,0,0,6,3,0,6,0,0,0,0,5,1,0,3,0 -stay960564,site181,South,12,0,0,6,0,0,10,0,7,4,0,5,1,0,3,0 -stay961149,site183,South,18,,0,6,0,0,6,0,0,2,0,0,0,0,3,0 -stay962287,site183,South,7,,0,6,3,9,6,0,0,2,0,5,0,0,0,0 -stay962787,site183,South,18,,0,6,0,0,0,0,0,0,0,5,1,0,0,0 -stay962806,site184,South,7,9,11,6,0,0,10,0,0,4,0,5,0,3,3,0 -stay963512,site183,South,15,9,11,6,0,0,6,0,26,4,0,13,1,0,0,0 -stay963996,site180,South,7,11,4,6,0,0,6,0,0,4,0,5,0,0,0,0 -stay964489,site181,South,18,6,4,6,3,0,0,0,13,2,3,5,1,0,0,0 -stay966033,site182,South,12,0,11,6,0,0,10,0,26,2,3,5,1,0,0,0 -stay966487,site180,South,7,,0,6,0,0,6,0,0,2,0,5,0,0,3,0 -stay967973,site184,South,7,0,0,6,0,0,0,0,0,0,0,5,1,3,0,0 -stay969204,site184,South,15,0,0,6,0,0,0,0,0,2,0,5,0,0,0,0 -stay969367,site182,South,7,11,0,6,6,0,0,0,0,4,0,2,0,3,0,0 -stay970044,site184,South,18,,0,6,3,0,10,0,0,2,0,13,1,0,3,0 -stay970782,site180,South,7,6,0,6,0,0,0,0,26,0,3,2,0,0,3,0 -stay970859,site184,South,7,,4,6,0,0,10,0,0,2,0,0,0,0,0,0 -stay972795,site183,South,12,,0,6,0,0,6,0,0,0,3,5,0,0,3,0 -stay974086,site181,South,15,0,4,6,3,0,10,0,5,2,3,13,0,0,0,0 -stay974580,site183,South,12,6,4,8,0,0,0,0,13,0,0,5,0,0,3,0 -stay976944,site181,South,18,0,0,6,0,0,6,0,7,2,3,13,0,0,0,0 -stay977664,site184,South,15,,0,6,3,0,6,0,13,2,3,13,1,0,0,0 -stay977708,site183,South,7,,0,6,0,0,0,0,0,2,0,5,0,0,0,0 -stay978991,site181,South,18,0,0,6,0,0,0,0,0,7,0,13,0,3,3,0 -stay980062,site181,South,7,9,0,6,6,0,6,17,26,4,3,5,1,0,0,1 -stay980146,site183,South,7,0,4,6,0,0,6,0,0,0,0,5,1,0,0,0 -stay980949,site188,South,15,0,0,6,0,0,0,0,0,2,0,5,0,0,0,0 -stay982655,site188,South,15,,11,6,3,0,0,0,0,2,3,5,0,0,0,0 -stay986282,site188,South,12,0,0,6,3,0,10,0,5,4,3,5,0,0,3,0 -stay993578,site188,South,18,0,4,6,3,0,6,0,0,2,3,5,0,0,0,0 
-stay998738,site188,South,12,,4,6,3,0,0,0,0,2,3,13,0,0,0,0 -stay1000187,site188,South,7,0,0,6,6,0,0,0,0,2,3,13,5,0,3,0 -stay1000586,site188,South,12,0,0,6,0,0,6,0,5,4,0,5,1,0,0,1 -stay1003266,site188,South,12,9,0,6,0,0,0,0,0,0,0,0,0,0,0,0 -stay1005624,site188,South,7,9,4,6,0,0,6,10,13,0,3,0,0,0,3,0 -stay1013789,site188,South,7,,0,6,0,0,0,0,5,0,0,5,0,0,0,0 -stay1023366,site188,South,0,,0,6,6,0,0,0,5,4,0,5,0,0,0,0 -stay1026742,site188,South,7,,11,6,3,0,6,0,0,0,0,5,0,0,0,0 -stay1029620,site188,South,0,,0,6,0,0,0,0,0,0,0,13,0,0,0,0 -stay1031993,site188,South,18,,11,6,0,0,0,10,5,4,0,5,0,0,12,0 -stay1033429,site188,South,7,11,11,6,6,0,6,0,5,0,0,5,0,0,0,1 -stay1033634,site188,South,7,,0,6,3,0,6,0,0,0,0,5,0,0,0,0 -stay1043096,site188,South,7,,0,6,6,0,0,0,0,0,0,0,0,0,3,0 -stay1044410,site188,South,12,,0,6,6,0,10,0,0,0,3,5,1,0,0,0 -stay1045002,site188,South,12,9,4,6,6,4,6,0,0,0,0,13,0,0,0,0 -stay1046202,site188,South,18,,4,6,0,0,0,0,0,0,0,0,0,0,0,0 -stay1050983,site188,South,7,,0,6,0,0,0,0,0,4,0,5,0,3,0,0 -stay1055742,site198,South,0,0,0,6,3,0,0,0,0,0,3,5,0,0,0,0 -stay1056509,site198,South,0,,0,6,0,0,6,0,0,4,3,5,0,0,3,0 -stay1057548,site198,South,7,0,11,6,0,0,0,0,13,2,3,0,0,3,0,0 -stay1058422,site197,South,7,0,11,6,6,0,0,0,0,0,3,5,0,0,0,0 -stay1062049,site197,South,18,0,4,6,3,0,0,0,0,0,0,5,0,3,0,0 -stay1062653,site197,South,7,,11,6,3,0,10,0,13,4,3,5,1,0,3,1 -stay1068643,site197,South,15,,4,6,0,0,0,0,5,2,0,5,0,3,3,0 -stay1069370,site197,South,16,,4,6,3,0,6,0,0,0,0,5,0,0,0,0 -stay1069749,site198,South,15,,0,6,0,0,6,0,0,0,0,5,1,0,0,0 -stay1072810,site196,South,18,,4,6,0,0,0,0,0,2,0,0,0,0,0,0 -stay1074108,site198,South,12,0,11,6,3,9,6,0,26,2,0,5,0,0,0,1 -stay1076226,site200,Northeast,7,,0,6,3,0,6,0,5,2,0,5,0,3,3,0 -stay1076606,site199,Northeast,0,0,0,6,0,0,0,0,0,4,0,0,0,0,0,0 -stay1077358,site202,Northeast,16,0,4,6,0,0,6,0,5,4,0,5,0,3,3,1 -stay1080052,site199,Northeast,16,0,4,6,0,0,0,0,0,4,0,5,0,0,3,0 -stay1081466,site205,Northeast,12,,0,6,0,0,0,0,0,2,0,0,0,3,0,0 -stay1081963,site202,Northeast,12,0,11,6,0,0,0,0,26,2,3,13,0,0,3,0 -stay1082373,site205,Northeast,18,,0,6,3,0,6,0,0,0,0,0,0,0,0,0 -stay1082905,site202,Northeast,15,,0,6,0,0,6,0,0,2,0,5,0,0,0,1 -stay1084031,site199,Northeast,16,0,11,6,6,0,6,0,0,2,0,5,0,0,0,0 -stay1084784,site206,Northeast,12,0,11,6,3,0,10,0,0,0,3,13,0,0,0,0 -stay1088435,site199,Northeast,12,0,0,8,6,0,6,0,0,4,3,5,0,0,12,0 -stay1089444,site199,Northeast,15,0,0,6,6,0,6,0,0,4,3,13,1,0,3,1 -stay1090327,site202,Northeast,12,0,0,6,0,0,6,0,5,0,0,5,0,0,0,0 -stay1092001,site203,NULL,12,,11,6,3,0,6,0,0,0,3,5,0,0,0,1 -stay1093040,site202,Northeast,18,0,0,6,0,0,6,0,26,4,0,13,0,0,0,0 -stay1094828,site199,Northeast,18,,0,6,0,0,6,0,5,2,0,5,0,0,0,0 -stay1094849,site202,Northeast,12,0,0,6,6,0,10,0,5,2,3,5,1,0,0,1 -stay1095353,site199,Northeast,16,0,4,6,3,0,0,0,0,4,3,5,0,0,3,1 -stay1096439,site199,Northeast,7,0,0,6,3,0,0,0,26,2,0,5,0,0,0,0 -stay1097030,site199,Northeast,12,0,0,6,3,0,0,0,5,4,3,5,0,3,0,0 -stay1099147,site200,Northeast,18,0,11,6,0,0,6,0,5,4,3,5,1,0,0,0 -stay1099210,site199,Northeast,7,0,0,6,0,0,0,0,0,4,0,5,0,0,0,0 -stay1102875,site199,Northeast,12,,4,6,3,0,6,0,13,0,3,13,1,0,0,0 -stay1103257,site200,Northeast,12,,4,6,6,0,10,0,7,4,3,13,1,0,3,0 -stay1104609,site201,NULL,15,,0,6,3,0,6,0,0,0,0,5,0,0,0,0 -stay1105897,site200,Northeast,7,,0,6,6,0,6,0,0,4,0,5,0,0,0,0 -stay1105997,site206,Northeast,12,0,0,6,6,4,6,0,0,4,3,5,0,0,0,0 -stay1106035,site199,Northeast,7,0,4,6,0,0,0,0,5,2,3,0,0,0,0,0 -stay1106553,site206,Northeast,12,0,4,6,0,4,10,0,0,4,0,5,5,0,0,1 
-stay1107467,site202,Northeast,7,,0,6,0,0,0,0,0,2,0,13,0,0,0,0 -stay1107541,site202,Northeast,12,0,4,6,0,0,0,0,0,4,0,5,1,0,0,0 -stay1108165,site200,Northeast,7,,0,6,3,0,6,0,0,0,0,0,0,0,0,0 -stay1109131,site202,Northeast,16,0,4,6,6,0,6,0,0,2,3,13,0,0,3,0 -stay1109457,site206,Northeast,12,0,4,6,3,0,6,0,5,4,0,5,0,0,0,1 -stay1113145,site206,Northeast,16,0,0,6,0,0,10,0,0,4,3,13,0,0,0,1 -stay1113208,site199,Northeast,12,0,0,6,0,0,6,0,0,2,0,5,0,0,0,1 -stay1114724,site200,Northeast,16,0,4,6,3,0,0,0,0,2,3,0,0,0,0,0 -stay1115772,site199,Northeast,12,,0,6,0,0,0,0,0,2,0,0,0,3,0,0 -stay1117239,site199,Northeast,7,0,0,6,0,0,0,0,0,0,3,0,0,0,0,0 -stay1119233,site199,Northeast,18,0,11,6,6,0,6,0,26,0,3,13,0,0,0,0 -stay1119288,site199,Northeast,7,0,0,6,0,0,0,0,5,4,0,13,0,0,3,0 -stay1121252,site199,Northeast,0,0,0,6,3,0,0,0,5,4,0,5,0,0,3,0 -stay1121985,site206,Northeast,12,,0,6,6,0,6,0,0,0,3,5,1,0,0,1 \ No newline at end of file diff --git a/notebooks/eicu_test_3.csv b/notebooks/eicu_test_3.csv deleted file mode 100644 index 9a84507..0000000 --- a/notebooks/eicu_test_3.csv +++ /dev/null @@ -1,102 +0,0 @@ -id,site_hospital,site_region,age,pao2fio2,uo,admissiontype,bicarbonate,bilirubin,bun,chron_dis,gcs,hr,potassium,sbp,sodium,tempc,wbc,event_death -stay1123474,site199,Northeast,16,0,0,6,3,0,0,0,0,0,0,5,0,0,0,0 -stay1126845,site199,Northeast,16,0,4,6,6,0,6,0,0,4,0,13,0,0,3,1 -stay1131430,site206,Northeast,7,0,11,6,3,0,10,0,13,0,3,5,0,0,3,1 -stay1134760,site206,Northeast,12,0,0,6,0,4,6,0,0,2,3,5,0,0,3,0 -stay1135234,site206,Northeast,16,0,0,8,3,0,6,0,5,4,0,5,0,0,0,0 -stay1149332,site207,South,7,0,4,6,0,0,0,0,0,4,0,0,0,0,3,0 -stay1160463,site210,South,16,0,4,6,0,0,6,0,0,0,0,13,0,0,0,1 -stay1160625,site208,South,15,0,0,6,0,0,0,0,0,0,0,5,0,0,0,0 -stay1175784,site208,South,7,0,4,6,6,0,0,0,0,0,0,5,0,0,0,1 -stay1176770,site209,NULL,7,0,0,8,0,0,0,0,0,0,0,0,0,0,0,0 -stay1177158,site208,South,16,0,4,6,3,0,6,0,0,0,3,5,0,0,0,0 -stay1181979,site208,South,15,0,0,6,0,0,0,0,0,2,3,5,0,0,0,0 -stay1183831,site210,South,7,9,0,6,0,0,0,0,0,4,0,5,0,0,0,0 -stay1189164,site208,South,12,0,4,6,3,0,6,0,0,4,0,5,0,3,3,0 -stay1203933,site208,South,7,0,4,6,0,0,0,0,0,2,0,5,0,0,0,0 -stay1207963,site208,South,15,0,0,6,0,0,6,0,0,0,0,0,0,0,0,0 -stay1220951,site217,South,18,0,0,6,0,0,6,0,0,0,3,5,0,0,0,0 -stay1222431,site212,NULL,16,0,0,6,3,0,10,0,0,0,3,5,1,0,3,0 -stay1229669,site217,South,7,0,0,6,6,9,6,0,5,0,0,5,0,0,0,1 -stay1244394,site215,South,7,0,0,6,3,0,0,0,0,0,0,5,0,0,0,0 -stay1256137,site217,South,7,0,0,6,0,0,0,0,0,4,0,5,0,0,0,0 -stay1259594,site217,South,7,0,0,6,0,0,0,0,0,0,0,5,0,0,3,0 -stay1260715,site220,South,0,0,4,6,6,0,10,0,0,0,3,5,0,0,0,0 -stay1262366,site220,South,12,0,4,6,0,0,0,0,0,2,0,5,0,0,12,0 -stay1271652,site215,South,7,0,11,6,0,0,0,0,0,0,0,5,0,0,0,0 -stay1277372,site217,South,7,0,0,6,3,0,6,0,0,0,0,5,0,0,0,0 -stay1281012,site220,South,16,0,0,6,6,0,10,0,0,0,3,5,1,0,12,0 -stay1285656,site217,South,16,0,4,6,3,0,6,0,0,0,0,5,0,0,3,0 -stay1293281,site217,South,18,0,0,6,0,0,0,0,0,0,3,5,0,0,0,1 -stay1308488,site224,South,18,0,11,6,3,0,0,0,0,0,3,5,0,0,0,0 -stay1310476,site215,South,12,0,0,6,3,0,6,0,5,4,3,5,0,3,3,0 -stay1311054,site215,South,16,0,11,6,6,0,6,0,0,4,0,5,0,0,0,1 -stay1313593,site220,South,7,0,4,0,3,4,6,0,0,4,0,5,0,0,0,1 -stay1318102,site227,West,16,0,0,6,0,0,0,0,5,4,0,5,0,0,0,1 -stay1318897,site226,West,18,0,0,6,0,0,6,0,0,0,3,5,0,0,0,1 -stay1329108,site227,West,12,,0,6,0,0,0,17,0,2,0,13,0,0,0,0 -stay1330371,site227,West,7,,11,6,6,0,0,0,5,2,3,5,0,0,3,0 -stay1331036,site227,West,7,,0,6,0,0,6,0,0,0,0,5,0,0,12,0 
-stay1337567,site226,West,18,0,0,6,0,0,0,0,0,0,0,5,0,0,0,0 -stay1341158,site227,West,18,,0,6,0,0,6,0,0,0,0,5,0,0,0,0 -stay1348181,site227,West,18,0,0,6,0,0,0,0,26,2,0,2,0,0,0,0 -stay1350990,site226,West,7,,4,6,3,0,0,0,0,4,0,5,1,3,0,0 -stay1356914,site227,West,12,0,0,6,6,0,6,0,0,4,0,13,0,0,12,1 -stay1358786,site226,West,18,,0,6,3,0,10,0,7,0,3,5,1,0,3,1 -stay1359679,site227,West,16,0,11,6,0,0,6,0,0,7,3,13,0,0,3,1 -stay1361409,site226,West,18,,0,6,3,0,6,0,0,7,0,5,0,3,0,1 -stay1361818,site227,West,7,,0,6,0,0,0,0,0,0,3,5,0,3,0,0 -stay1365966,site226,West,12,,0,6,0,4,0,0,0,0,0,5,0,0,0,0 -stay1439565,site244,South,18,,4,6,3,0,10,0,0,2,3,5,1,0,3,0 -stay1453647,site243,South,12,0,0,6,0,0,0,0,0,11,3,13,0,0,3,0 -stay1459389,site243,South,12,0,4,6,0,0,0,0,0,2,0,0,0,0,0,0 -stay1462129,site243,South,18,,0,6,3,0,0,0,0,0,0,5,0,0,0,0 -stay1468150,site243,South,12,,0,6,6,0,10,0,0,0,3,5,0,0,0,0 -stay1473337,site243,South,7,,0,6,3,0,0,0,0,4,3,13,0,0,0,0 -stay1485125,site243,South,15,0,0,6,3,0,10,0,0,2,3,13,1,0,0,0 -stay1487482,site243,South,7,0,0,8,0,0,6,0,0,0,0,5,0,0,3,0 -stay1488007,site244,South,7,,0,6,0,0,0,0,0,7,0,5,0,0,0,0 -stay1498863,site243,South,12,,0,6,3,0,6,0,0,11,0,5,0,0,3,0 -stay1504052,site243,South,18,0,11,6,0,0,6,0,0,4,0,13,0,3,0,0 -stay1507386,site243,South,0,,0,6,0,0,0,0,5,4,3,5,0,0,0,0 -stay1510497,site243,South,18,0,0,6,0,0,0,0,0,2,3,5,1,3,0,1 -stay1514662,site243,South,7,0,4,6,3,0,0,0,0,7,0,5,0,3,3,0 -stay1521920,site243,South,18,,0,6,0,0,6,0,0,2,3,5,0,0,0,0 -stay1526298,site245,South,0,0,0,6,3,0,10,0,0,4,3,0,0,0,0,0 -stay1526419,site244,South,18,,4,6,0,0,6,0,0,2,0,5,0,0,0,0 -stay1526491,site243,South,16,0,11,6,0,0,6,0,5,7,3,13,0,0,3,1 -stay1527002,site243,South,18,6,0,6,3,0,6,0,26,2,0,0,0,0,0,0 -stay1541186,site243,South,16,0,11,6,6,0,10,0,0,11,0,13,0,0,3,1 -stay1543563,site245,South,16,,0,6,0,0,6,0,0,0,0,5,0,0,0,0 -stay1543788,site243,South,7,,0,6,0,0,6,0,0,0,0,5,0,0,0,0 -stay1548654,site243,South,7,,0,6,0,0,0,0,0,4,0,5,0,0,0,0 -stay1553410,site243,South,15,,0,6,3,0,6,0,0,4,0,5,0,0,0,0 -stay1554803,site252,Midwest,7,,0,6,0,0,0,0,0,4,0,5,0,0,3,0 -stay1555466,site252,Midwest,7,0,4,6,0,0,0,0,0,4,0,5,0,0,0,0 -stay1558821,site250,Midwest,7,,4,6,0,0,0,0,0,0,0,5,0,0,0,0 -stay1561473,site252,Midwest,16,0,0,6,6,0,6,0,26,4,0,5,0,0,3,1 -stay1562038,site252,Midwest,0,0,11,6,6,9,6,0,0,4,0,5,1,0,0,0 -stay1564327,site252,Midwest,7,,4,6,0,0,0,0,0,4,0,5,0,0,0,0 -stay1564928,site252,Midwest,7,11,4,6,0,0,6,0,0,2,3,5,0,0,0,0 -stay1565223,site248,Midwest,7,6,4,6,3,0,6,0,7,2,0,0,0,0,0,0 -stay1565448,site252,Midwest,18,0,11,6,3,0,0,0,0,4,0,13,5,0,0,0 -stay1565572,site248,Midwest,18,0,4,6,6,0,10,0,7,7,0,5,1,0,0,1 -stay1566492,site248,Midwest,16,0,4,6,6,0,10,0,0,0,3,13,0,0,3,0 -stay1567111,site248,Midwest,16,0,11,6,0,0,6,0,0,4,3,5,5,0,3,0 -stay1567156,site248,Midwest,18,,11,6,3,0,6,0,0,4,0,13,0,0,3,1 -stay1567325,site248,Midwest,15,0,4,6,0,0,0,0,13,4,0,5,0,0,0,0 -stay1569276,site252,Midwest,7,0,11,6,0,0,0,0,0,0,3,0,0,0,0,0 -stay1570132,site252,Midwest,12,0,0,6,6,0,6,0,5,0,3,13,1,0,3,1 -stay1570602,site252,Midwest,15,0,0,6,0,0,0,0,26,4,0,0,1,0,0,0 -stay1573042,site252,Midwest,16,11,0,6,6,9,0,0,26,2,0,13,0,0,0,1 -stay1573336,site252,Midwest,7,0,0,6,0,0,0,0,26,2,0,0,1,0,3,1 -stay1573601,site253,Midwest,16,0,0,6,0,0,0,0,0,4,0,0,0,0,0,1 -stay1575432,site250,Midwest,18,,0,6,0,0,6,0,0,2,0,13,0,0,0,0 -stay1575797,site252,Midwest,0,0,4,6,3,0,6,0,0,4,3,5,0,0,3,0 -stay1576142,site252,Midwest,7,0,0,6,0,0,0,0,5,2,0,13,0,0,0,0 -stay1577746,site252,Midwest,15,0,0,0,3,0,0,0,0,0,3,5,0,0,3,0 
-stay1578613,site248,Midwest,7,0,0,6,0,0,0,0,7,0,3,5,0,3,0,0 -stay1580497,site253,Midwest,18,0,0,6,3,0,6,0,0,0,0,5,0,0,0,1 -stay1580669,site251,Midwest,18,,0,6,0,0,10,0,0,4,3,13,0,0,0,0 -stay1582836,site248,Midwest,0,0,0,6,0,0,10,0,0,4,3,5,0,0,3,0 -stay1582988,site248,Midwest,7,,0,6,0,0,6,0,0,4,0,13,5,0,3,0 diff --git a/notebooks/rw/rw_test.ipynb b/notebooks/rw/rw_test.ipynb index a45911d..1d56fd7 100644 --- a/notebooks/rw/rw_test.ipynb +++ b/notebooks/rw/rw_test.ipynb @@ -16,289 +16,24 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, + "id": "b40cac32", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "id": "ac98a166", + "metadata": {}, + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, "id": "2476759d", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\u001b[93mWARNING \u001b[0m: DEPRECATED FEATURE: flwr.server.start_server() is deprecated.\n", - "\tInstead, use the `flower-superlink` CLI command to start a SuperLink as shown below:\n", - "\n", - "\t\t$ flower-superlink --insecure\n", - "\n", - "\tTo view usage and all available options, run:\n", - "\n", - "\t\t$ flower-superlink --help\n", - "\n", - "\tUsing `start_server()` is deprecated.\n", - "\n", - " This is a deprecated feature. It will be removed\n", - " entirely in future versions of Flower.\n", - " \n", - "\u001b[92mINFO \u001b[0m: Starting Flower server, config: num_rounds=10, no round_timeout\n", - "\u001b[92mINFO \u001b[0m: Flower ECE: gRPC server running (10 rounds), SSL is disabled\n", - "\u001b[92mINFO \u001b[0m: [INIT]\n", - "\u001b[92mINFO \u001b[0m: Requesting initial parameters from one random client\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Using strategy: FedAvg\n", - "Starting Flower server on 0.0.0.0:8080 with strategy FedAvg\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\u001b[92mINFO \u001b[0m: Received initial parameters from one random client\n", - "\u001b[92mINFO \u001b[0m: Starting evaluation of initial global parameters\n", - "\u001b[92mINFO \u001b[0m: Evaluation returned no results (`None`)\n", - "\u001b[92mINFO \u001b[0m: \n", - "\u001b[92mINFO \u001b[0m: [ROUND 1]\n", - "\u001b[92mINFO \u001b[0m: configure_fit: strategy sampled 1 clients (out of 1)\n", - "\u001b[92mINFO \u001b[0m: aggregate_fit: received 0 results and 1 failures\n", - "\u001b[92mINFO \u001b[0m: configure_evaluate: strategy sampled 1 clients (out of 1)\n", - "\u001b[92mINFO \u001b[0m: aggregate_evaluate: received 1 results and 0 failures\n", - "\u001b[92mINFO \u001b[0m: \n", - "\u001b[92mINFO \u001b[0m: [ROUND 2]\n", - "\u001b[92mINFO \u001b[0m: configure_fit: strategy sampled 1 clients (out of 1)\n", - "\u001b[92mINFO \u001b[0m: aggregate_fit: received 0 results and 1 failures\n", - "\u001b[92mINFO \u001b[0m: configure_evaluate: strategy sampled 1 clients (out of 1)\n", - "\u001b[92mINFO \u001b[0m: aggregate_evaluate: received 1 results and 0 failures\n", - "\u001b[92mINFO \u001b[0m: \n", - "\u001b[92mINFO \u001b[0m: [ROUND 3]\n", - "\u001b[92mINFO \u001b[0m: configure_fit: strategy sampled 1 clients (out of 1)\n", - "\u001b[92mINFO \u001b[0m: aggregate_fit: received 0 results and 1 failures\n", - "\u001b[92mINFO \u001b[0m: configure_evaluate: strategy sampled 1 clients (out of 1)\n", - "\u001b[92mINFO \u001b[0m: aggregate_evaluate: received 1 results and 0 failures\n", - "\u001b[92mINFO \u001b[0m: \n", - "\u001b[92mINFO \u001b[0m: 
[ROUND 4]\n", - "\u001b[92mINFO \u001b[0m: configure_fit: strategy sampled 1 clients (out of 1)\n", - "\u001b[92mINFO \u001b[0m: aggregate_fit: received 0 results and 1 failures\n", - "\u001b[92mINFO \u001b[0m: configure_evaluate: strategy sampled 1 clients (out of 1)\n", - "\u001b[92mINFO \u001b[0m: aggregate_evaluate: received 1 results and 0 failures\n", - "\u001b[92mINFO \u001b[0m: \n", - "\u001b[92mINFO \u001b[0m: [ROUND 5]\n", - "\u001b[92mINFO \u001b[0m: configure_fit: strategy sampled 1 clients (out of 1)\n", - "\u001b[92mINFO \u001b[0m: aggregate_fit: received 0 results and 1 failures\n", - "\u001b[92mINFO \u001b[0m: configure_evaluate: strategy sampled 1 clients (out of 1)\n", - "\u001b[92mINFO \u001b[0m: aggregate_evaluate: received 1 results and 0 failures\n", - "\u001b[92mINFO \u001b[0m: \n", - "\u001b[92mINFO \u001b[0m: [ROUND 6]\n", - "\u001b[92mINFO \u001b[0m: configure_fit: strategy sampled 1 clients (out of 1)\n", - "\u001b[92mINFO \u001b[0m: aggregate_fit: received 0 results and 1 failures\n", - "\u001b[92mINFO \u001b[0m: configure_evaluate: strategy sampled 1 clients (out of 1)\n", - "\u001b[92mINFO \u001b[0m: aggregate_evaluate: received 1 results and 0 failures\n", - "\u001b[92mINFO \u001b[0m: \n", - "\u001b[92mINFO \u001b[0m: [ROUND 7]\n", - "\u001b[92mINFO \u001b[0m: configure_fit: strategy sampled 1 clients (out of 1)\n", - "\u001b[92mINFO \u001b[0m: aggregate_fit: received 0 results and 1 failures\n", - "\u001b[92mINFO \u001b[0m: configure_evaluate: strategy sampled 1 clients (out of 1)\n", - "\u001b[92mINFO \u001b[0m: aggregate_evaluate: received 1 results and 0 failures\n", - "\u001b[92mINFO \u001b[0m: \n", - "\u001b[92mINFO \u001b[0m: [ROUND 8]\n", - "\u001b[92mINFO \u001b[0m: configure_fit: strategy sampled 1 clients (out of 1)\n", - "\u001b[92mINFO \u001b[0m: aggregate_fit: received 0 results and 1 failures\n", - "\u001b[92mINFO \u001b[0m: configure_evaluate: strategy sampled 1 clients (out of 1)\n", - "\u001b[92mINFO \u001b[0m: aggregate_evaluate: received 1 results and 0 failures\n", - "\u001b[92mINFO \u001b[0m: \n", - "\u001b[92mINFO \u001b[0m: [ROUND 9]\n", - "\u001b[92mINFO \u001b[0m: configure_fit: strategy sampled 1 clients (out of 1)\n", - "\u001b[92mINFO \u001b[0m: aggregate_fit: received 0 results and 1 failures\n", - "\u001b[92mINFO \u001b[0m: configure_evaluate: strategy sampled 1 clients (out of 1)\n", - "\u001b[92mINFO \u001b[0m: aggregate_evaluate: received 1 results and 0 failures\n", - "\u001b[92mINFO \u001b[0m: \n", - "\u001b[92mINFO \u001b[0m: [ROUND 10]\n", - "\u001b[92mINFO \u001b[0m: configure_fit: strategy sampled 1 clients (out of 1)\n", - "\u001b[92mINFO \u001b[0m: aggregate_fit: received 0 results and 1 failures\n", - "\u001b[92mINFO \u001b[0m: configure_evaluate: strategy sampled 1 clients (out of 1)\n", - "\u001b[92mINFO \u001b[0m: aggregate_evaluate: received 1 results and 0 failures\n", - "\u001b[92mINFO \u001b[0m: \n", - "\u001b[92mINFO \u001b[0m: [SUMMARY]\n", - "\u001b[92mINFO \u001b[0m: Run finished 10 round(s) in 0.17s\n", - "\u001b[92mINFO \u001b[0m: \tHistory (loss, distributed):\n", - "\u001b[92mINFO \u001b[0m: \t\tround 1: 0.0\n", - "\u001b[92mINFO \u001b[0m: \t\tround 2: 0.0\n", - "\u001b[92mINFO \u001b[0m: \t\tround 3: 0.0\n", - "\u001b[92mINFO \u001b[0m: \t\tround 4: 0.0\n", - "\u001b[92mINFO \u001b[0m: \t\tround 5: 0.0\n", - "\u001b[92mINFO \u001b[0m: \t\tround 6: 0.0\n", - "\u001b[92mINFO \u001b[0m: \t\tround 7: 0.0\n", - "\u001b[92mINFO \u001b[0m: \t\tround 8: 0.0\n", - "\u001b[92mINFO \u001b[0m: \t\tround 9: 
0.0\n", - "\u001b[92mINFO \u001b[0m: \t\tround 10: 0.0\n", - "\u001b[92mINFO \u001b[0m: \tHistory (metrics, distributed, evaluate):\n", - "\u001b[92mINFO \u001b[0m: \t{'eval_accuracy': [(1, 0.659061558329746),\n", - "\u001b[92mINFO \u001b[0m: \t (2, 0.659061558329746),\n", - "\u001b[92mINFO \u001b[0m: \t (3, 0.659061558329746),\n", - "\u001b[92mINFO \u001b[0m: \t (4, 0.659061558329746),\n", - "\u001b[92mINFO \u001b[0m: \t (5, 0.659061558329746),\n", - "\u001b[92mINFO \u001b[0m: \t (6, 0.659061558329746),\n", - "\u001b[92mINFO \u001b[0m: \t (7, 0.659061558329746),\n", - "\u001b[92mINFO \u001b[0m: \t (8, 0.659061558329746),\n", - "\u001b[92mINFO \u001b[0m: \t (9, 0.659061558329746),\n", - "\u001b[92mINFO \u001b[0m: \t (10, 0.659061558329746)],\n", - "\u001b[92mINFO \u001b[0m: \t 'eval_auc': [(1, 0.9768702703059333),\n", - "\u001b[92mINFO \u001b[0m: \t (2, 0.9768702703059333),\n", - "\u001b[92mINFO \u001b[0m: \t (3, 0.9768702703059333),\n", - "\u001b[92mINFO \u001b[0m: \t (4, 0.9768702703059333),\n", - "\u001b[92mINFO \u001b[0m: \t (5, 0.9768702703059333),\n", - "\u001b[92mINFO \u001b[0m: \t (6, 0.9768702703059333),\n", - "\u001b[92mINFO \u001b[0m: \t (7, 0.9768702703059333),\n", - "\u001b[92mINFO \u001b[0m: \t (8, 0.9768702703059333),\n", - "\u001b[92mINFO \u001b[0m: \t (9, 0.9768702703059333),\n", - "\u001b[92mINFO \u001b[0m: \t (10, 0.9768702703059333)],\n", - "\u001b[92mINFO \u001b[0m: \t 'eval_loss': [(1, 0.0),\n", - "\u001b[92mINFO \u001b[0m: \t (2, 0.0),\n", - "\u001b[92mINFO \u001b[0m: \t (3, 0.0),\n", - "\u001b[92mINFO \u001b[0m: \t (4, 0.0),\n", - "\u001b[92mINFO \u001b[0m: \t (5, 0.0),\n", - "\u001b[92mINFO \u001b[0m: \t (6, 0.0),\n", - "\u001b[92mINFO \u001b[0m: \t (7, 0.0),\n", - "\u001b[92mINFO \u001b[0m: \t (8, 0.0),\n", - "\u001b[92mINFO \u001b[0m: \t (9, 0.0),\n", - "\u001b[92mINFO \u001b[0m: \t (10, 0.0)]}\n", - "\u001b[92mINFO \u001b[0m: \n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "✅ Client connected - CID: f0551378b0fb4e2d8387762bce46a59c\n", - "\n", - "📋 [Round 1] Client f0551378b0fb4e2d8387762bce46a59c Properties: {'features': 'Area,MajorAxisLength,MinorAxisLength,Eccentricity,ConvexArea,Extent,Perimeter', 'num_features': 7, 'total_memory_gb': 62.72, 'os_type': 'Linux', 'cpu_physical_cores': 12, 'target': 'label', 'classes': '0.0,1.0', 'label_distribution': '1.0:7650,0.0:3961', 'cpu_logical_cores': 24, 'gpu_driver_present': 'True', 'num_samples': 9288, 'gpu_count': 0, 'hostname': 'dinf-medomi-05b', 'model_type': 'xgb'}\n", - "\n", - "[Server] 🔄 Round 1 - Client Training Metrics:\n", - "[Server] ✅ Round 1 - Aggregated Training Metrics: {}\n", - "\n", - "\n", - "[Server] 📊 Round 1 - Client Evaluation Metrics:\n", - " CEM Round 1 Client:f0551378b0fb4e2d8387762bce46a59c: {'eval_auc': 0.9768702703059333, 'eval_accuracy': 0.659061558329746}\n", - "[Server] ✅ Round 1 - Aggregated Evaluation Metrics:\n", - " Loss: 0.0, Metrics: {'eval_loss': 0.0, 'eval_accuracy': 0.659061558329746, 'eval_auc': 0.9768702703059333}\n", - "\n", - "\n", - "📋 [Round 2] Client f0551378b0fb4e2d8387762bce46a59c Properties: {'features': 'Area,MajorAxisLength,MinorAxisLength,Eccentricity,ConvexArea,Extent,Perimeter', 'num_features': 7, 'total_memory_gb': 62.72, 'os_type': 'Linux', 'cpu_physical_cores': 12, 'target': 'label', 'classes': '0.0,1.0', 'label_distribution': '1.0:7650,0.0:3961', 'cpu_logical_cores': 24, 'gpu_driver_present': 'True', 'num_samples': 9288, 'gpu_count': 0, 'hostname': 'dinf-medomi-05b', 'model_type': 'xgb'}\n", - "\n", - "[Server] 🔄 Round 2 - Client 
Training Metrics:\n", - "[Server] ✅ Round 2 - Aggregated Training Metrics: {}\n", - "\n", - "\n", - "[Server] 📊 Round 2 - Client Evaluation Metrics:\n", - " CEM Round 2 Client:f0551378b0fb4e2d8387762bce46a59c: {'eval_auc': 0.9768702703059333, 'eval_accuracy': 0.659061558329746}\n", - "[Server] ✅ Round 2 - Aggregated Evaluation Metrics:\n", - " Loss: 0.0, Metrics: {'eval_loss': 0.0, 'eval_accuracy': 0.659061558329746, 'eval_auc': 0.9768702703059333}\n", - "\n", - "\n", - "📋 [Round 3] Client f0551378b0fb4e2d8387762bce46a59c Properties: {'features': 'Area,MajorAxisLength,MinorAxisLength,Eccentricity,ConvexArea,Extent,Perimeter', 'num_features': 7, 'total_memory_gb': 62.72, 'os_type': 'Linux', 'cpu_physical_cores': 12, 'target': 'label', 'classes': '0.0,1.0', 'label_distribution': '1.0:7650,0.0:3961', 'cpu_logical_cores': 24, 'gpu_driver_present': 'True', 'num_samples': 9288, 'gpu_count': 0, 'hostname': 'dinf-medomi-05b', 'model_type': 'xgb'}\n", - "\n", - "[Server] 🔄 Round 3 - Client Training Metrics:\n", - "[Server] ✅ Round 3 - Aggregated Training Metrics: {}\n", - "\n", - "[Server] ⚠️ Skipped checkpoint: aggregate_fit returned None (no aggregated parameters this round).\n", - "\n", - "[Server] 📊 Round 3 - Client Evaluation Metrics:\n", - " CEM Round 3 Client:f0551378b0fb4e2d8387762bce46a59c: {'eval_auc': 0.9768702703059333, 'eval_accuracy': 0.659061558329746}\n", - "[Server] ✅ Round 3 - Aggregated Evaluation Metrics:\n", - " Loss: 0.0, Metrics: {'eval_loss': 0.0, 'eval_accuracy': 0.659061558329746, 'eval_auc': 0.9768702703059333}\n", - "\n", - "\n", - "📋 [Round 4] Client f0551378b0fb4e2d8387762bce46a59c Properties: {'features': 'Area,MajorAxisLength,MinorAxisLength,Eccentricity,ConvexArea,Extent,Perimeter', 'num_features': 7, 'total_memory_gb': 62.72, 'os_type': 'Linux', 'cpu_physical_cores': 12, 'target': 'label', 'classes': '0.0,1.0', 'label_distribution': '1.0:7650,0.0:3961', 'cpu_logical_cores': 24, 'gpu_driver_present': 'True', 'num_samples': 9288, 'gpu_count': 0, 'hostname': 'dinf-medomi-05b', 'model_type': 'xgb'}\n", - "\n", - "[Server] 🔄 Round 4 - Client Training Metrics:\n", - "[Server] ✅ Round 4 - Aggregated Training Metrics: {}\n", - "\n", - "\n", - "[Server] 📊 Round 4 - Client Evaluation Metrics:\n", - " CEM Round 4 Client:f0551378b0fb4e2d8387762bce46a59c: {'eval_auc': 0.9768702703059333, 'eval_accuracy': 0.659061558329746}\n", - "[Server] ✅ Round 4 - Aggregated Evaluation Metrics:\n", - " Loss: 0.0, Metrics: {'eval_loss': 0.0, 'eval_accuracy': 0.659061558329746, 'eval_auc': 0.9768702703059333}\n", - "\n", - "\n", - "📋 [Round 5] Client f0551378b0fb4e2d8387762bce46a59c Properties: {'features': 'Area,MajorAxisLength,MinorAxisLength,Eccentricity,ConvexArea,Extent,Perimeter', 'num_features': 7, 'total_memory_gb': 62.72, 'os_type': 'Linux', 'cpu_physical_cores': 12, 'target': 'label', 'classes': '0.0,1.0', 'label_distribution': '1.0:7650,0.0:3961', 'cpu_logical_cores': 24, 'gpu_driver_present': 'True', 'num_samples': 9288, 'gpu_count': 0, 'hostname': 'dinf-medomi-05b', 'model_type': 'xgb'}\n", - "\n", - "[Server] 🔄 Round 5 - Client Training Metrics:\n", - "[Server] ✅ Round 5 - Aggregated Training Metrics: {}\n", - "\n", - "\n", - "[Server] 📊 Round 5 - Client Evaluation Metrics:\n", - " CEM Round 5 Client:f0551378b0fb4e2d8387762bce46a59c: {'eval_auc': 0.9768702703059333, 'eval_accuracy': 0.659061558329746}\n", - "[Server] ✅ Round 5 - Aggregated Evaluation Metrics:\n", - " Loss: 0.0, Metrics: {'eval_loss': 0.0, 'eval_accuracy': 0.659061558329746, 'eval_auc': 0.9768702703059333}\n", - 
"\n", - "\n", - "📋 [Round 6] Client f0551378b0fb4e2d8387762bce46a59c Properties: {'features': 'Area,MajorAxisLength,MinorAxisLength,Eccentricity,ConvexArea,Extent,Perimeter', 'num_features': 7, 'total_memory_gb': 62.72, 'os_type': 'Linux', 'cpu_physical_cores': 12, 'target': 'label', 'classes': '0.0,1.0', 'label_distribution': '1.0:7650,0.0:3961', 'cpu_logical_cores': 24, 'gpu_driver_present': 'True', 'num_samples': 9288, 'gpu_count': 0, 'hostname': 'dinf-medomi-05b', 'model_type': 'xgb'}\n", - "\n", - "[Server] 🔄 Round 6 - Client Training Metrics:\n", - "[Server] ✅ Round 6 - Aggregated Training Metrics: {}\n", - "\n", - "[Server] ⚠️ Skipped checkpoint: aggregate_fit returned None (no aggregated parameters this round).\n", - "\n", - "[Server] 📊 Round 6 - Client Evaluation Metrics:\n", - " CEM Round 6 Client:f0551378b0fb4e2d8387762bce46a59c: {'eval_auc': 0.9768702703059333, 'eval_accuracy': 0.659061558329746}\n", - "[Server] ✅ Round 6 - Aggregated Evaluation Metrics:\n", - " Loss: 0.0, Metrics: {'eval_loss': 0.0, 'eval_accuracy': 0.659061558329746, 'eval_auc': 0.9768702703059333}\n", - "\n", - "\n", - "📋 [Round 7] Client f0551378b0fb4e2d8387762bce46a59c Properties: {'features': 'Area,MajorAxisLength,MinorAxisLength,Eccentricity,ConvexArea,Extent,Perimeter', 'num_features': 7, 'total_memory_gb': 62.72, 'os_type': 'Linux', 'cpu_physical_cores': 12, 'target': 'label', 'classes': '0.0,1.0', 'label_distribution': '1.0:7650,0.0:3961', 'cpu_logical_cores': 24, 'gpu_driver_present': 'True', 'num_samples': 9288, 'gpu_count': 0, 'hostname': 'dinf-medomi-05b', 'model_type': 'xgb'}\n", - "\n", - "[Server] 🔄 Round 7 - Client Training Metrics:\n", - "[Server] ✅ Round 7 - Aggregated Training Metrics: {}\n", - "\n", - "\n", - "[Server] 📊 Round 7 - Client Evaluation Metrics:\n", - " CEM Round 7 Client:f0551378b0fb4e2d8387762bce46a59c: {'eval_auc': 0.9768702703059333, 'eval_accuracy': 0.659061558329746}\n", - "[Server] ✅ Round 7 - Aggregated Evaluation Metrics:\n", - " Loss: 0.0, Metrics: {'eval_loss': 0.0, 'eval_accuracy': 0.659061558329746, 'eval_auc': 0.9768702703059333}\n", - "\n", - "\n", - "📋 [Round 8] Client f0551378b0fb4e2d8387762bce46a59c Properties: {'features': 'Area,MajorAxisLength,MinorAxisLength,Eccentricity,ConvexArea,Extent,Perimeter', 'num_features': 7, 'total_memory_gb': 62.72, 'os_type': 'Linux', 'cpu_physical_cores': 12, 'target': 'label', 'classes': '0.0,1.0', 'label_distribution': '1.0:7650,0.0:3961', 'cpu_logical_cores': 24, 'gpu_driver_present': 'True', 'num_samples': 9288, 'gpu_count': 0, 'hostname': 'dinf-medomi-05b', 'model_type': 'xgb'}\n", - "\n", - "[Server] 🔄 Round 8 - Client Training Metrics:\n", - "[Server] ✅ Round 8 - Aggregated Training Metrics: {}\n", - "\n", - "\n", - "[Server] 📊 Round 8 - Client Evaluation Metrics:\n", - " CEM Round 8 Client:f0551378b0fb4e2d8387762bce46a59c: {'eval_auc': 0.9768702703059333, 'eval_accuracy': 0.659061558329746}\n", - "[Server] ✅ Round 8 - Aggregated Evaluation Metrics:\n", - " Loss: 0.0, Metrics: {'eval_loss': 0.0, 'eval_accuracy': 0.659061558329746, 'eval_auc': 0.9768702703059333}\n", - "\n", - "\n", - "📋 [Round 9] Client f0551378b0fb4e2d8387762bce46a59c Properties: {'features': 'Area,MajorAxisLength,MinorAxisLength,Eccentricity,ConvexArea,Extent,Perimeter', 'num_features': 7, 'total_memory_gb': 62.72, 'os_type': 'Linux', 'cpu_physical_cores': 12, 'target': 'label', 'classes': '0.0,1.0', 'label_distribution': '1.0:7650,0.0:3961', 'cpu_logical_cores': 24, 'gpu_driver_present': 'True', 'num_samples': 9288, 'gpu_count': 0, 'hostname': 
'dinf-medomi-05b', 'model_type': 'xgb'}\n", - "\n", - "[Server] 🔄 Round 9 - Client Training Metrics:\n", - "[Server] ✅ Round 9 - Aggregated Training Metrics: {}\n", - "\n", - "[Server] ⚠️ Skipped checkpoint: aggregate_fit returned None (no aggregated parameters this round).\n", - "\n", - "[Server] 📊 Round 9 - Client Evaluation Metrics:\n", - " CEM Round 9 Client:f0551378b0fb4e2d8387762bce46a59c: {'eval_auc': 0.9768702703059333, 'eval_accuracy': 0.659061558329746}\n", - "[Server] ✅ Round 9 - Aggregated Evaluation Metrics:\n", - " Loss: 0.0, Metrics: {'eval_loss': 0.0, 'eval_accuracy': 0.659061558329746, 'eval_auc': 0.9768702703059333}\n", - "\n", - "\n", - "📋 [Round 10] Client f0551378b0fb4e2d8387762bce46a59c Properties: {'features': 'Area,MajorAxisLength,MinorAxisLength,Eccentricity,ConvexArea,Extent,Perimeter', 'num_features': 7, 'total_memory_gb': 62.72, 'os_type': 'Linux', 'cpu_physical_cores': 12, 'target': 'label', 'classes': '0.0,1.0', 'label_distribution': '1.0:7650,0.0:3961', 'cpu_logical_cores': 24, 'gpu_driver_present': 'True', 'num_samples': 9288, 'gpu_count': 0, 'hostname': 'dinf-medomi-05b', 'model_type': 'xgb'}\n", - "\n", - "[Server] 🔄 Round 10 - Client Training Metrics:\n", - "[Server] ✅ Round 10 - Aggregated Training Metrics: {}\n", - "\n", - "[Server] ⚠️ Skipped checkpoint: aggregate_fit returned None (no aggregated parameters this round).\n", - "\n", - "[Server] 📊 Round 10 - Client Evaluation Metrics:\n", - " CEM Round 10 Client:f0551378b0fb4e2d8387762bce46a59c: {'eval_auc': 0.9768702703059333, 'eval_accuracy': 0.659061558329746}\n", - "[Server] ✅ Round 10 - Aggregated Evaluation Metrics:\n", - " Loss: 0.0, Metrics: {'eval_loss': 0.0, 'eval_accuracy': 0.659061558329746, 'eval_auc': 0.9768702703059333}\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "from MEDfl.rw.server import FederatedServer, Strategy\n", "\n", @@ -309,14 +44,30 @@ "min_fit_clients=1,\n", "min_evaluate_clients=1,\n", "min_available_clients=1 , \n", - "local_epochs=1 , \n", + "local_epochs=10 , \n", "threshold=0.5, \n", "learning_rate=0.01, \n", "optimizer_name=\"SGD\", \n", "saveOnRounds=3, \n", "savingPath=\"./\", \n", "total_rounds=10, \n", - "datasetConfig={\"isGlobal\" : True, \"globalConfig\" : {\"target\" : \"lable\" , \"testFrac\" : 0.2}}\n", + "# datasetConfig={\n", + "# # Global-only schema\n", + "# \"target\": \"label\", # optional, default = last CSV column\n", + "# \"testFrac\": 0.20, # fraction for test split\n", + "# \"valFrac\": 0.10, # fraction for validation split\n", + "# \"selectedColumns\": [ # list[str] OR list[{label,name}]\n", + "# {\"label\": \"Area\", \"name\": \"Area\"},\n", + "# {\"label\": \"Perimeter\", \"name\": \"Perimeter\"},\n", + "# {\"label\": \"Eccentricity\", \"name\": \"Eccentricity\"},\n", + "# {\"label\": \"MajorAxisLength\", \"name\": \"MajorAxisLength\"},\n", + "# {\"label\": \"MinorAxisLength\", \"name\": \"MinorAxisLength\"},\n", + "# {\"label\": \"ConvexArea\", \"name\": \"ConvexArea\"},\n", + "# {\"label\": \"Extent\", \"name\": \"Extent\"},\n", + " \n", + "# ],\n", + "# },\n", + "\n", ")\n", "\n", "\n", @@ -335,6 +86,132 @@ "id": "a8f3a295", "metadata": {}, "outputs": [], + "source": [ + "# run_server.py\n", + "from MEDfl.rw.server import FederatedServer, Strategy\n", + "\n", + "custom_strategy = Strategy(\n", + " name=\"FedAvg\",\n", + " fraction_fit=1.0,\n", + " min_fit_clients=1,\n", + " min_evaluate_clients=1,\n", + " min_available_clients=1,\n", + " local_epochs=10,\n", + " threshold=0.5,\n", + " learning_rate=0.01,\n", + " 
optimizer_name=\"SGD\",\n", + " saveOnRounds=3,\n", + " savingPath=\"./\",\n", + " total_rounds=10,\n", + "\n", + " # NEW — enforce the schema for every client\n", + " features=\"MajorAxisLength,Area,Eccentricity,ConvexArea\", # comma-separated cols in every client CSV\n", + " target=\"label\", # the target column name\n", + "\n", + " # NEW — defaults for all clients (clients may override)\n", + " val_fraction=0.15,\n", + " test_fraction=0.10,\n", + ")\n", + "\n", + "server = FederatedServer(\n", + " host=\"0.0.0.0\",\n", + " port=8080,\n", + " num_rounds=10,\n", + " strategy=custom_strategy,\n", + ")\n", + "\n", + "if __name__ == \"__main__\":\n", + " server.start()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9d668d84", + "metadata": {}, + "outputs": [], + "source": [ + "# run_server.py\n", + "from MEDfl.rw.server import FederatedServer, Strategy\n", + "\n", + "test_ids = \",\".join(map(str, range(1500))) # IDs 0 to 99\n", + "test_ids = list(map(int, test_ids.split(\",\")))\n", + "\n", + "\n", + "client_fractions = {\n", + " \"dinf-medomi-05b\": {\n", + " \"val_fraction\": 0.1,\n", + " \"test_ids\" : list(range(1500))\n", + " }\n", + "}\n", + "\n", + "custom_strategy = Strategy(\n", + " name=\"FedAvg\",\n", + " fraction_fit=1.0,\n", + " min_fit_clients=1,\n", + " min_evaluate_clients=1,\n", + " min_available_clients=1,\n", + " local_epochs=10,\n", + " threshold=0.5,\n", + " learning_rate=0.01,\n", + " optimizer_name=\"SGD\",\n", + " saveOnRounds=3,\n", + " savingPath=\"./\",\n", + " total_rounds=10,\n", + "\n", + " # NEW — enforce the schema for every client\n", + " features=\"MajorAxisLength,Area,Eccentricity,ConvexArea\", # comma-separated cols in every client CSV\n", + " target=\"label\", # the target column name\n", + "\n", + " # NEW — defaults for all clients (clients may override)\n", + " val_fraction=0.15,\n", + " test_fraction=0.10,\n", + "\n", + " split_mode=\"per_client\" , \n", + " id_col = \"id\" , \n", + "\n", + " client_fractions=client_fractions\n", + ")\n", + "\n", + "server = FederatedServer(\n", + " host=\"0.0.0.0\",\n", + " port=8080,\n", + " num_rounds=10,\n", + " strategy=custom_strategy,\n", + ")\n", + "\n", + "if __name__ == \"__main__\":\n", + " server.start()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "b4229405", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'dinf-medomi-05b'" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import socket\n", + "socket.gethostname()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "faf04b0a", + "metadata": {}, + "outputs": [], "source": [] } ], diff --git a/setup.py b/setup.py index 7b05868..c811fa0 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ setup( name="MEDfl", - version="2.0.5", + version="2.0.5.dev4", author="MEDomics consortium", author_email="medomics.info@gmail.com", description="Python Open-source package for simulating federated learning and differential privacy",