diff --git a/.gitignore b/.gitignore index 595392e7..92203703 100644 --- a/.gitignore +++ b/.gitignore @@ -5,7 +5,7 @@ AD_Miner/sources/modules/temporary* *node_modules* /tests /test - +evolution_data/ # Byte-compiled / optimized / DLL files __pycache__/ @@ -170,6 +170,3 @@ cython_debug/ # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. #.idea/ - -# VS Code -.vscode/ diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..1217178f --- /dev/null +++ b/Dockerfile @@ -0,0 +1,11 @@ +FROM python:3.11-slim + +# Set the working directory in the container +WORKDIR /tmp + +# Install necessary system dependencies for Git +RUN apt-get update && apt-get install -y git \ + && rm -rf /var/lib/apt/lists/* + +# Install AD-Miner from the Git repository +RUN pip install --no-cache-dir 'git+https://github.com/AD-Security/AD_Miner.git' diff --git a/README.md b/README.md index 53ab9935..537cfb89 100755 --- a/README.md +++ b/README.md @@ -82,7 +82,7 @@ To run AD Miner, you first need a neo4j database which contains the Active Direc The easier way is to do the following command using `pipx`: ```shell -pipx install 'git+https://github.com/Mazars-Tech/AD_Miner.git' +pipx install 'git+https://github.com/AD-Security/AD_Miner.git' ``` ADMiner is also available on some Linux distributions: @@ -92,6 +92,26 @@ ADMiner is also available on some Linux distributions: - BlackArch: `pacman -S ad-miner` - NixOS: `nix-env -iA nixos.ad-miner` +A Docker image is available to build. Build the image with the following commmand: + +```sh +docker build -t ad-miner . 
+``` + +To run this on Windows with the BloodHound Community Edition data, use the commands below: + +```sh +docker run -v ${PWD}:/tmp ad-miner AD-miner -b bolt://host.docker.internal:7687 -u neo4j -p mypassword -cf YOUR_PREFIX +``` + +To run this on Linux with the BloodHound Community Edition data, use the commands below: + +```sh +docker run -v ${PWD}:/tmp --network host ad-miner AD-miner -b bolt://localhost:7687 -u neo4j -p mypassword -cf YOUR_PREFIX +``` + +Note that mounting the volume with `-v` is critical to get the output of the data. This assumes that the BHCE server is running on the Docker host with default settings. + ## Usage ## Run the tool: @@ -154,13 +174,15 @@ In the graph pages, you can right-click on the graph nodes to cluster them or to If you have multiple AD-Miner reports over time, you can easily track the evolution with the `--evolution` argument: each AD-Miner report generates a JSON data file alongside the `index.html` file. You just need to gather these different JSON files into a single folder and specify the path to that folder after the `--evolution` argument. -A tab called 'Evolution over time' then appears on the main page. + AD-miner -c -cf My_Report -b bolt://server:7687 -u neo4j -p mypassword -r 180 --evolution evolution_folder/ + +An 'Evolution over time' tab appears on the main page, providing evolution graphs for each category (Permissions, Passwords, Kerberos, and Misc).
diff --git a/ad_miner/__main__.py b/ad_miner/__main__.py
index 456bc361..7d6baa6c 100644
--- a/ad_miner/__main__.py
+++ b/ad_miner/__main__.py
@@ -8,6 +8,9 @@
import traceback
import signal
import sys
+import subprocess
+import requests
+from importlib.metadata import version, PackageNotFoundError
# Local library imports
from ad_miner.sources.modules import logger, utils, generic_formating, main_page
@@ -142,6 +145,48 @@ def prepare_render(arguments) -> None:
shutil.copy2(js_file, folder_name / "js")
+def get_version_and_commit():
+ # When installed via pip(x), report the package version; in a dev environment, fall back to the git commit
+ try:
+ ver = version("ad-miner")
+ except PackageNotFoundError:
+ ver = "unknown"
+
+ commit = ""
+ try:
+ root = Path(__file__).resolve().parent
+ commit = subprocess.check_output(
+ ["git", "rev-parse", "--short", "HEAD"], cwd=root, stderr=subprocess.DEVNULL, text=True
+ ).strip()
+ except Exception:
+ commit = "unknown"
+ return ver, commit
+
+
+def get_last_version():
+ try:
+ r = requests.get("https://www.ad-miner.com/version.json", timeout=0.5)
+ data = r.json()
+ return data["lastversion"]
+ except Exception:
+ return "unreachable"
+
+
+def check_version(lastversion, currentversion):
+ try:
+ last = tuple(map(int, lastversion.lstrip("v").split(".")))
+ current = tuple(map(int, currentversion.lstrip("v").split(".")))
+
+ if last > current:
+ logger.print_error(f"New AD Miner version {lastversion} available.")
+ logger.print_error(
+ "Update with pipx or manually on https://github.com/AD-Security/AD_Miner"
+ )
+ return
+ except Exception:
+ return
+
+
def main() -> None:
"""Main execution function for the script."""
start = time.time()
@@ -163,6 +208,12 @@ def main() -> None:
prepare_render(arguments)
+ AD_miner_version, AD_miner_commit = get_version_and_commit()
+ arguments.version = AD_miner_version
+ arguments.commit = AD_miner_commit
+
+ check_version(get_last_version(), AD_miner_version)
+
neo4j_version, extract_date, total_objects, number_relations, boolean_azure = pre_request(
arguments
)
diff --git a/ad_miner/sources/html/bootstrap/css/custom.css b/ad_miner/sources/html/bootstrap/css/custom.css
index 8338397a..7e407995 100755
--- a/ad_miner/sources/html/bootstrap/css/custom.css
+++ b/ad_miner/sources/html/bootstrap/css/custom.css
@@ -389,4 +389,9 @@ a:hover{
.percentage-evolution {
font-size: small;
font-weight: 800;
+}
+
+.ag-cell-value i.bi {
+ margin-right: 4px;
+ vertical-align: middle;
}
\ No newline at end of file
diff --git a/ad_miner/sources/html/components/grid/grid_template.html b/ad_miner/sources/html/components/grid/grid_template.html
index 4945fb76..4719b92f 100755
--- a/ad_miner/sources/html/components/grid/grid_template.html
+++ b/ad_miner/sources/html/components/grid/grid_template.html
@@ -99,10 +99,18 @@
rowData2.push(new_dico);
}
}
+ else if (window.location.href.includes('computers_with_administrators_details')) {
+ if (rowData[i]['Computer'] == parameter) {
+ var new_dico = {};
+ new_dico['Users'] = rowData[i]['Users'];
+ new_dico['Distinguished Name'] = rowData[i]['Distinguished Name'];
+ rowData2.push(new_dico);
+ }
+ }
else if (window.location.href.includes('users_rdp_access') || window.location.href.includes('computers_list_of_rdp_users')) {
if (rowData[i][keys[0]].includes(parameter)) {
- const startSubstring = "
"; + const startSubstring = "
"; const endSubstring = "
"; const startIndex = rowData[i][keys[1]].indexOf(startSubstring) + startSubstring.length; @@ -130,7 +138,12 @@ } } } - columnDefs2 = [{field: parameter}]; + if (window.location.href.includes('computers_with_administrators_details')) { + columnDefs2 = [{field: 'Users'},{field: 'Distinguished Name'}] + } + else { + columnDefs2 = [{field: parameter}]; + } return [rowData2, columnDefs2]; } diff --git a/ad_miner/sources/html/templates/main_header.html b/ad_miner/sources/html/templates/main_header.html index 9257e76a..489fc9ec 100644 --- a/ad_miner/sources/html/templates/main_header.html +++ b/ad_miner/sources/html/templates/main_header.html @@ -125,7 +125,7 @@{self.users_rdp_access_2[key]}
", + "value": f"{len(self.users_rdp_access_2[key])} Users{self.users_rdp_access_2[key]}
", "link": f"computers_list_of_rdp_users.html?parameter={quote(str(key))}", "before_link": f'', } diff --git a/ad_miner/sources/modules/controls/computers_members_high_privilege.py b/ad_miner/sources/modules/controls/computers_members_high_privilege.py index 403896eb..69fc410f 100644 --- a/ad_miner/sources/modules/controls/computers_members_high_privilege.py +++ b/ad_miner/sources/modules/controls/computers_members_high_privilege.py @@ -45,9 +45,9 @@ def run(self): ) grid = Grid("List of computer admins") for d in self.computers_members_high_privilege: - d["domain"] = ' ' + d["domain"] - d["computer"] = ' ' + d["computer"] - d["group"] = ' ' + d["group"] + d["domain"] = '' + d["domain"] + d["computer"] = '' + d["computer"] + d["group"] = '' + d["group"] grid.setheaders(["domain", "computer", "group"]) grid.setData(self.computers_members_high_privilege) page.addComponent(grid) diff --git a/ad_miner/sources/modules/controls/computers_os_obsolete.py b/ad_miner/sources/modules/controls/computers_os_obsolete.py index 31083034..29565901 100644 --- a/ad_miner/sources/modules/controls/computers_os_obsolete.py +++ b/ad_miner/sources/modules/controls/computers_os_obsolete.py @@ -41,31 +41,33 @@ def run(self): for computer in self.list_computers_os_obsolete: if computer["Last logon in days"] < 90: # remove ghost computers computer["Domain"] = ( - ' ' + computer["Domain"] + '' + computer["Domain"] ) computer["Last logon"] = days_format(computer["Last logon in days"]) if ( "2008" in computer["Operating system"] or "2003" in computer["Operating system"] or "2012" in computer["Operating system"] + or "WINDOWS EMBEDDED STANDARD SERVICE PACK 1" in computer["Operating system"].upper() ): # Add icons whether it's a computer or a server computer["Operating system"] = ( - ' ' + computer["Operating system"] + '' + computer["Operating system"] ) computer["name"] = ( - ' ' + computer["name"] + '' + computer["name"] ) if ( "2000" in computer["Operating system"] - or "XP" in 
computer["Operating system"] - or "Windows 7" in computer["Operating system"] + or "XP" in computer["Operating system"].upper() + or "WINDOWS 7" in computer["Operating system"].upper() + or "VISTA" in computer["Operating system"].upper() ): computer["Operating system"] = ( - ' ' + '' + computer["Operating system"] ) computer["name"] = ( - ' ' + computer["name"] + '' + computer["name"] ) cleaned_data.append(computer) diff --git a/ad_miner/sources/modules/controls/computers_without_laps.py b/ad_miner/sources/modules/controls/computers_without_laps.py index 9f59bb98..7be455a4 100644 --- a/ad_miner/sources/modules/controls/computers_without_laps.py +++ b/ad_miner/sources/modules/controls/computers_without_laps.py @@ -31,18 +31,7 @@ def run(self): return if len(self.list_total_computers) != 0: - stat_LAPS = round( - 100 - * len( - [ - computer_has_laps - for computer_has_laps in self.computers_nb_has_laps - if "ENABLED" in computer_has_laps["LAPS"].upper() - or "TRUE" in computer_has_laps["LAPS"].upper() - ] - ) - / (len(self.computers_nb_has_laps) + 0.001) - ) + stat_LAPS = round(100 * len([computer_has_laps for computer_has_laps in self.computers_nb_has_laps if "ENABLED" in computer_has_laps["LAPS"].upper() or "TRUE" in computer_has_laps["LAPS"].upper()]) / (len(self.computers_nb_has_laps) + 0.001)) else: stat_LAPS = 0 @@ -61,25 +50,17 @@ def run(self): for computer in self.computers_nb_has_laps: tmp_dict = {} # If value is None - if not computer.get("lastLogon"): + if not "lastLogon" in computer.keys(): continue # Exclude ghost computers (last logon > 90 days) if computer["lastLogon"] < 90: - tmp_dict["domain"] = ( - ' ' + computer["domain"] - ) + tmp_dict["domain"] = '' + computer["domain"] tmp_dict["Last logon"] = days_format(computer["lastLogon"]) - tmp_dict["name"] = ( - ' ' + computer["name"] - ) + tmp_dict["name"] = '' + computer["name"] if computer["LAPS"] == "false": - tmp_dict["LAPS"] = ( - ' Disabled' - ) + tmp_dict["LAPS"] = 'Disabled' else: - 
tmp_dict["LAPS"] = ( - ' Enabled' - ) + tmp_dict["LAPS"] = 'Enabled' cleaned_data.append(tmp_dict) self.computers_nb_has_laps = cleaned_data grid.setData(cleaned_data) diff --git a/ad_miner/sources/modules/controls/cross_domain_admin_privileges.py b/ad_miner/sources/modules/controls/cross_domain_admin_privileges.py index 42d71e55..4b76842e 100644 --- a/ad_miner/sources/modules/controls/cross_domain_admin_privileges.py +++ b/ad_miner/sources/modules/controls/cross_domain_admin_privileges.py @@ -95,7 +95,7 @@ def run(self): user = key tmp_data = {} - tmp_data["user"] = ' ' + user + tmp_data["user"] = '' + user grid_list_local_admin_targets_data = [] grid_list_domain_admin_targets_data = [] # create the grid @@ -114,7 +114,7 @@ def run(self): local_distinct_ends = [] for domain in data_local_admins[key]: list_local_admin_targets_tmp_data = { - "domain": ' ' + domain + "domain": '' + domain } numberofpaths = 0 for path in data_local_admins[key][domain]: @@ -126,15 +126,12 @@ def run(self): if last_node_name not in local_distinct_ends: local_distinct_ends.append(last_node_name) sortClass = last_node_name.zfill(6) - list_local_admin_targets_tmp_data_copy["target"] = ( - grid_data_stringify( - { - "value": f"{last_node_name}", - "link": "%s_paths_cross_domain_local_admin.html" - % user, - "before_link": f'', - } - ) + list_local_admin_targets_tmp_data_copy["target"] = grid_data_stringify( + { + "value": f"{last_node_name}", + "link": "%s_paths_cross_domain_local_admin.html" % user, + "before_link": f'', + } ) grid_list_local_admin_targets_data.append( @@ -146,7 +143,7 @@ def run(self): { "value": f"{nb_local_distinct_ends} computers impacted", "link": "%s_paths_cross_domain_local_admin.html" % user, - "before_link": f'', + "before_link": f'', } ) createGraphPage( @@ -194,7 +191,7 @@ def run(self): domain_distinct_ends = [] for domain in data_domain_admins[key]: list_domain_admin_targets_tmp_data = { - "domain": ' ' + domain + "domain": '' + domain } for path in 
data_domain_admins[key][domain]: @@ -208,15 +205,12 @@ def run(self): domain_distinct_ends.append(last_node_name) sortClass = last_node_name.zfill(6) - list_domain_admin_targets_tmp_data_copy["target"] = ( - grid_data_stringify( - { - "value": f"{last_node_name}", - "link": "%s_paths_cross_domain_domain_admin.html" - % user, - "before_link": f'', - } - ) + list_domain_admin_targets_tmp_data_copy["target"] = grid_data_stringify( + { + "value": f"{last_node_name}", + "link": "%s_paths_cross_domain_domain_admin.html" % user, + "before_link": f'', + } ) grid_list_domain_admin_targets_data.append( @@ -229,7 +223,7 @@ def run(self): { "value": f"{len(list(data_domain_admins[key].keys()))} domains impacted", "link": "%s_paths_cross_domain_domain_admin.html" % user, - "before_link": f'', + "before_link": f'', } ) createGraphPage( diff --git a/ad_miner/sources/modules/controls/da_to_da.py b/ad_miner/sources/modules/controls/da_to_da.py index 4666df24..cf284a1a 100644 --- a/ad_miner/sources/modules/controls/da_to_da.py +++ b/ad_miner/sources/modules/controls/da_to_da.py @@ -57,7 +57,7 @@ def run(self): headers.append(domain) graphDatas[domain] = {} pathLengthss.append( - {"FROM / TO": ' ' + domain, domain: "-"} + {"FROM / TO": '' + domain, domain: "-"} ) for path in paths: # headers and pathLengths share the same index and it is cheaper to use headers here @@ -70,7 +70,7 @@ def run(self): graphDatas[unknown_domain] = {} pathLengthss.append( { - "FROM / TO": ' ' + unknown_domain, + "FROM / TO": '' + unknown_domain, unknown_domain: "-", } ) @@ -129,7 +129,7 @@ def run(self): ] = f"{row[key]['value']} path{'s' if row[key]['value'] > 1 else ''}" row[key][ "before_link" - ] = f"" + ] = f"" row[key] = grid_data_stringify(row[key]) # Add some text to empty cells for header in headers: diff --git a/ad_miner/sources/modules/controls/dom_admin_on_non_dc.py b/ad_miner/sources/modules/controls/dom_admin_on_non_dc.py index 6dcc10bf..e9603a49 100644 --- 
a/ad_miner/sources/modules/controls/dom_admin_on_non_dc.py +++ b/ad_miner/sources/modules/controls/dom_admin_on_non_dc.py @@ -57,14 +57,14 @@ def run(self): data = [] for da in dico.keys(): tmp = {} - tmp["Domain"] = ' ' + dico[da]["domain"] - tmp["Domain Admin"] = ' ' + da + tmp["Domain"] = '' + dico[da]["domain"] + tmp["Domain Admin"] = '' + da nb_computers = len(dico[da]["computers"]) tmp["Computers"] = grid_data_stringify( { "link": f"dom_admin_on_non_dc_list_of_{quote(str(da.replace(' ', '_')))}.html", "value": f'{nb_computers} computer{"s" if nb_computers > 1 else ""} impacted', - "before_link": f" ", + "before_link": f"", } ) nb_domains = len(dico[da]["domains_impacted"].keys()) @@ -72,7 +72,7 @@ def run(self): { "link": f"dom_admin_on_non_dc_domain_list_of_{quote(str(da.replace(' ', '_')))}.html", "value": f'{nb_domains} domain{"s" if nb_domains > 1 else ""} impacted', - "before_link": f" ", + "before_link": f"", } ) nb_paths = len(dico[da]["paths"]) @@ -80,7 +80,7 @@ def run(self): { "link": f"dom_admin_on_non_dc_paths_from_{quote(str(da.replace(' ', '_')))}.html", "value": f'{nb_paths} path{"s" if nb_paths > 1 else ""}', - "before_link": f" ", + "before_link": f"", } ) data.append(tmp) @@ -103,7 +103,7 @@ def run(self): ) computer_list_grid.setheaders(["Computer"]) computer_list_data = [ - {"Computer": ' ' + c} + {"Computer": '' + c} for c in dico[da]["computers"] ] computer_list_grid.setData(computer_list_data) @@ -121,9 +121,7 @@ def run(self): f"Domains of computers storing sensitive connection informations of {da}" ) domain_list_grid.setheaders(["Domain"]) - domain_list_data = [ - {"Domain": ' ' + c} for c in domain_list - ] + domain_list_data = [{"Domain": '' + c} for c in domain_list] domain_list_grid.setData(domain_list_data) domain_list_page.addComponent(domain_list_grid) domain_list_page.render() @@ -137,7 +135,9 @@ def run(self): else 0 ) - self.name_description = f"{self.data} Tier-0 sessions on non-Tier-0 computers" + self.name_description = ( 
+ f"{self.data} Tier-$0$ sessions on non-Tier-$0$ computers" + ) def get_rating(self) -> int: return presence_of(self.users_domain_admin_on_nondc) diff --git a/ad_miner/sources/modules/controls/dormants_accounts.py b/ad_miner/sources/modules/controls/dormants_accounts.py index 1ca607d9..43e0956a 100644 --- a/ad_miner/sources/modules/controls/dormants_accounts.py +++ b/ad_miner/sources/modules/controls/dormants_accounts.py @@ -46,23 +46,24 @@ def run(self): self.get_dico_description(), ) grid = Grid("Dormants accounts") - grid.setheaders(["domain", "name", "last logon", "Account Creation Date"]) + grid.setheaders(["domain", "name", "display name", "last logon", "Account Creation Date", "distinguished name"]) data = [] for dict in self.users_dormant_accounts: - tmp_data = {"domain": ' ' + dict["domain"]} + tmp_data = {"domain": '' + dict["domain"]} tmp_data["name"] = ( ( - ' ' + '' + dict["name"] ) if dict["name"] in self.admin_list - else ' ' + dict["name"] + else '' + dict["name"] ) - + tmp_data["display name"] = dict["displayname"] if dict["displayname"] else 'N/A' tmp_data["last logon"] = days_format(dict["days"]) tmp_data["Account Creation Date"] = days_format(dict["accountCreationDate"]) + tmp_data["distinguished name"] = dict["distinguishedname"] data.append(tmp_data) diff --git a/ad_miner/sources/modules/controls/empty_groups.py b/ad_miner/sources/modules/controls/empty_groups.py index d796719d..1c494a16 100644 --- a/ad_miner/sources/modules/controls/empty_groups.py +++ b/ad_miner/sources/modules/controls/empty_groups.py @@ -35,7 +35,7 @@ def run(self): headers = ["Empty group", "Full Reference"] for d in self.empty_groups: - d["Empty group"] = ' ' + d["Empty group"] + d["Empty group"] = '' + d["Empty group"] grid.setheaders(headers) grid.setData(self.empty_groups) diff --git a/ad_miner/sources/modules/controls/empty_ous.py b/ad_miner/sources/modules/controls/empty_ous.py index a1c6af6b..fb32ee40 100644 --- a/ad_miner/sources/modules/controls/empty_ous.py +++ 
b/ad_miner/sources/modules/controls/empty_ous.py @@ -36,7 +36,7 @@ def run(self): for d in self.empty_ous: d["Empty Organizational Unit"] = ( - ' ' + d["Empty Organizational Unit"] + '' + d["Empty Organizational Unit"] ) grid.setheaders(headers) diff --git a/ad_miner/sources/modules/controls/graph_list_objects_rbcd.py b/ad_miner/sources/modules/controls/graph_list_objects_rbcd.py index 66b48e0a..2261cd62 100644 --- a/ad_miner/sources/modules/controls/graph_list_objects_rbcd.py +++ b/ad_miner/sources/modules/controls/graph_list_objects_rbcd.py @@ -130,10 +130,9 @@ def run(self): ).zfill(6) sub_tmp_data["Paths to DA"] = grid_data_stringify( { - "value": f'{len(self.rbcd_to_da_graphs[destination]["paths"])} path{"s" if len(self.rbcd_to_da_graphs[destination]["paths"]) > 1 else ""} to DA', - "link": "rbcd_target_%s_paths_to_da.html" - % quote(str(destination)), - "before_link": f'', + "value": f'{len(self.rbcd_to_da_graphs[destination]["paths"])} path{"s" if len(self.rbcd_to_da_graphs[destination]["paths"]) > 1 else ""} to DA ', + "link": "rbcd_target_%s_paths_to_da.html" % quote(str(destination)), + "before_link": f'', } ) else: @@ -160,16 +159,16 @@ def run(self): for object_name in list(self.rbcd_graphs.keys()): tmp_data = {} tmp_data["Domain"] = ( - ' ' + '' + self.rbcd_graphs[object_name]["domain"] ) - tmp_data["Name"] = ' ' + object_name + tmp_data["Name"] = '' + object_name sortClass1 = str(len(self.rbcd_graphs[object_name]["paths"])).zfill(6) tmp_data["Paths to targets"] = grid_data_stringify( { - "value": f'{len(self.rbcd_graphs[object_name]["paths"])} path{"s" if len(self.rbcd_graphs[object_name]["paths"]) > 1 else ""} to targets', + "value": f'{len(self.rbcd_graphs[object_name]["paths"])} path{"s" if len(self.rbcd_graphs[object_name]["paths"]) > 1 else ""} to targets ', "link": "%s_rbcd_graph.html" % quote(str(object_name)), - "before_link": f'', + "before_link": f'', } ) if self.rbcd_graphs[object_name]["nb_paths_to_da"] > 0: @@ -178,10 +177,10 @@ def 
run(self): ).zfill(6) tmp_data["Paths to DA"] = grid_data_stringify( { - "value": f'{self.rbcd_graphs[object_name]["nb_paths_to_da"]} path{"s" if self.rbcd_graphs[object_name]["nb_paths_to_da"] > 1 else ""} to DA', + "value": f'{self.rbcd_graphs[object_name]["nb_paths_to_da"]} path{"s" if self.rbcd_graphs[object_name]["nb_paths_to_da"] > 1 else ""} to DA ', "link": "graph_list_objects_rbcd_to_da_from_%s.html" % quote(str(object_name)), - "before_link": f'', + "before_link": f'', } ) else: diff --git a/ad_miner/sources/modules/controls/graph_path_objects_to_da.py b/ad_miner/sources/modules/controls/graph_path_objects_to_da.py index e6aeece0..5eb41699 100644 --- a/ad_miner/sources/modules/controls/graph_path_objects_to_da.py +++ b/ad_miner/sources/modules/controls/graph_path_objects_to_da.py @@ -48,9 +48,7 @@ def __init__(self, arguments, requests_results) -> None: } def run(self): - self.generatePathToDa() - self.data = ( len( list( @@ -82,10 +80,10 @@ def get_rating(self) -> int: return 1 for domain in self.computers_to_domain_admin: if len(self.computers_to_domain_admin[domain]) > 0: - return 1 + return 2 for domain in self.groups_to_domain_admin: if len(self.groups_to_domain_admin[domain]) > 0: - return 1 + return 3 return 5 def generatePathToDa( @@ -97,7 +95,11 @@ def generatePathToDa( for domain in self.domains: domain = domain[0] - if len(self.users_to_domain[domain]): + logger.print_debug(f"Generating paths to DA pages for {domain}") + + count_users = len(self.users_to_domain[domain]) + logger.print_debug(f"Trying to generate users to DA graph page with {count_users} paths") + if count_users: createGraphPage( self.arguments.cache_prefix, domain + f"_users_to_{file_variable}", @@ -106,7 +108,10 @@ def generatePathToDa( self.users_to_domain[domain], self.requests_results, ) - if len(self.computers_to_domain[domain]): + + count_computers = len(self.computers_to_domain[domain]) + logger.print_debug(f"Trying to generate computers to DA graph page with 
{count_computers} paths") + if count_computers: createGraphPage( self.arguments.cache_prefix, domain + f"_computers_to_{file_variable}", @@ -115,7 +120,10 @@ def generatePathToDa( self.computers_to_domain[domain], self.requests_results, ) - if len(self.groups_to_domain[domain]): + + count_groups = len(self.groups_to_domain[domain]) + logger.print_debug(f"Trying to generate groups to DA graph page with {count_groups} paths") + if count_groups: createGraphPage( self.arguments.cache_prefix, domain + f"_groups_to_{file_variable}", @@ -139,12 +147,12 @@ def count_object_from_path(list_of_paths): """ Count the numbers of object leading to DA instead of counting number of path. """ - entries = [] + entries = {} for path in list_of_paths: start = path.nodes[0].name if start not in entries: - entries.append(start) - return len(entries) + entries[start] = True + return len(entries.keys()) # generating graph object to da grid page = Page( @@ -167,7 +175,7 @@ def count_object_from_path(list_of_paths): domain = domain[0] tmp_data = {} - tmp_data[headers[0]] = ' ' + domain + tmp_data[headers[0]] = '' + domain count = count_object_from_path(self.users_to_domain[domain]) sortClass = str(count).zfill( @@ -176,14 +184,14 @@ def count_object_from_path(list_of_paths): if count != 0: tmp_data[headers[1]] = grid_data_stringify( { - "value": f"{count} ( {len(self.users_to_domain_admin[domain])})", + "value": f"{count} ({len(self.users_to_domain_admin[domain])})", "link": "%s_users_to_da.html" % quote(str(domain)), - "before_link": f" ", + "before_link": f"", } ) else: tmp_data[headers[1]] = ( - " %s ( %s)" + "%s (%s)" % (sortClass, count, len(self.users_to_domain_admin[domain])) ) self.total_object += count @@ -195,14 +203,14 @@ def count_object_from_path(list_of_paths): if count != 0: tmp_data[headers[2]] = grid_data_stringify( { - "value": f"{count} ( {len(self.computers_to_domain_admin[domain])})", + "value": f"{count} ({len(self.computers_to_domain_admin[domain])})", "link": 
"%s_computers_to_da.html" % quote(str(domain)), "before_link": f"", } ) else: tmp_data[headers[2]] = ( - " %s ( %s)" + "%s (%s)" % (sortClass, count, len(self.computers_to_domain_admin[domain])) ) self.total_object += count @@ -214,14 +222,14 @@ def count_object_from_path(list_of_paths): if count != 0: tmp_data[headers[3]] = grid_data_stringify( { - "value": f"{count} ( {len(self.groups_to_domain_admin[domain])})", + "value": f"{count} ({len(self.groups_to_domain_admin[domain])})", "link": "%s_groups_to_da.html" % quote(str(domain)), "before_link": f"", } ) else: tmp_data[headers[3]] = ( - " %s ( %s)" + "%s (%s)" % (sortClass, count, len(self.groups_to_domain_admin[domain])) ) self.total_object += count diff --git a/ad_miner/sources/modules/controls/graph_path_objects_to_ou_handlers.py b/ad_miner/sources/modules/controls/graph_path_objects_to_ou_handlers.py index eadb55ce..646b1c06 100644 --- a/ad_miner/sources/modules/controls/graph_path_objects_to_ou_handlers.py +++ b/ad_miner/sources/modules/controls/graph_path_objects_to_ou_handlers.py @@ -123,6 +123,8 @@ def run(self): grid.setheaders(headers) grid_data = [] + self.maxInterest = 0 + for OU_node in analysis_dict: inbound_list = [ @@ -258,6 +260,8 @@ def run(self): interest, ) + self.maxInterest = max(interest, self.maxInterest) + # Color for stars if interest == 3: color = "red" @@ -280,9 +284,14 @@ def run(self): page.addComponent(grid) page.render() - self.data = len(self.compromise_paths_of_OUs) if self.compromise_paths_of_OUs else 0 + self.data = len(analysis_dict.keys()) - self.name_description = f"{len(self.compromise_paths_of_OUs or [])} dangerous control paths over OUs" + self.name_description = f"{self.data} dangerous control paths over OUs" def get_rating(self) -> int: - return presence_of(self.compromise_paths_of_OUs) + if self.data == 0: + return 5 + elif self.maxInterest >= 2: + return 1 + else: + return 3 diff --git a/ad_miner/sources/modules/controls/guest_accounts.py 
b/ad_miner/sources/modules/controls/guest_accounts.py index 6636303c..542586cf 100644 --- a/ad_miner/sources/modules/controls/guest_accounts.py +++ b/ad_miner/sources/modules/controls/guest_accounts.py @@ -39,12 +39,12 @@ def run(self): data = [] for account_name, domain, is_enabled in guest_list: - tmp_data = {"domain": ' ' + domain} - tmp_data["name"] = ' ' + account_name + tmp_data = {"domain": '' + domain} + tmp_data["name"] = '' + account_name tmp_data["enabled"] = ( - ' Enabled' + 'Enabled' if is_enabled - else ' Disabled' + else 'Disabled' ) data.append(tmp_data) diff --git a/ad_miner/sources/modules/controls/has_sid_history.py b/ad_miner/sources/modules/controls/has_sid_history.py index 5d36bd4e..63472067 100644 --- a/ad_miner/sources/modules/controls/has_sid_history.py +++ b/ad_miner/sources/modules/controls/has_sid_history.py @@ -55,13 +55,13 @@ def run(self): if row["Has SID History"] == name_user: origin_count = len(self.users_admin_computer_list[name_user]) row["Admin of"] = ( - f" {origin_count} computer{'s' if origin_count > 0 else ''} " + f" {origin_count} computer{'s' if origin_count > 0 else ''} " ) if row["Target"] == name_user: target_count = len(self.users_admin_computer_list[name_user]) row["admin of"] = ( - f" {target_count} computer{'s' if target_count > 0 else ''} " + f" {target_count} computer{'s' if target_count > 0 else ''} " ) # add user icons diff --git a/ad_miner/sources/modules/controls/kerberoastables.py b/ad_miner/sources/modules/controls/kerberoastables.py index c0d196e9..df5398fb 100644 --- a/ad_miner/sources/modules/controls/kerberoastables.py +++ b/ad_miner/sources/modules/controls/kerberoastables.py @@ -32,34 +32,113 @@ def __init__(self, arguments, requests_results) -> None: } self.users_kerberoastable_users = requests_results["nb_kerberoastable_accounts"] - + self.computers_domain = requests_results["nb_computers"] + self.dormant_users_domain = requests_results["dormant_accounts"] + self.disabled_users_domain = 
requests_results["nb_disabled_accounts"] + self.enabled_users_domain = requests_results["nb_enabled_accounts"] + self.unjustified_accounts = 0 + + + def run(self): if self.users_kerberoastable_users is None: return + + computer_names = set() #meilleur pour les perfs à tester + for comp in self.computers_domain: + name = comp.get("name") + if not name: + continue + base_name = name.split(".")[0].lower() # enlever le domaine + enabled_flag = comp["enabled"] + ghost_flag = comp["ghost"] + computer_names.add((base_name, enabled_flag, ghost_flag)) + + dormant_users = set() + for user in self.dormant_users_domain: + name = user.get("name") + if not name: + continue + base_name = name.split("@")[0].lower() + dormant_users.add(base_name) + + disabled_users = set() + for user in self.disabled_users_domain: + name = user.get("name") + if not name: + continue + base_name = name.split("@")[0].lower() + disabled_users.add(base_name) + + enabled_users = set() + for user in self.enabled_users_domain: + name = user.get("name") + if not name: + continue + base_name = name.split("@")[0].lower() + enabled_users.add(base_name) + + SPNs = [] child_headers = ["Account", "SPN"] for user in self.users_kerberoastable_users: n = 0 + unjustified_SPNs = 0 if not user.get("SPN"): continue for s in user["SPN"]: + clean_SPN = s.split("/")[1].split(".")[0].lower() + + # Ajout des warnings + if all((clean_SPN, False, ghost) in computer_names for ghost in (None, False, True)): + s += " 🚨 (Warning : Disabled Computer)" + unjustified_SPNs += 1 + elif (clean_SPN, True, True) in computer_names: + s += " 🚨 (Warning : Ghost Computer)" + unjustified_SPNs += 1 + elif clean_SPN in dormant_users: + s += " 🚨 (Warning : Dormant User)" + unjustified_SPNs += 1 + elif clean_SPN in disabled_users: + s += " 🚨 (Warning : Disabled User)" + unjustified_SPNs += 1 + elif all((clean_SPN, True, ghost) not in computer_names for ghost in (None, False)) \ + and clean_SPN not in enabled_users: + s += " 🚨 (Warning : 
Nonexistent Object)" + unjustified_SPNs += 1 + + child_dict = {} child_dict[child_headers[0]] = user["name"] child_dict[child_headers[1]] = s SPNs.append(child_dict) n += 1 + sortClass = str(n).zfill( 6 ) # used to make the sorting feature work with icons - user["SPN"] = grid_data_stringify( - { - "link": "%s.html?parameter=%s" - % ("kerberoastables_SPN", quote(str(user["name"]))), - "value": f"{n} SPN{'s' if n > 1 else ''}", - "before_link": f'', - } - ) + + if n == unjustified_SPNs: + user["SPN"] = grid_data_stringify( + { + "link": "%s.html?parameter=%s" + % ("kerberoastables_SPN", quote(str(user["name"]))), + "value": f"{n} SPN{'s' if n > 1 else ''} (Not Justified) 🛑", + "before_link": f'', + } + ) + else: + user["SPN"] = grid_data_stringify( + { + "link": "%s.html?parameter=%s" + % ("kerberoastables_SPN", quote(str(user["name"]))), + "value": f"{n} SPN{'s' if n > 1 else ''}{' (' + str(unjustified_SPNs) + ' Unjustified) ⚠️' if unjustified_SPNs > 0 else ''}", + "before_link": f'', + } + ) + if unjustified_SPNs > 0: + self.unjustified_accounts += 1 child_page = Page( self.arguments.cache_prefix, @@ -73,10 +152,11 @@ def run(self): child_page.addComponent(child_grid) child_page.render() + title = f"List of kerberoastable accounts {'(' + str(self.unjustified_accounts) + ' Unjustified accounts)' if self.unjustified_accounts > 0 else ''}" page = Page( self.arguments.cache_prefix, "kerberoastables", - "List of kerberoastable account", + title, self.get_dico_description(), ) grid = Grid("Kerberoastable users") @@ -87,18 +167,18 @@ def run(self): for elem in range(len(self.users_kerberoastable_users)): if self.users_kerberoastable_users[elem]["is_Domain_Admin"] == True: self.users_kerberoastable_users[elem]["name"] = ( - ' ' + '' + self.users_kerberoastable_users[elem]["name"] ) else: self.users_kerberoastable_users[elem]["name"] = ( - ' ' + '' + self.users_kerberoastable_users[elem]["name"] ) data = [] for dict in self.users_kerberoastable_users: - tmp_data = 
{"domain": ' ' + dict["domain"]} + tmp_data = {"domain": '' + dict["domain"]} tmp_data["name"] = dict["name"] tmp_data["Last password change"] = days_format(dict["pass_last_change"]) tmp_data["Account Creation Date"] = days_format(dict["accountCreationDate"]) diff --git a/ad_miner/sources/modules/controls/krb_last_change.py b/ad_miner/sources/modules/controls/krb_last_change.py index 26bf026b..395bba82 100644 --- a/ad_miner/sources/modules/controls/krb_last_change.py +++ b/ad_miner/sources/modules/controls/krb_last_change.py @@ -41,9 +41,9 @@ def run(self): data = [] for dict in self.users_krb_pwd_last_set: - tmp_data = {"domain": ' ' + dict["domain"]} + tmp_data = {"domain": '' + dict["domain"]} tmp_data["name"] = ( - ' ' + dict["name"] + '' + dict["name"] ) tmp_data["Last password change"] = days_format(dict["pass_last_change"]) tmp_data["Account Creation Date"] = days_format(dict["accountCreationDate"]) diff --git a/ad_miner/sources/modules/controls/ldap_configuration.py b/ad_miner/sources/modules/controls/ldap_configuration.py new file mode 100644 index 00000000..8fcde518 --- /dev/null +++ b/ad_miner/sources/modules/controls/ldap_configuration.py @@ -0,0 +1,92 @@ +from ad_miner.sources.modules.controls import Control +from ad_miner.sources.modules.controls import register_control +from ad_miner.sources.modules.page_class import Page +from ad_miner.sources.modules.grid_class import Grid + + +@register_control +class ldap_configuration(Control): + "LDAP and LDAPS server configuration" + + # LDAP + no LDAP signing required -> relay to LDAP possible (from SMB if drom the mic / NTLMv1) + # LDAPS + no EPA -> relay to LDAP possible (seems not correctly implemented by ntlmrelayx when coming from SMB) + # LDAPS doesn't support LDAP signing (because based on SSL like HTTPS) + + def __init__(self, arguments, requests_results) -> None: + super().__init__(arguments, requests_results) + + self.azure_or_onprem = "on_premise" + self.category = "misc" + self.control_key = 
"ldap_configuration" + + self.title = "LDAP servers configuration" + self.description = "LDAP(S) allows for the modification of permissions and configurations within the Active Directory. Signing protects against relay attacks targeting LDAP. Extended Protection for Authentication (EPA) protects against relay attacks targeting LDAPS." + self.risk = "Relay attacks targeting LDAP or LDAPS allow attackers to move laterally and vertically, potentially compromising the domain." + self.poa = "Enable LDAP signing and Extended Protection for Authentication (EPA) to protect against relay attacks. Audit mode allows to assess the feasibility of this action." + + self.ldap_configuration = requests_results["ldap_server_configuration"] + + def run(self): + self.ldap_relay_possible = False + self.ldaps_relay_possible = False + self.misconfigurations = 0 + + page = Page(self.arguments.cache_prefix, "ldap_configuration", "LDAP and LDAPS configuration", self.get_dico_description()) + grid = Grid("LDAP and LDAPS configuration") + grid.setheaders(["domain", "name", "LDAP available", "LDAP signing", "LDAPS available", "LDAPS EPA", "Relay to LDAP(S)"]) + + data = [] + for d in self.ldap_configuration: + tmp_data = {} + tmp_data["domain"] = '' + d["domain"] + tmp_data["name"] = '' + d["name"] + + if d["ldap"]: + tmp_data["LDAP available"] = 'LDAP available' + elif d["ldap"] is not None: + tmp_data["LDAP available"] = 'LDAP unavailable' + else: + tmp_data["LDAP available"] = "Not collected" + + if d["ldapsigning"]: + tmp_data["LDAP signing"] = 'Signing required' + elif d["ldapsigning"] is not None: + tmp_data["LDAP signing"] = 'Signing not required' + else: + tmp_data["LDAP signing"] = "Not collected" + + if d["ldaps"]: + tmp_data["LDAPS available"] = 'LDAPS available' + elif d["ldaps"] is not None: + tmp_data["LDAPS available"] = 'LDAPS unavailable' + else: + tmp_data["LDAPS available"] = "Not collected" + + if d["ldapsepa"]: + tmp_data["LDAPS EPA"] = 'EPA required' + elif d["ldapsepa"] 
is not None: + tmp_data["LDAPS EPA"] = 'EPA not required' + else: + tmp_data["LDAPS EPA"] = "Not collected" + + if (d["ldap"] and d["ldapsigning"] is not None and not d["ldapsigning"]) or (d["ldaps"] and d["ldapsepa"] is not None and not d["ldapsepa"]): + tmp_data["Relay to LDAP(S)"] = 'Relay to LDAP(S) possible' + self.ldap_relay_possible = True + self.misconfigurations += 1 + else: + tmp_data["Relay to LDAP(S)"] = 'Relay to LDAP(S) impossible' + + data.append(tmp_data) + grid.setData(data) + page.addComponent(grid) + page.render() + + self.data = self.misconfigurations + self.name_description = f"{self.data} LDAP(S) misconfiguration{'s' if self.data > 1 else ''} allow relay to LDAP" + + def get_rating(self) -> int: + # -1 = grey, 1 = red, 2 = orange, 3 = yellow, 4 =green, 5 = green, + if self.ldap_relay_possible or self.ldaps_relay_possible: + return 1 + else: + return 5 diff --git a/ad_miner/sources/modules/controls/nb_domain_admins.py b/ad_miner/sources/modules/controls/nb_domain_admins.py index bbeb2174..efd9d523 100644 --- a/ad_miner/sources/modules/controls/nb_domain_admins.py +++ b/ad_miner/sources/modules/controls/nb_domain_admins.py @@ -48,8 +48,8 @@ def run(self): for da in self.users_nb_domain_admins: tmp_data = {} - tmp_data["domain"] = ' ' + da["domain"] - tmp_data["name"] = ' ' + da["name"] + tmp_data["domain"] = '' + da["domain"] + tmp_data["name"] = '' + da["name"] tmp_data["domain admin"] = ( 'True' if "Domain Admin" in da["admin type"] diff --git a/ad_miner/sources/modules/controls/never_expires.py b/ad_miner/sources/modules/controls/never_expires.py index 1671808a..6234f84a 100644 --- a/ad_miner/sources/modules/controls/never_expires.py +++ b/ad_miner/sources/modules/controls/never_expires.py @@ -41,11 +41,11 @@ def run(self): # Add admin icon if user["name"] in self.admin_list: user["name"] = ( - ' ' + '' + user["name"] ) else: - user["name"] = ' ' + user["name"] + user["name"] = '' + user["name"] page = Page( self.arguments.cache_prefix, 
"never_expires", @@ -66,7 +66,7 @@ def run(self): data = [] for dict in self.users_password_never_expires: tmp_data = { - "domain": ' ' + dict["domain"], + "domain": '' + dict["domain"], "name": dict["name"], } tmp_data["Last login"] = days_format(dict["LastLogin"]) diff --git a/ad_miner/sources/modules/controls/non-dc_with_unconstrained_delegations.py b/ad_miner/sources/modules/controls/non-dc_with_unconstrained_delegations.py index 9ac20a24..18decad8 100644 --- a/ad_miner/sources/modules/controls/non-dc_with_unconstrained_delegations.py +++ b/ad_miner/sources/modules/controls/non-dc_with_unconstrained_delegations.py @@ -92,16 +92,16 @@ def run(self): tmp_data = {} if node.labels == "User": - pretty_name = f' {end_node}' + pretty_name = f'{end_node}' elif node.labels == "Computer": - pretty_name = f' {end_node}' + pretty_name = f'{end_node}' else: pretty_name = end_node tmp_data["Configured for Kerberos Unconstrained Delegation"] = pretty_name tmp_data["Compromise Paths"] = grid_data_stringify( { - "value": f'{len(self.kud_graphs[end_node])} ', + "value": f'{len(self.kud_graphs[end_node])} ', "link": "%s_kud_graph.html" % quote(str(end_node)), } ) diff --git a/ad_miner/sources/modules/controls/objects_to_adcs.py b/ad_miner/sources/modules/controls/objects_to_adcs.py index ceadab46..3daffa1c 100644 --- a/ad_miner/sources/modules/controls/objects_to_adcs.py +++ b/ad_miner/sources/modules/controls/objects_to_adcs.py @@ -56,8 +56,10 @@ def run(self): self.ADCS_path_sorted = {} self.ADCS_entry_point = [] + self.unique_starting_points = {} for path in self.objects_to_adcs: + self.unique_starting_points[path.nodes[0].id] = True self.ADCS_entry_point.append(path.nodes[0].name) try: self.ADCS_path_sorted[path.nodes[-1].name].append(path) @@ -71,9 +73,9 @@ def run(self): for key, paths in self.ADCS_path_sorted.items(): tmp_data = {} tmp_data["Domain"] = ( - ' ' + paths[0].nodes[-1].domain + '' + paths[0].nodes[-1].domain ) - tmp_data["Name"] = ' ' + key + tmp_data["Name"] = 
'' + key nb_path_to_adcs = len(paths) self.total_paths += nb_path_to_adcs sortClass = str(nb_path_to_adcs).zfill(6) @@ -81,7 +83,7 @@ def run(self): { "link": "path_to_adcs_%s.html" % quote(str(key)), "value": f"{nb_path_to_adcs} paths to ADCS", - "before_link": f"", + "before_link": f"", } ) cleaned_data.append(tmp_data) @@ -101,10 +103,10 @@ def run(self): page.addComponent(grid) page.render() - self.data = self.total_paths + self.data = len(self.unique_starting_points.keys()) self.name_description = ( - f"{self.data} non-tier-0 with local admin privileges on ADCS" + f"{self.data} non-tier-$0$ with local admin privileges on ADCS" ) def get_rating(self) -> int: diff --git a/ad_miner/sources/modules/controls/objects_to_operators_member.py b/ad_miner/sources/modules/controls/objects_to_operators_member.py index 62c27188..5ad7ba11 100644 --- a/ad_miner/sources/modules/controls/objects_to_operators_member.py +++ b/ad_miner/sources/modules/controls/objects_to_operators_member.py @@ -49,8 +49,8 @@ def run(self): data[path.nodes[0].name]["target"].append(path.nodes[-1].name) except KeyError: data[path.nodes[0].name] = { - "domain": ' ' + path.nodes[-1].domain, - "name": ' ' + path.nodes[0].name, + "domain": '' + path.nodes[-1].domain, + "name": '' + path.nodes[0].name, "link": quote(str(path.nodes[0].name)), "target": [path.nodes[-1].name], "paths": [path], @@ -63,8 +63,8 @@ def run(self): KeyError ): # Really **should not** happen, but to prevent crash in case of corrupted cache/db data[path.nodes[-1].name] = { - "domain": ' ' + path.nodes[-1].domain, - "name": ' ' + path.nodes[-1].name, + "domain": '' + path.nodes[-1].domain, + "name": '' + path.nodes[-1].name, "link": quote(str(path.nodes[-1].name)), "target": [""], "paths": [path], @@ -81,7 +81,7 @@ def run(self): { "value": f"{len(d['paths'])} paths target{'s' if len(d['target'])>1 else ''}", "link": f"objects_to_operators_{quote(str(d['link']))}.html", - "before_link": f"", + "before_link": f"", } ), "targets": 
",".join(d["target"]), diff --git a/ad_miner/sources/modules/controls/pre_windows_2000_compatible_access_group.py b/ad_miner/sources/modules/controls/pre_windows_2000_compatible_access_group.py index c9f43c00..6d9518d2 100644 --- a/ad_miner/sources/modules/controls/pre_windows_2000_compatible_access_group.py +++ b/ad_miner/sources/modules/controls/pre_windows_2000_compatible_access_group.py @@ -56,7 +56,7 @@ def run(self): data = [] for domain, account_name, objectid, type_list in sorted_list: - tmp_data = {"Domain": ' ' + domain} + tmp_data = {"Domain": '' + domain} type_clean = generic_formating.clean_label(type_list) @@ -67,7 +67,7 @@ def run(self): tmp_data["Rating"] = ( '' if "1-5-7" not in objectid - else ' Anonymous' + else ' Anonymous' ) data.append(tmp_data) diff --git a/ad_miner/sources/modules/controls/primaryGroupID_lower_than_1000.py b/ad_miner/sources/modules/controls/primaryGroupID_lower_than_1000.py index 7dbf7abd..c0da1ff7 100644 --- a/ad_miner/sources/modules/controls/primaryGroupID_lower_than_1000.py +++ b/ad_miner/sources/modules/controls/primaryGroupID_lower_than_1000.py @@ -5,6 +5,7 @@ from ad_miner.sources.modules.grid_class import Grid from ad_miner.sources.modules.utils import MODULES_DIRECTORY + import json @@ -20,23 +21,17 @@ def __init__(self, arguments, requests_results) -> None: self.control_key = "rid_singularities" self.title = "Unexpected PrimaryGroupID" - self.description = ( - "Accounts with either an unknown RID, or RID-name missmatches." - ) + self.description = "Accounts with either an unknown RID, or RID-name missmatches." self.risk = "In Active Directory, the primaryGroupId attribute of a user or machine account implicitly assigns this account to a group, even if this group is not listed in the user's memberOf attribute. Membership of a group via this attribute does not appear in the list of group members in certain interfaces. This attribute can be used to hide an account's membership of a group." 
self.poa = "We recommend that you reset the primaryGroupId attributes of the users or computers concerned to their default values." - self.primaryGroupID_lower_than_1000 = requests_results[ - "primaryGroupID_lower_than_1000" - ] + self.primaryGroupID_lower_than_1000 = requests_results["primaryGroupID_lower_than_1000"] def run(self): if self.primaryGroupID_lower_than_1000 is None: self.primaryGroupID_lower_than_1000 = [] - known_RIDs = json.loads( - (MODULES_DIRECTORY / "known_RIDs.json").read_text(encoding="utf-8") - ) + known_RIDs = json.loads((MODULES_DIRECTORY / "known_RIDs.json").read_text(encoding="utf-8")) page = Page( self.arguments.cache_prefix, @@ -54,32 +49,22 @@ def run(self): tmp_data = {} if str(rid) not in known_RIDs: - tmp_data["domain"] = ' ' + domain + tmp_data["domain"] = '' + domain tmp_data["RID"] = str(rid) - tmp_data["name"] = ( - ' ' + name if is_da else name - ) + tmp_data["name"] = '' + name if is_da else name tmp_data["reason"] = "Unknown RID" data.append(tmp_data) elif name_without_domain not in known_RIDs[str(rid)]: - tmp_data["domain"] = ' ' + domain + tmp_data["domain"] = '' + domain tmp_data["RID"] = str(rid) - tmp_data["name"] = ( - ' ' + name if is_da else name - ) - tmp_data["reason"] = ( - "Unexpected name, expected : " + known_RIDs[str(rid)][0] - ) + tmp_data["name"] = '' + name if is_da else name + tmp_data["reason"] = "Unexpected name, expected : " + known_RIDs[str(rid)][0] data.append(tmp_data) data = sorted(data, key=lambda x: x["RID"]) - sorted_data = [ - tmp_data for tmp_data in data if tmp_data["reason"].startswith("Unknown") - ] - sorted_data += [ - tmp_data for tmp_data in data if tmp_data["reason"].startswith("Unexpected") - ] + sorted_data = [tmp_data for tmp_data in data if tmp_data["reason"].startswith("Unknown")] + sorted_data += [tmp_data for tmp_data in data if tmp_data["reason"].startswith("Unexpected")] self.rid_singularities = len(sorted_data) @@ -92,9 +77,7 @@ def run(self): self.data = self.rid_singularities 
# TODO define the sentence that will be displayed in the 'smolcard' view and in the center of the mainpage - self.name_description = ( - f"{self.data} accounts with unknown RIDs or unexpected names" - ) + self.name_description = f"{self.data} accounts with unknown RIDs or unexpected names" def get_rating(self) -> int: return 2 if self.rid_singularities > 0 else 5 diff --git a/ad_miner/sources/modules/controls/privileged_accounts_outside_Protected_Users.py b/ad_miner/sources/modules/controls/privileged_accounts_outside_Protected_Users.py index 4e2dbcec..b7772f0d 100644 --- a/ad_miner/sources/modules/controls/privileged_accounts_outside_Protected_Users.py +++ b/ad_miner/sources/modules/controls/privileged_accounts_outside_Protected_Users.py @@ -32,10 +32,10 @@ def run(self): page = Page( self.arguments.cache_prefix, "privileged_accounts_outside_Protected_Users", - "Priviledged accounts not part of the Protected Users group", + "Privileged accounts not part of the Protected Users group", self.get_dico_description(), ) - grid = Grid("Priviledged accounts not part of the Protected Users group") + grid = Grid("Privileged accounts not part of the Protected Users group") grid.setheaders( [ "domain", @@ -55,8 +55,8 @@ def run(self): for dic in self.users_nb_domain_admins: if "Protected Users" in dic["admin type"]: continue - tmp_data = {"domain": ' ' + dic["domain"]} - tmp_data["name"] = ' ' + dic["name"] + tmp_data = {"domain": '' + dic["domain"]} + tmp_data["name"] = '' + dic["name"] tmp_data["domain admin"] = ( 'True' if "Domain Admin" in dic["admin type"] @@ -88,7 +88,7 @@ def run(self): else '' ) tmp_data["protected user"] = ( - ' Unprotected' + 'Unprotected' ) data.append(tmp_data) @@ -105,9 +105,7 @@ def run(self): ) # TODO define the sentence that will be displayed in the 'smolcard' view and in the center of the mainpage - self.name_description = ( - f"{self.data} priviledged accounts not in Protected Users group" - ) + self.name_description = f"{self.data} 
privileged accounts not in Protected Users group" def get_rating(self) -> int: return presence_of( @@ -115,5 +113,6 @@ def get_rating(self) -> int: dic for dic in self.users_nb_domain_admins if "Protected Users" not in dic["admin type"] - ] + ], + criticity=2, ) diff --git a/ad_miner/sources/modules/controls/server_users_could_be_admin.py b/ad_miner/sources/modules/controls/server_users_could_be_admin.py index dc9cc2bd..83a4927f 100644 --- a/ad_miner/sources/modules/controls/server_users_could_be_admin.py +++ b/ad_miner/sources/modules/controls/server_users_could_be_admin.py @@ -22,7 +22,7 @@ def __init__(self, arguments, requests_results) -> None: self.title = "Paths to servers" self.description = ( - "Users could gain administration privileges on some servers." + "Users could gain administration privileges privileges on some servers." ) self.interpretation = "" self.risk = "Inadequate administration rights on computers can lead to easy privilege escalation for an attacker. With a privileged account, it is possible to perform local memory looting to find credentials for example." 
@@ -46,7 +46,7 @@ def run(self): ["Computers", "Users who have a server compromise path"], "server_compromisable", icon=icon, - icon2=' ', + icon2='', ) page = Page( diff --git a/ad_miner/sources/modules/controls/smb_signing.py b/ad_miner/sources/modules/controls/smb_signing.py new file mode 100644 index 00000000..f9b66d5b --- /dev/null +++ b/ad_miner/sources/modules/controls/smb_signing.py @@ -0,0 +1,82 @@ +from ad_miner.sources.modules.controls import Control +from ad_miner.sources.modules.controls import register_control +from ad_miner.sources.modules.page_class import Page +from ad_miner.sources.modules.grid_class import Grid +from ad_miner.sources.modules.utils import days_format + + +@register_control +class smb_signing(Control): + "SMB signing requirements amongst computers" + + def __init__(self, arguments, requests_results) -> None: + super().__init__(arguments, requests_results) + + self.azure_or_onprem = "on_premise" + self.category = "misc" + self.control_key = "smb_signing" + + self.title = "SMB signing requirement" + self.description = "Computers that do not require SMB signing are vulnerable to relay attacks." + self.risk = "Not requiring SMB signing on domain controllers could lead to immediate domain compromission through coerced authentication and relay attack." + self.poa = "Require SMB signing on all computers with a GPO. Prioritize domain controllers or particularly sensitive servers, then other servers and finally workstations. Audit mode can be enabled to check the feasibility of the operation." 
+ + self.smb_signing = requests_results["smb_signing"] + + def run(self): + self.dc_without_signing = False + self.server_without_signing = False + self.workstation_without_signing = False + + page = Page(self.arguments.cache_prefix, "smb_signing", "SMB signing", self.get_dico_description()) + grid = Grid("SMB signing") + grid.setheaders(["domain", "name", "type", "last logon", "signing"]) + data = [] + for d in self.smb_signing: + tmp_data = {} + tmp_data["domain"] = '' + d["domain"] + tmp_data["name"] = '' + d["name"] + if d["dc"]: + tmp_data["type"] = 'Domain Controller' + tmp_data["order2"] = 0 + if not self.dc_without_signing and d["smbsigning"] is not None and not d["smbsigning"]: + self.dc_without_signing = True + elif d["server"]: + tmp_data["type"] = "Server" + tmp_data["order2"] = 1 + if not self.server_without_signing and d["smbsigning"] is not None and not d["smbsigning"]: + self.server_without_signing = True + else: + tmp_data["type"] = "Workstation" + tmp_data["order2"] = 2 + if not self.workstation_without_signing and d["smbsigning"] is not None and not d["smbsigning"]: + self.workstation_without_signing = True + tmp_data["last logon"] = days_format(d["lastlogontimestamp"]) + if d["smbsigning"] is None: + tmp_data["signing"] = "Not collected" + tmp_data["order"] = 2 + elif d["smbsigning"]: + tmp_data["signing"] = 'Signing required' + tmp_data["order"] = 1 + else: + tmp_data["signing"] = 'Signing not required' + tmp_data["order"] = 0 + data.append(tmp_data) + sorted_data = sorted(data, key=lambda x: x["order2"]) + sorted_sorted_data = sorted(sorted_data, key=lambda x: x["order"]) + grid.setData(sorted_sorted_data) + page.addComponent(grid) + page.render() + + self.data = len([c for c in self.smb_signing if c["smbsigning"] is not None and not c["smbsigning"]]) + self.name_description = f"{self.data} computer{'s' if self.data > 1 else ''} without SMB signing requirement" + + def get_rating(self) -> int: + # -1 = grey, 1 = red, 2 = orange, 3 = yellow, 4 
=green, 5 = green, + if self.dc_without_signing: + return 1 + elif self.server_without_signing: + return 2 + elif self.workstation_without_signing: + return 3 + return 5 diff --git a/ad_miner/sources/modules/controls/up_to_date_admincount.py b/ad_miner/sources/modules/controls/up_to_date_admincount.py index 3f6558ff..d0a1aefd 100644 --- a/ad_miner/sources/modules/controls/up_to_date_admincount.py +++ b/ad_miner/sources/modules/controls/up_to_date_admincount.py @@ -56,8 +56,8 @@ def run(self): for dic in self.users_nb_domain_admins: if dic["admincount"]: continue - tmp_data = {"domain": ' ' + dic["domain"]} - tmp_data["name"] = ' ' + dic["name"] + tmp_data = {"domain": '' + dic["domain"]} + tmp_data["name"] = '' + dic["name"] tmp_data["domain admin"] = ( 'True' if "Domain Admin" in dic["admin type"] @@ -89,13 +89,13 @@ def run(self): else '' ) tmp_data["admincount"] = ( - ' Missing admincount' + 'Missing admincount' ) data.append(tmp_data) for name, domain, da_type in self.unpriviledged_users_with_admincount: - tmp_data = {"domain": ' ' + domain} - tmp_data["name"] = ' ' + name + tmp_data = {"domain": '' + domain} + tmp_data["name"] = '' + name tmp_data["domain admin"] = '' tmp_data["schema admin"] = '' tmp_data["enterprise admin"] = '' @@ -103,7 +103,7 @@ def run(self): tmp_data["enterprise key admin"] = '' tmp_data["builtin admin"] = '' tmp_data["admincount"] = ( - ' Misleading admincountTrue' + 'Misleading admincountTrue' ) data.append(tmp_data) diff --git a/ad_miner/sources/modules/controls/users_GPO_access.py b/ad_miner/sources/modules/controls/users_GPO_access.py index 52e13410..13992b6d 100644 --- a/ad_miner/sources/modules/controls/users_GPO_access.py +++ b/ad_miner/sources/modules/controls/users_GPO_access.py @@ -26,7 +26,7 @@ def __init__(self, arguments, requests_results) -> None: self.title = "Inadequate GPO modifications privileges" self.description = "GPOs that can be edited by unprivileged users." 
- self.risk = "If an AD object has rights over a GPO, it can potentially cause damage over all the objects affected by the GPO. GPOs can also be leveraged to gain privileges in the domain(s). If an attacker exploits one of these paths, they will be able to gain privileges in the domain(s) and cause some serious damage.{self.users_rdp_access_1[key]}
", + "value": f"{len(self.users_rdp_access_1[key])} Computers{self.users_rdp_access_1[key]}
", "link": f"users_rdp_access.html?parameter={quote(str(key))}", "before_link": f'', } diff --git a/ad_miner/sources/modules/controls/users_shadow_credentials.py b/ad_miner/sources/modules/controls/users_shadow_credentials.py index cfa79824..0568c2e5 100644 --- a/ad_miner/sources/modules/controls/users_shadow_credentials.py +++ b/ad_miner/sources/modules/controls/users_shadow_credentials.py @@ -59,13 +59,13 @@ def run(self): for d in data.values(): sortClass = str(len(d["paths"])).zfill(6) tmp_grid_data = { - "domain": ' ' + d["domain"], - "name": ' ' + d["name"], + "domain": '' + d["domain"], + "name": '' + d["name"], "target": grid_data_stringify( { "value": f"{len(d['paths'])} paths to {len(d['target'])} target{'s' if len(d['target'])>1 else ''}", "link": f"users_shadow_credentials_from_{quote(str(d['name']))}.html", - "before_link": f"", + "before_link": f"", } ), } diff --git a/ad_miner/sources/modules/controls/users_shadow_credentials_to_non_admins.py b/ad_miner/sources/modules/controls/users_shadow_credentials_to_non_admins.py index a51bae38..431a3c03 100644 --- a/ad_miner/sources/modules/controls/users_shadow_credentials_to_non_admins.py +++ b/ad_miner/sources/modules/controls/users_shadow_credentials_to_non_admins.py @@ -53,14 +53,13 @@ def run(self): sortClass = str(nb_paths).zfill(6) grid_data.append( { - "domain": ' ' + data[target]["domain"], - "target": ' ' - + data[target]["target"], + "domain": '' + data[target]["domain"], + "target": '' + data[target]["target"], "paths": grid_data_stringify( { "value": f"{nb_paths} paths to target", "link": f"users_shadow_credentials_to_non_admins_to_{quote(str(data[target]['target']))}.html", - "before_link": f"", + "before_link": f"", } ), } diff --git a/ad_miner/sources/modules/exploitability_ratings.json b/ad_miner/sources/modules/exploitability_ratings.json index 17a079de..5b38bed3 100644 --- a/ad_miner/sources/modules/exploitability_ratings.json +++ b/ad_miner/sources/modules/exploitability_ratings.json @@ 
-6,8 +6,8 @@ "AddSelf": 10, "AdminTo": 5, "AllExtendedRights": 30, - "AllowedToAct": 20, - "AllowedToDelegate": 25, + "AllowedToAct": 15, + "AllowedToDelegate": 30, "AZAddMembers": 10, "AZAddOwner": 30, "AZAddSecret": 30, @@ -50,12 +50,15 @@ "AZVMAdminLogin": 10, "AZVMContributor": 10, "AZWebsiteContributor": 90, + "CanApplyGPO":5, "CanExtractDCSecrets": 20, "CanLoadCode": 100, "CanLogOnLocallyOnDC": 20, "CanPSRemote": 100, "CanRDP": 80, - "Contains": 30, + "ClaimSpecialIdentity":0, + "Contains": 100, + "ContainsIdentity":100, "DCSync": 0, "DumpSMSAPassword": 50, "ExecuteDCOM": 100, @@ -65,20 +68,26 @@ "GetChanges": 15, "GetChangesAll": 15, "GetChangesInFilteredSet": 15, + "GPOAppliesTo": 5, "GPLink": 40, "HasSession": 11, "HasSIDHistory": 0, + "HasTrustKeys":30, "MemberOf": 0, + "MemberOfLocalGroup":0, "Owns": 11, + "OwnsRaw": 11, + "PropagatesACEsTo":5, + "ProtectAdminGroups":15, "ReadGMSAPassword": 30, "ReadLAPSPassword": 30, "SQLAdmin": 60, "SyncLAPSPassword": 30, - "TrustedBy": 0, - "UnconstrainedDelegations": 0, + "UnconstrainedDelegations": 50, "WriteAccountRestrictions": 20, "WriteDacl": 10, "WriteOwner": 10, + "WriteOwnerRaw": 10, "WriteSPN": 40, "Synced": 50, "SyncedToADUser": 50, @@ -89,7 +98,7 @@ "RemoteInteractiveLogonPrivilege": 100, "Enroll": 40, "HostsCAService": 100, - "GoldenCert": 10, + "GoldenCert": 5, "EnterpriseCAFor": 100, "TrustedForNTAuth": 100, "PublishedTo": 100, @@ -98,20 +107,31 @@ "RootCAFor": 100, "IssuedSignedBy": 100, "WriteGPLink": 40, - "ADCSESC1": 20, + "ADCSESC1": 10, "ADCSESC2": 25, - "ADCSESC3": 30, + "ADCSESC3": 15, "ADCSESC4": 40, "ADCSESC5": 50, "ADCSESC6a": 30, "ADCSESC6b": 30, "ADCSESC7": 20, - "ADCSESC8": 20, + "ADCSESC8": 15, "ADCSESC9a": 30, "ADCSESC9b": 30, "ADCSESC10a": 30, "ADCSESC10b": 30, "ADCSESC11": 25, "ADCSESC12": 40, - "ADCSESC13": 40 + "ADCSESC13": 40, + "ADCSESC15": 15, + "RemoteInteractiveLogonRight": 100, + "CoerceToTGT": 50, + "AbuseTGTDelegation": 10, + "SameForestTrust": 0, + "SpoofSIDHistory": 10, + 
"CrossForestTrust": 100, + "CoerceAndRelayNTLMToLDAPS": 20, + "CoerceAndRelayNTLMToLDAP": 20, + "CoerceAndRelayNTLMToADCS": 10, + "CoerceAndRelayNTLMToSMB": 10 } diff --git a/ad_miner/sources/modules/generic_formating.py b/ad_miner/sources/modules/generic_formating.py index ced334ce..2a89420a 100755 --- a/ad_miner/sources/modules/generic_formating.py +++ b/ad_miner/sources/modules/generic_formating.py @@ -20,18 +20,9 @@ def clean_data_type(data, list_type_to_clean): data[k][type_name] = clean_label(data[k][type_name]) return data - + def get_label_icon_dictionary(): - return { - "User":"", - "Computer": "", - "Group": "", - "OU": "", - "Container": "", - "Domain": "", - "GPO": "", - "Unknown": "" - } + return {"User": "", "Computer": "", "Group": "", "ADLocalGroup": "", "OU": "", "Container": "", "Domain": "", "GPO": "", "Unknown": ""} def get_label_icon(name): if name in get_label_icon_dictionary(): @@ -40,7 +31,6 @@ def get_label_icon(name): return get_label_icon_dictionary()["Unknown"] - # format data for grid components format: list of dicts [{key1:value1}, {key2:value2}] def formatGridValues2Columns(data, headers, prefix, icon="", icon2=""): output = [] @@ -89,8 +79,9 @@ def formatGridValues3Columns(data, headers, prefix): headers[0]: dict[headers[0]], headers[1]: dict[headers[1]], headers[2]: { - "link": "%s.html?parameter=%s" % (quote(str(prefix)), quote(str(dict[headers[0]]))), - "value": "Show list of objects%s
" + "link": "%s.html?parameter=%s" + % (quote(str(prefix)), quote(str(dict[headers[0]]))), + "value": "Show list of objects%s
" % dict[headers[2]], }, } diff --git a/ad_miner/sources/modules/graph_class.py b/ad_miner/sources/modules/graph_class.py index aeb69209..7a2cc3e3 100755 --- a/ad_miner/sources/modules/graph_class.py +++ b/ad_miner/sources/modules/graph_class.py @@ -41,9 +41,7 @@ def addGroupDA(self, group_da): self.group_da = group_da def addDisabledUsers(self, disabled_users): - self.disabled_users_dict = {} - for d in disabled_users: - self.disabled_users_dict[d["name"]] = True + self.disabled_users_dict = disabled_users def addKerberoastableUsers(self, kerberoastable_users): self.kerberoastable_users = kerberoastable_users @@ -80,13 +78,18 @@ def render(self, page_f): "OU", "Group", "Domain", - "ADLocalGroup", "Container", - "Unknown", - "Group_cluster", - "Device", + "AZDevice", "AZTenant", "AZRole", + "ADLocalGroup", + "AZApp", + "CertTemplate", + "RootCA", + "EnterpriseCA", + "AIACA", + "NTAuthStore", + "AZServicePrincipal", ] if node.labels in list_labels: @@ -149,6 +152,10 @@ def render(self, page_f): "attributes": node_attributes, } self.nodes[path.nodes[i].id] = final_graph_node + elif self.nodes[path.nodes[i].id]["position"] == "intermediate" and ( + node_position == "end" or node_position == "start" + ): + self.nodes[path.nodes[i].id]["position"] = node_position if i != 0: relation = { diff --git a/ad_miner/sources/modules/grid_class.py b/ad_miner/sources/modules/grid_class.py index 2c7f26ff..a1b452fb 100755 --- a/ad_miner/sources/modules/grid_class.py +++ b/ad_miner/sources/modules/grid_class.py @@ -1,5 +1,9 @@ # row format : {key1 : {"value": value, "link":link}, key2 : value2} from ad_miner.sources.modules.utils import HTML_DIRECTORY +from ad_miner.sources.modules import logger + +import json +import os class Grid: @@ -66,3 +70,46 @@ def render(self, page_f): new_contents = template_contents.replace("// DATA PLACEHOLDER", textToInsert) page_f.write(new_contents) + + def writeEvolutionJSON(self, render_prefix, grid_key, data): + evolution_folder = os.path.join(".", 
"evolution_data") + os.makedirs(evolution_folder, exist_ok=True) + + evolution_json_path = os.path.join( + evolution_folder, render_prefix + "_grid_" + grid_key + ) + + with open(evolution_json_path, "w") as f: + f.write(json.dumps(data, indent=4)) + + def AddNewIconsToNewLines(self, previous_render_prefix, grid_key, column): + # Skip if no previous render prefix was created + if previous_render_prefix == "": + return + try: + evolution_json_path = os.path.join( + ".", "evolution_data", previous_render_prefix + "_grid_" + grid_key + ) + with open(evolution_json_path) as f: + previous_data = json.load(f) + + # Creating a dic with existing entries in the previous dic to improve performance + existing_lines = {} + for dict in previous_data: + existing_lines[dict[column]] = True + + # Editing current data to add a "new" mark to new lines + for d in self.data: + v = d[column] + if v not in existing_lines: + d[column] = ( + '' + + v + + " (new)" + ) + + except Exception as e: + logger.print_error( + f'"New" icon will not be displayed as an error occurend when trying to open the json file {evolution_json_path}' + ) + logger.print_error(e) diff --git a/ad_miner/sources/modules/known_RIDs.json b/ad_miner/sources/modules/known_RIDs.json index c13fd9f1..5d099f0d 100644 --- a/ad_miner/sources/modules/known_RIDs.json +++ b/ad_miner/sources/modules/known_RIDs.json @@ -277,7 +277,8 @@ ], "498": [ "ENTERPRISE_READONLY_DOMAIN_CONTROLLERS", - "ENTERPRISE READ-ONLY DOMAIN CONTROLLERS" + "ENTERPRISE READ-ONLY DOMAIN CONTROLLERS", + "CONTR\u00d4LEURS DE DOMAINE D\u2019ENTREPRISE EN LECTURE SEULE" ], "4\u00c2\u00a0\u00d0\u00bc\u00d0\u00bb\u00d0\u00bd \u00d0\u00b4\u00d0\u00be\u00d0\u00bb\u00d0\u00bb.\u00c2\u00a0\u00d0\u00a1\u00d0\u00a8\u00d0\u0090": [ "ADMINS" @@ -328,7 +329,8 @@ "\u00d0\u0090\u00d0\u00b4\u00d0\u00bc\u00d0\u00b8\u00d0\u00bd\u00d0\u00b8\u00d1\u0081\u00d1\u0082\u00d1\u0080\u00d0\u00b0\u00d1\u0082\u00d0\u00be\u00d1\u0080\u00d1\u008b 
\u00d0\u00b4\u00d0\u00be\u00d0\u00bc\u00d0\u039c\u00d0\u00bd\u00d0\u00b0", "ADMINISTRADORES DO DOM\u00c3\u00adNIO", "DOMAIN_ADMINS", - "DOM\u00c4NEN-ADMINS" + "DOM\u00c4NEN-ADMINS", + "ADMINS DU DOMAINE" ], "513": [ "DOMAIN USERS", @@ -351,7 +353,7 @@ "GUESTS", "INVITADOS DEL DOMINIO", "\u00c7\u00b6\u00b2\u00c5\u009f\u009f\u00c4\u00be\u0086\u00c8\u00b3\u0093", - "INVIT\u00c3\u00a9S DU DOMAINE", + "INVIT\u00c9S DU DOMAINE", "DOM\u00c3\u00a4NEN-G\u00c3\u00a4STE", "TAMU DOMAIN", "\u00d0\u0093\u00d0\u00be\u00d1\u0081\u00d1\u0082\u00d0\u00b8 \u00d0\u00b4\u00d0\u00be\u00d0\u00bc\u00d0\u039c\u00d0\u00bd\u00d0\u00b0", @@ -380,7 +382,7 @@ "CONTROLADORES DE DOMINIO", "\u00c5\u009f\u009f\u00c6\u008e\u00a7\u00c5\u0088\u00b6\u00c5\u0099\u00a8", "\u00c7\u00b6\u00b2\u00c5\u009f\u009f\u00c6\u008e\u00a7\u00c5\u0088\u00b6\u00c7\u00ab\u0099", - "CONTR\u00c3\u00b4LEURS DE DOMAINE", + "CONTR\u00d4LEURS DE DOMAINE", "DOM\u00c3\u00a4NENCONTROLLER", "PENGENDALI DOMAIN", "CONTROLLER DI DOMINIO", @@ -394,7 +396,7 @@ "CERT PUBLISHERS", "CERT_ADMINS", "PUBLICADORES DE CERTIFICADOS", - "\u00c3\u0089DITEURS DE CERTIFICATS", + "\u00c9DITEURS DE CERTIFICATS", "ZERTIFIKATHERAUSGEBER", "PENERBIT SERTIFIKAT", "\u00d0\u0098\u00d0\u00b7\u00d0\u00b4\u00d0\u00b0\u00d1\u0082\u00d0\u039c\u00d0\u00bb\u00d0\u00b8 \u00d1\u0081\u00d0\u039c\u00d1\u0080\u00d1\u0082\u00d0\u00b8\u00d1\u0084\u00d0\u00b8\u00d0\u00ba\u00d0\u00b0\u00d1\u0082\u00d0\u00be\u00d0\u00b2", @@ -405,7 +407,7 @@ "SCHEMA ADMINS", "SCHEMA_ADMINS", "ADMINISTRADORES DE ESQUEMA", - "ADMINISTRATEURS DU SCH\u00c3\u00a9MA", + "ADMINISTRATEURS DU SCH\u00c9MA", "SCHEMA-ADMINS", "ADMIN SKEMA", "AMMINISTRATORI SCHEMA", @@ -418,7 +420,7 @@ "ENTERPRISE_ADMINS", "ADMINISTRADORES DE EMPRESAS", "\u00c4\u00bc\u0081\u00c4\u00b8\u009a\u00c7\u00ae\u00a1\u00c7\u0090\u0086\u00c5\u0091\u0098", - "ADMINISTRATEURS DE L\u00c2\u0080\u0099ENTREPRISE", + "ADMINISTRATEURS DE L\u2019ENTREPRISE", "ORGANISATIONSADMINISTRATOREN", "ADMIN PERUSAHAAN", "AMMINISTRATORI 
ENTERPRISE", @@ -430,7 +432,7 @@ "GROUP POLICY CREATOR OWNERS", "POLICY_ADMINS", "PROPIETARIOS DEL CREADOR DE DIRECTIVAS DE GRUPO", - "PROPRI\u00c3\u00a9TAIRES CR\u00c3\u00a9ATEURS DE LA STRAT\u00c3\u00a9GIE DE GROUPE", + "PROPRI\u00c9TAIRES CR\u00c9ATEURS DE LA STRAT\u00c9GIE DE GROUPE", "GRUPPENRICHTLINIENERSTELLER-BESITZER", "PEMILIK PEMBUAT KEBIJAKAN GRUP", "PROPRIETARI AUTORI CRITERI DI GRUPPO", @@ -442,7 +444,7 @@ "READ-ONLY DOMAIN CONTROLLERS", "CONTROLADORES DE DOMINIO DE SOLO LECTURA", "\u00c5\u008f\u00aa\u00c8\u00af\u00bb\u00c5\u009f\u009f\u00c6\u008e\u00a7\u00c5\u0088\u00b6\u00c5\u0099\u00a8", - "CONTR\u00c3\u00b4LEURS DE DOMAINE EN LECTURE SEULE", + "CONTR\u00d4LEURS DE DOMAINE EN LECTURE SEULE", "READ-ONLY-DOM\u00c3\u00a4NENCONTROLLER", "PENGONTROL DOMAIN BACA-SAJA", "CONTROLLER DI DOMINIO DI SOLA LETTURA", @@ -457,6 +459,7 @@ "\u00c5\u008f\u00af\u00c5\u0085\u008b\u00c9\u009a\u0086\u00c7\u009a\u0084\u00c6\u008e\u00a7\u00c5\u0088\u00b6\u00c5\u0099\u00a8", "\u00c5\u008f\u00af\u00c8\u00a4\u0087\u00c8\u00a3\u00bd\u00c6\u008e\u00a7\u00c5\u0088\u00b6\u00c7\u00ab\u0099", "CONTR\u00c3\u00b4LEURS CLONABLES", + "CONTR\u00d4LEURS DE DOMAINE CLONABLES", "KLONBARE CONTROLLER", "KLONBARE DOM\u00c4NENCONTROLLER", "PENGONTROL YANG DAPAT DIKLONING", @@ -485,6 +488,7 @@ "\u00c5\u00af\u0086\u00c9\u0092\u00a5\u00c7\u00ae\u00a1\u00c7\u0090\u0086\u00c5\u0091\u0098", "\u00c9\u0087\u0091\u00c9\u0091\u00b0\u00c7\u00ae\u00a1\u00c7\u0090\u0086\u00c5\u0093\u00a1", "ADMINISTRATEURS DE CL\u00c3\u00a9S", + "ADMINISTRATEURS CL\u00c9S", "SCHL\u00c3\u00bcSSELADMINISTRATOREN", "ADMIN KUNCI", "AMMINISTRATORI CHIAVE", @@ -502,7 +506,8 @@ "\u00d0\u009a\u00d0\u00bb\u00d1\u008e\u00d1\u0087\u00d0\u039c\u00d0\u00b2\u00d1\u008b\u00d0\u039c \u00d0\u00b0\u00d0\u00b4\u00d0\u00bc\u00d0\u00b8\u00d0\u00bd\u00d0\u00b8\u00d1\u0081\u00d1\u0082\u00d1\u0080\u00d0\u00b0\u00d1\u0082\u00d0\u00be\u00d1\u0080\u00d1\u008b 
\u00d0\u00bf\u00d1\u0080\u00d0\u039c\u00d0\u00b4\u00d0\u00bf\u00d1\u0080\u00d0\u00b8\u00d1\u008f\u00d1\u0082\u00d0\u00b8\u00d1\u008f", "\u00cc\u0097\u0094\u00cd\u0084\u00b0\u00cd\u0094\u0084\u00cb\u009d\u00bc\u00cc\u009d\u00b4\u00cc\u00a6\u0088 \u00cd\u0082\u00a4 \u00ca\u00b4\u0080\u00cb\u00a6\u00ac\u00cc\u009e\u0090", "ADMINISTRADORES DE CHAVE CORPORATIVA", - "ENTERPRISE_KEY_ADMINS" + "ENTERPRISE_KEY_ADMINS", + "ADMINISTRATEURS CL\u00c9S ENTERPRISE" ], "544": [ "ADMINISTRATORS", @@ -751,7 +756,7 @@ "GRUPO CON PERMISO PARA REPLICAR CONTRASE\u00c3\u00b1AS EN RODC", "\u00c5\u0085\u0081\u00c8\u00ae\u00b8\u00c7\u009a\u0084 RODC \u00c5\u00af\u0086\u00c7\u00a0\u0081\u00c5\u00a4\u008d\u00c5\u0088\u00b6\u00c7\u00bb\u0084", "\u00c5\u0085\u0081\u00c8\u00a8\u00b1\u00c7\u009a\u0084 RODC \u00c5\u00af\u0086\u00c7\u00a2\u00bc\u00c8\u00a4\u0087\u00c5\u00af\u00ab\u00c7\u00be\u00a4\u00c7\u039c\u0084", - "GROUPE DE R\u00c3\u00a9PLICATION DONT LE MOT DE PASSE RODC EST AUTORIS\u00c3\u00a9", + "GROUPE DE R\u00c9PLICATION DONT LE MOT DE PASSE RODC EST AUTORIS\u00c9", "ZUL\u00c3\u00a4SSIGE RODC-KENNWORTREPLIKATIONSGRUPPE", "GRUP REPLIKASI KATA SANDI RODC YANG DIIZINKAN", "GRUPPO DI REPLICA PASSWORD DI CONTROLLER DI DOMINIO DI SOLA LETTURA CONSENTITO", @@ -766,7 +771,7 @@ "GRUPO SIN PERMISO PARA REPLICAR CONTRASE\u00c3\u00b1AS EN RODC", "\u00c6\u008b\u0092\u00c7\u00bb\u009d\u00c7\u009a\u0084 RODC \u00c5\u00af\u0086\u00c7\u00a0\u0081\u00c5\u00a4\u008d\u00c5\u0088\u00b6\u00c7\u00bb\u0084", "\u00c6\u008b\u0092\u00c7\u039c\u0095\u00c7\u009a\u0084 RODC \u00c5\u00af\u0086\u00c7\u00a2\u00bc\u00c8\u00a4\u0087\u00c5\u00af\u00ab\u00c7\u00be\u00a4\u00c7\u039c\u0084", - "GROUPE DE R\u00c3\u00a9PLICATION DONT LE MOT DE PASSE RODC EST REFUS\u00c3\u00a9", + "GROUPE DE R\u00c9PLICATION DONT LE MOT DE PASSE RODC EST REFUS\u00c9", "ABGELEHNTE RODC-KENNWORTREPLIKATIONSGRUPPE", "GRUP REPLIKASI KATA SANDI RODC DITOLAK", "GRUPPO DI REPLICA PASSWORD DI CONTROLLER DI DOMINIO DI SOLA LETTURA NEGATO", diff --git 
class MacroGraphPage:
    """Accumulates graph paths, sharded into 256 buckets keyed by the
    first two hex digits of the md5 of their starting node id, and
    renders one graph page per bucket.

    Sharding keeps each generated HTML page small when a request
    returns a very large number of paths.
    """

    def __init__(self):
        self.baseName = ""
        # One bucket per possible 2-hex-digit md5 prefix ("00".."ff")
        self.paths_dict = {f"{i:02x}": [] for i in range(256)}

    def addPathsAndGetLink(self, baseName, paths_list):
        """Add paths that all share the same starting point and return
        the relative link of the HTML page that will contain them.

        To use with a list of paths having the same starting point.
        """
        key = getKeyFromID(paths_list[0].nodes[0].id)

        self.paths_dict[key] += paths_list
        self.baseName = baseName

        return baseName + "_" + key + ".html"

    def addPathsInBulk(self, baseName, paths_list):
        """Add paths one by one, bucketing each by its own starting node.

        Can be used with paths from different starting points.
        """
        self.baseName = baseName

        for path in paths_list:
            self.paths_dict[getKeyFromID(path.nodes[0].id)].append(path)

    def render_pages(self, arguments, requests_results, dico_description, title):
        """Render the 256 per-bucket graph pages (one per md5 prefix)."""
        for hex_str, paths in self.paths_dict.items():
            createGraphPage(
                arguments.cache_prefix,
                self.baseName + "_" + hex_str,
                title,
                dico_description,
                paths,
                requests_results,
            )


def getKeyFromID(id):
    """Return the first two hex digits of the md5 of the node id,
    used as the bucket key for MacroGraphPage."""
    return md5(str(id).encode()).hexdigest()[:2]
len(requests_results["nb_domain_collected"]) data["domain_or_domains"] = common_analysis.manage_plural( - data["nb_domains"], ("Domain", "Domains") + data["nb_domains"], ("domain", "domains") ) data["nb_dc"] = americanStyle(len(requests_results["nb_domain_controllers"])) @@ -149,6 +151,7 @@ def complete_data_evolution_time( dico_color_category_origin = raw_other_list_data[k]["color_category"] dico_color_category = {"on_premise": {}, "azure": {}} + for key in dico_color_category_origin: if key in dico_category_invert: category_repartition = category_repartition_dict[ @@ -249,6 +252,8 @@ def populate_dico_data(data, dico_data, arguments, requests_results, dico_rating """ dico_data["datetime"] = data["date"] dico_data["render_name"] = arguments.cache_prefix + dico_data["AD_miner_version"] = data["AD_miner_version"] + dico_data["AD_miner_commit"] = data["AD_miner_commit"] dico_data["general_statistic"] = { "nb_domains": len(requests_results["domains"]), "nb_dc": len(requests_results["nb_domain_controllers"]), @@ -560,7 +565,7 @@ def render( data[category_repartition][ global_risk_controls[risk_control]["panel_key"] ] = "" - red_status = f""" {risk_control.replace("_", " ").capitalize()}""" + red_status = f"""{risk_control.replace("_", " ").capitalize()}""" for issue in data[category_repartition][f"{risk_control}_list"]: custom_title = dico_name_description[issue].replace("$", "") data[category_repartition][ diff --git a/ad_miner/sources/modules/neo4j_class.py b/ad_miner/sources/modules/neo4j_class.py index 75573c4f..3a73bf12 100644 --- a/ad_miner/sources/modules/neo4j_class.py +++ b/ad_miner/sources/modules/neo4j_class.py @@ -17,6 +17,7 @@ from ad_miner.sources.modules.graph_class import Graph from ad_miner.sources.modules.node_neo4j import Node from ad_miner.sources.modules.path_neo4j import Path +from ad_miner.sources.modules.macro_graph_class import MacroGraphPage from ad_miner.sources.modules.utils import timer_format, grid_data_stringify from 
ad_miner.sources.modules.common_analysis import createGraphPage @@ -57,9 +58,7 @@ def pre_request(arguments): try: with driver.session() as session: with session.begin_transaction() as tx: - for record in tx.run( - "MATCH (a) WHERE a.lastlogon IS NOT NULL return toInteger(a.lastlogon) as last order by last desc LIMIT 1" - ): + for record in tx.run("MATCH (a) WHERE a.lastlogon IS NOT NULL return toInteger(a.lastlogon) as last order by last desc LIMIT 1"): date_lastlogon = record.data() driver.close() @@ -74,9 +73,7 @@ def pre_request(arguments): try: with driver.session() as session: with session.begin_transaction() as tx: - for record in tx.run( - "CALL dbms.components() YIELD versions RETURN versions[0] AS version" - ): + for record in tx.run("CALL dbms.components() YIELD versions RETURN versions[0] AS version"): neo4j_version = record.data() driver.close() @@ -87,13 +84,9 @@ def pre_request(arguments): sys.exit(-1) try: - extract_date = datetime.datetime.fromtimestamp(date_lastlogon["last"]).strftime( - "%Y%m%d" - ) + extract_date = datetime.datetime.fromtimestamp(date_lastlogon["last"]).strftime("%Y%m%d") except UnboundLocalError as e: - logger.print_warning( - "No LastLogon, the date of the report will be today's date" - ) + logger.print_warning("No LastLogon, the date of the report will be today's date") extract_date_timestamp = datetime.date.today() extract_date = extract_date_timestamp.strftime("%Y%m%d") @@ -101,17 +94,13 @@ def pre_request(arguments): with session.begin_transaction() as tx: total_objects = [] boolean_azure = False - for record in tx.run( - "MATCH (x) return labels(x), count(labels(x)) AS number_type" - ): + for record in tx.run("MATCH (x) return labels(x), count(labels(x)) AS number_type"): total_objects.append(record.data()) for record in tx.run("MATCH ()-[r]->() RETURN count(r) AS total_relations"): number_relations = record.data()["total_relations"] - for record in tx.run( - "MATCH (n) WHERE n.tenantid IS NOT NULL return n LIMIT 1" - ): 
class Nodes_cache:
    """Process-wide cache of AD Miner Node objects.

    Dict with AD Miner nodes objects (node + relation type); the key is
    the concatenated string "<id>_<relation_type>". Reusing a single
    object per (id, relation_type) pair optimizes RAM use and avoids
    creating redundant objects when the same node appears in many paths.
    """

    def __init__(self):
        # Maps "<id>_<relation_type>" -> Node
        self.ad_miner_nodes = {}

    def get_node(self, id, labels, name, domain, tenant_id, relation_type) -> Node:
        """Return the cached Node for (id, relation_type), creating and
        caching it on first request."""
        key = f"{id}_{relation_type}"
        if key not in self.ad_miner_nodes:
            self.ad_miner_nodes[key] = Node(
                id, labels, name, domain, tenant_id, relation_type
            )
        return self.ad_miner_nodes[key]
The correct syntax is --cluster ip1:port1:nCores1,ip2:port2:nCores2,etc") logger.print_error(e) sys.exit(-1) if len(self.cluster) == 1: @@ -161,10 +163,8 @@ def __init__(self, arguments, extract_date_int, boolean_azure): recursive_level = arguments.level self.password_renewal = int(arguments.renewal_password) - properties = "MemberOf|HasSession|AdminTo|AllExtendedRights|AddMember|ForceChangePassword|GenericAll|GenericWrite|Owns|WriteDacl|WriteOwner|ExecuteDCOM|AllowedToDelegate|ReadLAPSPassword|Contains|GPLink|AddAllowedToAct|AllowedToAct|SQLAdmin|ReadGMSAPassword|HasSIDHistory|CanPSRemote|AddSelf|WriteSPN|AddKeyCredentialLink|SyncLAPSPassword|CanExtractDCSecrets|CanLoadCode|CanLogOnLocallyOnDC|UnconstrainedDelegations|WriteAccountRestrictions|DumpSMSAPassword|Synced|AZRunsAs|SyncedToADUser|SyncedToEntraUser|GoldenCert|WriteGPLink|ADCSESC1|ADCSESC2|ADCSESC3|ADCSESC4|ADCSESC5|ADCSESC6a|ADCSESC6b|ADCSESC7|ADCSESC8|ADCSESC9a|ADCSESC9b|ADCSESC10a|ADCSESC10b|ADCSESC11|ADCSESC12|ADCSESC13|ADCSESC15|DCSync" - path_to_group_operators_props = properties.replace( - "|CanExtractDCSecrets|CanLoadCode|CanLogOnLocallyOnDC", "" - ) + properties = 
"MemberOf|HasSession|AdminTo|AllExtendedRights|AddMember|ForceChangePassword|GenericAll|GenericWrite|Owns|WriteDacl|WriteOwner|ExecuteDCOM|AllowedToDelegate|ReadLAPSPassword|Contains|GPLink|AddAllowedToAct|AllowedToAct|SQLAdmin|ReadGMSAPassword|HasSIDHistory|CanPSRemote|AddSelf|WriteSPN|AddKeyCredentialLink|SyncLAPSPassword|CanExtractDCSecrets|CanLoadCode|CanLogOnLocallyOnDC|UnconstrainedDelegations|WriteAccountRestrictions|DumpSMSAPassword|Synced|AZRunsAs|SyncedToADUser|SyncedToEntraUser|GoldenCert|WriteGPLink|ADCSESC1|ADCSESC2|ADCSESC3|ADCSESC4|ADCSESC5|ADCSESC6a|ADCSESC6b|ADCSESC7|ADCSESC8|ADCSESC9a|ADCSESC9b|ADCSESC10a|ADCSESC10b|ADCSESC11|ADCSESC12|ADCSESC13|ADCSESC15|DCSync|CoerceToTGT|WriteOwnerRaw|OwnsRaw|SameForestTrust|CoerceAndRelayNTLMToLDAPS|SpoofSIDHistory|CoerceAndRelayNTLMToLDAP|CoerceAndRelayNTLMToSMB|CoerceAndRelayNTLMToADCS|CanApplyGPO|ContainsIdentity|GPOAppliesTo|HasTrustKeys|MemberOfLocalGroup|PropagatesACEsTo|ClaimSpecialIdentity|ProtectAdminGroups" + path_to_group_operators_props = properties.replace("|CanExtractDCSecrets|CanLoadCode|CanLogOnLocallyOnDC", "") if boolean_azure: properties += "|AZAKSContributor|AZAddMembers|AZAddOwner|AZAddSecret|AZAutomationContributor|AZAvereContributor|AZCloudAppAdmin|AZContains|AZContributor|AZExecuteCommand|AZGetCertificates|AZGetKeys|AZGetSecrets|AZGlobalAdmin|AZHasRole|AZKeyVaultContributor|AZLogicAppContributor|AZMGAddMember|AZMGAddOwner|AZMGAddSecret|AZMGAppRoleAssignment_ReadWrite_All|AZMGApplication_ReadWrite_All|AZMGDirectory_ReadWrite_All|AZMGGrantAppRoles|AZMGGrantRole|AZMGGroupMember_ReadWrite_All|AZMGGroup_ReadWrite_All|AZMGRoleManagement_ReadWrite_Directory|AZMGServicePrincipalEndpoint_ReadWrite_All|AZManagedIdentity|AZMemberOf|AZNodeResourceGroup|AZOwner|AZOwns|AZPrivilegedAuthAdmin|AZPrivilegedRoleAdmin|AZResetPassword|AZRunAs|AZScopedTo|AZUserAccessAdministrator|AZVMAdminLogin|AZVMContributor|AZWebsiteContributor" @@ -174,22 +174,16 @@ def __init__(self, arguments, extract_date_int, 
boolean_azure): self.properties = properties - inbound_control_edges = "MemberOf|AddSelf|WriteSPN|AddKeyCredentialLink|AddMember|AllExtendedRights|ForceChangePassword|GenericAll|GenericWrite|WriteDacl|WriteOwner|Owns|HasSIDHistory" + inbound_control_edges = "MemberOf|AddSelf|WriteSPN|AddKeyCredentialLink|AddMember|AllExtendedRights|ForceChangePassword|GenericAll|GenericWrite|WriteDacl|WriteOwner|Owns|HasSIDHistory|WriteOwnerRaw|OwnsRaw" try: - self.all_requests = json.loads( - (MODULES_DIRECTORY / "requests.json").read_text(encoding="utf-8") - ) + self.all_requests = json.loads((MODULES_DIRECTORY / "requests.json").read_text(encoding="utf-8")) del self.all_requests["template"] for request_key in self.all_requests.keys(): # Replace methods with python methods - self.all_requests[request_key]["output_type"] = { - "Graph": Graph, - "list": list, - "dict": dict, - }.get( + self.all_requests[request_key]["output_type"] = {"Graph": Graph, "list": list, "dict": dict, "mixed": "mixed", "fastGDS": "fastGDS"}.get( self.all_requests[request_key]["output_type"], ) # Replace variables with their values in requests @@ -215,30 +209,16 @@ def __init__(self, arguments, extract_date_int, boolean_azure): for field in fields_to_replace: if field in self.all_requests[request_key]: - self.all_requests[request_key][field] = self.all_requests[ - request_key - ][field].replace( - variable, str(variables_to_replace[variable]) - ) + self.all_requests[request_key][field] = self.all_requests[request_key][field].replace(variable, str(variables_to_replace[variable])) # Replace postprocessing with python method if "postProcessing" in self.all_requests[request_key]: - self.all_requests[request_key]["postProcessing"] = { - "Neo4j.setDangerousInboundOnGPOs": self.setDangerousInboundOnGPOs, - "Neo4j.check_gds_plugin": self.check_gds_plugin, - "Neo4j.check_unkown_relations": self.check_unkown_relations, - "Neo4j.check_all_domain_objects_exist": self.check_all_domain_objects_exist, - 
"Neo4j.check_relation_type": self.check_relation_type, - }.get(self.all_requests[request_key]["postProcessing"]) + self.all_requests[request_key]["postProcessing"] = {"Neo4j.setDangerousInboundOnGPOs": self.setDangerousInboundOnGPOs, "Neo4j.check_gds_plugin": self.check_gds_plugin, "Neo4j.check_unkown_relations": self.check_unkown_relations, "Neo4j.check_all_domain_objects_exist": self.check_all_domain_objects_exist, "Neo4j.check_relation_type": self.check_relation_type, "Neo4j.create_nodes_cache": self.create_nodes_cache}.get(self.all_requests[request_key]["postProcessing"]) except json.JSONDecodeError as error: - logger.print_error( - f"Error while parsing neo4j requests from requests.json : \n{error}" - ) + logger.print_error(f"Error while parsing neo4j requests from requests.json : \n{error}") sys.exit(-1) except FileNotFoundError: - logger.print_error( - f"Neo4j request file not found : {MODULES_DIRECTORY / 'requests.json'} no such file." - ) + logger.print_error(f"Neo4j request file not found : {MODULES_DIRECTORY / 'requests.json'} no such file.") sys.exit(-1) if arguments.gpo_low: del self.all_requests["unpriv_users_to_GPO_init"] @@ -250,20 +230,12 @@ def __init__(self, arguments, extract_date_int, boolean_azure): else: # Deep version of GPO requests del self.all_requests["unpriv_users_to_GPO"] try: - self.edges_rating = json.loads( - (MODULES_DIRECTORY / "exploitability_ratings.json").read_text( - encoding="utf-8" - ) - ) + self.edges_rating = json.loads((MODULES_DIRECTORY / "exploitability_ratings.json").read_text(encoding="utf-8")) except json.JSONDecodeError as error: - logger.print_error( - f"Error while parsing exploitability ratings from exploitability_ratings.json : \n{error}" - ) + logger.print_error(f"Error while parsing exploitability ratings from exploitability_ratings.json : \n{error}") sys.exit(-1) except FileNotFoundError: - logger.print_error( - f"Exploitability ratings file not found : {MODULES_DIRECTORY / 'exploitability_ratings.json'} no 
such file." - ) + logger.print_error(f"Exploitability ratings file not found : {MODULES_DIRECTORY / 'exploitability_ratings.json'} no such file.") sys.exit(-1) try: @@ -283,13 +255,14 @@ def __init__(self, arguments, extract_date_int, boolean_azure): logger.print_error(e) sys.exit(-1) + global nodes_cache + nodes_cache = Nodes_cache() + def close(self): self.driver.close() @staticmethod - def executeParallelRequest( - value, identifier, query, arguments, output_type, server, gds_cost_type_table - ): + def executeParallelRequest(value, identifier, query, arguments, output_type, server, gds_cost_type_table): """This function is used in multiprocessing pools to execute multiple query parts in parallel""" q = query.replace("PARAM1", str(value)).replace("PARAM2", str(identifier)) @@ -304,25 +277,35 @@ def executeParallelRequest( with session.begin_transaction() as tx: if output_type is Graph: for record in tx.run(q): - result.append(record["p"]) - # Quick way to handle multiple records - # (e.g., RETURN p, p2) - if "p2" in record: - result.append(record["p2"]) + for column in record.keys(): + result.append(record[column]) try: result = Neo4j.computePathObject(result, gds_cost_type_table) except Exception as e: - logger.print_error( - "An error while computing path object of this query:\n" + q - ) + logger.print_error("An error while computing path object of this query:\n" + q) logger.print_error(e) else: result = tx.run(q) if output_type is list: result = result.values() - else: # then it should be dict ? 
+ elif output_type is dict: result = result.data() + elif output_type == "mixed": + # This is ugly because you can't iterate multiple times on a Neo4j Result object + final_result = [] + for record in result: + temp_data = {} + keys = record.keys() + for key in keys: + if all(char == "p" for char in key): # Then it's a path :) + temp_data[key] = Neo4j.computePathObject([record[key]], gds_cost_type_table)[0] + else: + temp_data[key] = record[key] + final_result.append(temp_data) + result = final_result + else: + logger.print_error(f"Request ignored, unknown output type: {output_type}.") return result @@ -333,10 +316,7 @@ def process_request(self, request_key): if result is None: result = [] if result is not False: # Sometimes result = [] - logger.print_debug( - "From cache : %s - %d objects" - % (self.all_requests[request_key]["name"], len(result)) - ) + logger.print_debug("From cache : %s - %d objects" % (self.all_requests[request_key]["name"], len(result))) self.all_requests[request_key]["result"] = result if "postProcessing" in self.all_requests[request_key]: self.all_requests[request_key]["postProcessing"](self, result) @@ -379,6 +359,10 @@ def process_request(self, request_key): space = np.linspace(0, scopeSize, part_number + 1, dtype=int) output_type = self.all_requests[request_key]["output_type"] + # Fallback to classic mode if fastGDS and GDS not installed + if not self.gds and output_type == "fastGDS": + output_type = Graph + # Divide the request with SKIP & LIMIT for i in range(len(space) - 1): items.append( @@ -405,14 +389,9 @@ def process_request(self, request_key): if result is None: result = [] - if ( - "is_a_gds_request" in request - and self.gds - and "reverse_path" in request - and request["reverse_path"] - ): + if "is_a_gds_request" in request and self.gds and "reverse_path" in request and request["reverse_path"]: for path in result: - path.reverse() + path.reverse(nodes_cache) if "postProcessing" in request: request["postProcessing"](self, result) 
@@ -425,9 +404,7 @@ def process_request(self, request_key): tx.run(q) self.cache.createCacheEntry(request_key, result) - logger.print_warning( - timer_format(time.time() - start) + " - %d objects" % len(result) - ) + logger.print_warning(timer_format(time.time() - start) + " - %d objects" % len(result)) request["result"] = result return result @@ -440,18 +417,47 @@ def simpleRequest(self, request_key): with session.begin_transaction() as tx: if output_type is Graph: for record in tx.run(request["request"]): - result.append(record["p"]) - # Quick way to handle multiple records - # (e.g., RETURN p, p2) - if "p2" in record: - result.append(record["p2"]) - result = self.computePathObject(result, self.gds_cost_type_table) + for column in record.keys(): + result.append(record[column]) + try: + result = Neo4j.computePathObject(result, self.gds_cost_type_table) + except Exception as e: + logger.print_error("An error while computing path object of this query:\n" + request["request"]) + logger.print_error(e) else: result = tx.run(request["request"]) if output_type is list: result = result.values() - else: + elif output_type is dict: result = result.data() + elif output_type == "fastGDS": + temp_dicts = result.data() + logger.print_debug("Successfully retrieved data in python using fastGDS mode") + result = [] + for d in tqdm.tqdm(temp_dicts): + nodes_ID = d["nodeIds"] + costs = d["costs"] + costs.pop(0) # For whatever reason first value is always 0 + # Small adjustment: GDS returns the cumulative cost for each node + for i in range(1, len(costs)): + costs[-i] = costs[-i] - costs[-(i + 1)] + + result.append(Neo4j.constructPathFromFastGDS(self, nodes_ID, costs, self.gds_cost_type_table)) + elif output_type == "mixed": + # This is ugly because you can't iterate multiple times on a Neo4j Result object + final_result = [] + for record in result: + temp_data = {} + keys = record.keys() + for key in keys: + if all(char == "p" for char in key): # Then it's a path :) + temp_data[key] 
= Neo4j.computePathObject([record[key]], self.gds_cost_type_table)[0] + else: + temp_data[key] = record[key] + final_result.append(temp_data) + result = final_result + else: + logger.print_error(f"Request ignored, unknown output type: {output_type}.") return result @staticmethod @@ -461,18 +467,7 @@ def ClusterWriteRequest(self, request_key): starting_time = time.time() cluster_state = {server: False for server in self.cluster.keys()} query = self.all_requests[request_key]["request"] - items = [ # Create all requests to do - ( - -1, - -1, - query, - self.arguments, - self.all_requests[request_key]["output_type"], - server, - self.gds_cost_type_table, - ) - for server in self.cluster.keys() - ] + items = [(-1, -1, query, self.arguments, self.all_requests[request_key]["output_type"], server, self.gds_cost_type_table) for server in self.cluster.keys()] # Create all requests to do with mp.Pool(len(self.cluster)) as pool: result = [] @@ -484,13 +479,7 @@ def ClusterWriteRequest(self, request_key): for server in tasks.keys(): if tasks[server].ready() and not cluster_state[server]: cluster_state[server] = True - logger.print_success( - "Write query executed by " - + server - + " in " - + str(round(time.time() - starting_time, 2)) - + "s." 
- ) + logger.print_success("Write query executed by " + server + " in " + str(round(time.time() - starting_time, 2)) + "s.") temp_results = [task.get() for task in tasks.values()] result = temp_results[0] # Same request executed on every node, we only need the result once @@ -513,9 +502,7 @@ def parallelRequestCluster(self, items): temp_results = [] - def process_completed_task( - number_of_retrieved_objects, task, active_jobs, jobs_done, pbar - ): + def process_completed_task(number_of_retrieved_objects, task, active_jobs, jobs_done, pbar): temporary_result = task.get() # Update displayed number of retrieved objects if output_type == list: @@ -546,12 +533,7 @@ def process_completed_task( ) + "% " ) - pbar.set_description( - cluster_participation - + "| " - + str(number_of_retrieved_objects) - + " objects" - ) + pbar.set_description(cluster_participation + "| " + str(number_of_retrieved_objects) + " objects") pbar.refresh() pbar.update(1) return number_of_retrieved_objects @@ -584,26 +566,11 @@ def process_completed_task( if len(active_jobs[server]) < max_jobs: item = requestList.pop() - ( - value, - identifier, - query, - arguments, - output_type, - self.gds_cost_type_table, - ) = item + (value, identifier, query, arguments, output_type, self.gds_cost_type_table, executeParallelRequest) = item task = pool.apply_async( self.executeParallelRequest, - ( - value, - identifier, - query, - arguments, - output_type, - server, - self.gds_cost_type_table, - ), + (value, identifier, query, arguments, output_type, server, self.gds_cost_type_table, executeParallelRequest), ) temp_results.append(task) active_jobs[server].append(task) @@ -632,18 +599,7 @@ def process_completed_task( def parallelRequestLegacy(self, items): """parallelRequestLegacy is the default way of slicing requests in smaller requests to parallelize it""" - items = [ # Add bolt to items - ( - value, - identifier, - query, - arguments, - output_type, - self.arguments.bolt, - gds_cost_type_table, - ) - for 
value, identifier, query, arguments, output_type, gds_cost_type_table in items - ] + items = [(value, identifier, query, arguments, output_type, self.arguments.bolt, gds_cost_type_table) for value, identifier, query, arguments, output_type, gds_cost_type_table in items] # Add bolt to items with mp.Pool(mp.cpu_count()) as pool: result = [] @@ -741,9 +697,7 @@ def verify_integrity(self): stopping_time = time.time() - logger.print_warning( - "Integrity check took " + str(round(stopping_time - startig_time, 2)) + "s" - ) + logger.print_warning("Integrity check took " + str(round(stopping_time - startig_time, 2)) + "s") @staticmethod def parallelWriteRequestCluster(self, items): @@ -756,13 +710,7 @@ def parallelWriteRequestCluster(self, items): output_type = items[0][4] - small_requests_to_do = { - server: [ - (value, identifier, query, arguments, output_type, server) - for value, identifier, query, arguments, output_type in items - ] - for server in self.cluster.keys() - } + small_requests_to_do = {server: [(value, identifier, query, arguments, output_type, server) for value, identifier, query, arguments, output_type in items] for server in self.cluster.keys()} cluster_state = {server: False for server in self.cluster.keys()} pbar = tqdm.tqdm( @@ -789,23 +737,10 @@ def parallelWriteRequestCluster(self, items): if task.ready(): active_jobs[server].remove(task) pbar.update(1) - if ( - len(small_requests_to_do[server]) == 0 - and len(active_jobs[server]) == 0 - and not cluster_state[server] - ): + if len(small_requests_to_do[server]) == 0 and len(active_jobs[server]) == 0 and not cluster_state[server]: cluster_state[server] = True - logger.print_success( - "Write request executed by " - + server - + " in " - + str(round(time.time() - starting_time, 2)) - + "s." 
- ) - if ( - len(active_jobs[server]) < max_jobs - and len(small_requests_to_do[server]) > 0 - ): + logger.print_success("Write request executed by " + server + " in " + str(round(time.time() - starting_time, 2)) + "s.") + if len(active_jobs[server]) < max_jobs and len(small_requests_to_do[server]) > 0: item = small_requests_to_do[server].pop() ( value, @@ -818,15 +753,7 @@ def parallelWriteRequestCluster(self, items): task = pool.apply_async( self.executeParallelRequest, - ( - value, - identifier, - query, - arguments, - output_type, - server, - self.gds_cost_type_table, - ), + (value, identifier, query, arguments, output_type, server, self.gds_cost_type_table), ) if server == next(iter(self.cluster)): temp_results.append(task) @@ -842,19 +769,9 @@ def parallelWriteRequestCluster(self, items): if task.ready(): active_jobs[server].remove(task) pbar.update(1) - if ( - len(small_requests_to_do[server]) == 0 - and len(active_jobs[server]) == 0 - and not cluster_state[server] - ): + if len(small_requests_to_do[server]) == 0 and len(active_jobs[server]) == 0 and not cluster_state[server]: cluster_state[server] = True - logger.print_success( - "Write request executed to " - + server - + " in " - + str(round(time.time() - starting_time, 2)) - + "s." - ) + logger.print_success("Write request executed to " + server + " in " + str(round(time.time() - starting_time, 2)) + "s.") for r in temp_results: result += r.get() pbar.close() @@ -870,38 +787,35 @@ def computePathObject(self, Paths, gds_cost_type_table): nodes = [] for relation in path.relationships: rtype = relation.type + if "PATH_" in rtype: gds_identifier = round(float(relation.get("cost")), 3) gds_identifier = round(1000 * (gds_identifier % 1)) rtype = gds_cost_type_table[gds_identifier] - for node in relation.nodes: - label = [i for i in node.labels if "Base" not in i][ + label = [ + i for i in node.labels if "Base" not in i and "Tag_Tier_Zero" not in i + ][ 0 ] # e.g. 
: {"User","Base"} -> "User" or {"User","AZBase"} -> "User" - nodes.append( - Node( - node.id, - label, - node["name"], - node["domain"], - node["tenantid"], - rtype, - ) - ) + node = nodes_cache.get_node(node.id, label, node["name"], node["domain"], node["tenantid"], rtype) + nodes.append(node) break - nodes.append( - Node( - path.end_node.id, - [i for i in path.end_node.labels if "Base" not in i][0], - path.end_node["name"], - path.end_node["domain"], - path.end_node["tenantid"], - "", - ) + node = nodes_cache.get_node( + path.end_node.id, + [ + i + for i in path.end_node.labels + if "Base" not in i and "Tag_Tier_Zero" not in i + ][0], + path.end_node["name"], + path.end_node["domain"], + path.end_node["tenantid"], + "", ) + nodes.append(node) final_paths.append(Path(nodes)) @@ -955,10 +869,7 @@ def check_unkown_relations(self, result): for i in range(len(relation_list)): r = relation_list[i] if r not in self.edges_rating.keys(): - logger.print_warning( - r - + " relation type is unknown and will use default exploitability rating." 
- ) + logger.print_warning(r + " relation type is unknown and will use default exploitability rating.") q = "MATCH ()-[r:" q += str(r) q += "]->() SET r.cost=r.cost + " @@ -972,9 +883,7 @@ def compute_common_cache(self, requests_results): It adds it to the requests_results dictionnary It is mainly populated with legacy code from domains.py, computers.py, etc """ - computers_with_last_connection_date = requests_results[ - "computers_not_connected_since" - ] + computers_with_last_connection_date = requests_results["computers_not_connected_since"] groups = requests_results["nb_groups"] computers_nb_domain_controllers = requests_results["nb_domain_controllers"] users_dormant_accounts = requests_results["dormant_accounts"] @@ -986,11 +895,7 @@ def compute_common_cache(self, requests_results): computers_with_last_connection_date, ) ) - users_not_connected_for_3_months = ( - [user["name"] for user in users_dormant_accounts if user["days"] > 90] - if users_dormant_accounts is not None - else None - ) + users_not_connected_for_3_months = [user["name"] for user in users_dormant_accounts if user["days"] > 90] if users_dormant_accounts is not None else None dico_ghost_computer = {} if computers_not_connected_since_60 != []: @@ -1112,41 +1017,57 @@ def compute_common_cache(self, requests_results): requests_results["dico_gpo_to_da"] = dico_gpo_to_da logger.print_debug("[Done]") + logger.print_debug("Computing common cache") + # Some of the following is duplicated / unoptimized / dirty - if not requests_results["users_admin_on_servers_1"]: - requests_results["users_admin_on_servers_1"] = [] - if not requests_results["users_admin_on_servers_2"]: - requests_results["users_admin_on_servers_2"] = [] + try: + if "users_admin_on_servers_1" not in requests_results: + requests_results["users_admin_on_servers_1"] = [] + if "users_admin_on_servers_2" not in requests_results: + requests_results["users_admin_on_servers_2"] = [] + + users_admin_on_servers_all_data = ( + 
requests_results["users_admin_on_servers_1"] + + requests_results["users_admin_on_servers_2"] + ) + users_admin_on_servers_all_data = [ + dict(t) for t in {tuple(d.items()) for d in users_admin_on_servers_all_data} + ] + users_admin_on_servers = generic_computing.getCountValueFromKey( + users_admin_on_servers_all_data, "computer" + ) + users_admin_on_servers_list = generic_computing.getListAdminTo( + users_admin_on_servers_all_data, + "computer", + "user", + ) - users_admin_on_servers_all_data = ( - requests_results["users_admin_on_servers_1"] - + requests_results["users_admin_on_servers_2"] - ) - users_admin_on_servers_all_data = [ - dict(t) for t in {tuple(d.items()) for d in users_admin_on_servers_all_data} - ] - users_admin_on_servers = generic_computing.getCountValueFromKey( - users_admin_on_servers_all_data, "computer" - ) - users_admin_on_servers_list = generic_computing.getListAdminTo( - users_admin_on_servers_all_data, - "computer", - "user", - ) + if users_admin_on_servers is not None and users_admin_on_servers != {}: + servers_with_most_paths = users_admin_on_servers[ + list(users_admin_on_servers.keys())[0] + ] + else: + servers_with_most_paths = [] - if users_admin_on_servers is not None and users_admin_on_servers != {}: - servers_with_most_paths = users_admin_on_servers[ - list(users_admin_on_servers.keys())[0] - ] - else: - servers_with_most_paths = [] + requests_results["users_admin_on_servers_list"] = users_admin_on_servers_list + requests_results["servers_with_most_paths"] = servers_with_most_paths + requests_results["users_admin_on_servers"] = users_admin_on_servers + requests_results["users_admin_on_servers_all_data"] = users_admin_on_servers_all_data + except KeyError as ke: + logger.print_error(f"KeyError while generating users admin on servers data: {ke}") - requests_results["users_admin_on_servers_list"] = users_admin_on_servers_list - requests_results["servers_with_most_paths"] = servers_with_most_paths - 
requests_results["users_admin_on_servers"] = users_admin_on_servers - requests_results["users_admin_on_servers_all_data"] = ( - users_admin_on_servers_all_data - ) + # Dico for ACL anomaly (and potential others) to known how many admin privs a user have + tmp_admin_computer_names = {} + + for d in requests_results["users_admin_on_computers"]: + if d["user"] not in tmp_admin_computer_names: + tmp_admin_computer_names[d["user"]] = {} + tmp_admin_computer_names[d["user"]][d["computer"]] = 0 + + users_admin_on_computers_count = {} + for user in tmp_admin_computer_names: + users_admin_on_computers_count[user] = len(tmp_admin_computer_names[user].keys()) + requests_results["users_admin_on_computers_count"] = users_admin_on_computers_count # Dico for ACL anomaly and futur other controls to retrieve paths to DA on computer ID dico_paths_computers_to_DA = {} @@ -1161,9 +1082,7 @@ def compute_common_cache(self, requests_results): dico_is_user_admin_on_computer = {} for d in users_admin_on_computers: dico_is_user_admin_on_computer[d["user"]] = True - requests_results["dico_is_user_admin_on_computer"] = ( - dico_is_user_admin_on_computer - ) + requests_results["dico_is_user_admin_on_computer"] = dico_is_user_admin_on_computer # Dico for kerberoastable users to add them to graphs dico_is_kerberoastable = {} @@ -1172,14 +1091,41 @@ def compute_common_cache(self, requests_results): requests_results["dico_is_kerberoastable"] = dico_is_kerberoastable - list_computers_admin_computers = requests_results[ - "computers_admin_on_computers" - ] - computers_admin_to_count = generic_computing.getCountValueFromKey( - list_computers_admin_computers, "source_computer" - ) + # Dico for disabled users + dico_is_disabled = {} + for d in requests_results["nb_disabled_accounts"]: + dico_is_disabled[d["name"]] = True + + requests_results["dico_is_disabled"] = dico_is_disabled + + list_computers_admin_computers = requests_results["computers_admin_on_computers"] + computers_admin_to_count = 
generic_computing.getCountValueFromKey(list_computers_admin_computers, "source_computer") requests_results["computers_admin_to_count"] = computers_admin_to_count + logger.print_debug("[Done]") + logger.print_debug("Generating objects to DA generic pages") + + macrographpages_objects_to_DA = MacroGraphPage() + + macrographpages_objects_to_DA.addPathsInBulk( + "object_to_domain_admin", objects_to_domain_admin + ) + + dico_description_to_DA = { + "description": "Paths leading to domain admin", + "risk": "Compromission paths to domain admin represent the exposed attack surface that the AD environment presents to the attacker in order to gain privileges in the domain(s). If an attacker exploits one of these paths, they will be able to gain privileges in the domain(s) and cause some serious damage.", + "poa": "Review the paths, make sure they are not exploitable. If they are, cut the link between the Active Directory objects in order to reduce the attack surface.", + } + + macrographpages_objects_to_DA.render_pages( + self.arguments, + requests_results, + dico_description_to_DA, + "Path to Domain Admin privileges", + ) + + logger.print_debug("[Done]") + @staticmethod def check_all_domain_objects_exist(self, result): objects_with_unexisting_domains = result[0][0] @@ -1208,7 +1154,6 @@ def check_relation_type(self, result): "ManageCA", "ManageCertificates", "RootCAFor", - "TrustedBy", "GetChanges", "GetChangesInFilteredSet", "GetChangesAll", @@ -1221,6 +1166,9 @@ def check_relation_type(self, result): "RemoteInteractiveLogonPrivilege", "EnrollOnBehalfOf", "ManageCA", + "RemoteInteractiveLogonRight", + "CrossForestTrust", + "LocalToComputer", ] if not self.arguments.rdp: @@ -1239,7 +1187,44 @@ def check_relation_type(self, result): unused_relations = unused_relations[:-2] if len(unused_relations) > 0: - logger.print_error( - "The following relations are not used (yet) for general AD Miner path finding:" - ) + logger.print_error("The following relations are not used (yet) 
for general AD Miner path finding:") logger.print_error(unused_relations) + + @staticmethod + def create_nodes_cache(self, result): + self.nodes_dict = {} + for d in result: + self.nodes_dict[d["id"]] = {"labels": d["labels"], "name": d["name"], "domain": d["domain"], "tenant_id": d["tenant_id"]} + + @staticmethod + def constructPathFromFastGDS(self, nodes_list, costs_list, gds_cost_type_table): + + nodes = [] + for i in range(len(nodes_list) - 1): + gds_identifier = round(float(costs_list[i]), 3) + gds_identifier = round(1000 * (gds_identifier % 1)) + rtype = gds_cost_type_table[gds_identifier] + + node_id = nodes_list[i] + labels = self.nodes_dict[node_id]["labels"] + label = [i for i in labels if "Base" not in i and "Tag_Tier_Zero" not in i][0] + name = self.nodes_dict[node_id]["name"] + domain = self.nodes_dict[node_id]["domain"] + tenant_id = self.nodes_dict[node_id]["tenant_id"] + + node = nodes_cache.get_node(node_id, label, name, domain, tenant_id, rtype) + + nodes.append(node) + + last_node_id = nodes_list[-1] + labels = self.nodes_dict[last_node_id]["labels"] + label = [i for i in labels if "Base" not in i and "Tag_Tier_Zero" not in i][0] + name = self.nodes_dict[last_node_id]["name"] + domain = self.nodes_dict[last_node_id]["domain"] + tenant_id = self.nodes_dict[last_node_id]["tenant_id"] + + node = nodes_cache.get_node(last_node_id, label, name, domain, tenant_id, "") + + nodes.append(node) + + return Path(nodes) diff --git a/ad_miner/sources/modules/path_neo4j.py b/ad_miner/sources/modules/path_neo4j.py index 8d849e84..5b39971a 100755 --- a/ad_miner/sources/modules/path_neo4j.py +++ b/ad_miner/sources/modules/path_neo4j.py @@ -13,8 +13,12 @@ def __eq__(self, other): ret = ret and (self.nodes[i] == other.nodes[i]) return ret - def reverse(self): + def reverse(self, nodes_cache): self.nodes.reverse() + new_nodes = [] for i in range(len(self.nodes) - 1): - self.nodes[i].relation_type = self.nodes[i + 1].relation_type - self.nodes[-1].relation_type = "" + 
node = nodes_cache.get_node(self.nodes[i].id, self.nodes[i].labels, self.nodes[i].name, self.nodes[i].domain, self.nodes[i].tenant_id, self.nodes[i + 1].relation_type) + new_nodes.append(node) + node = nodes_cache.get_node(self.nodes[-1].id, self.nodes[-1].labels, self.nodes[-1].name, self.nodes[-1].domain, self.nodes[-1].tenant_id, "") + new_nodes.append(node) + self.nodes = new_nodes diff --git a/ad_miner/sources/modules/requests.json b/ad_miner/sources/modules/requests.json index cdc5dacc..41394b26 100644 --- a/ad_miner/sources/modules/requests.json +++ b/ad_miner/sources/modules/requests.json @@ -30,7 +30,7 @@ }, "preparation_request_nodes": { "name": "Clean AD Miner custom attributes", - "request": "MATCH (n) REMOVE n.is_server,n.is_dc,n.is_da,n.is_dag,n.can_dcsync,n.path_candidate,n.ou_candidate,n.contains_da_dc,n.is_da_dc,n.ghost_computer,n.has_path_to_da,n.is_admin,n.is_group_operator,n.members_count,n.has_members,n.user_members_count,n.is_operator_member,n.is_group_account_operator,n.is_group_backup_operator,n.is_group_server_operator,n.is_group_print_operator,n.is_account_operator,n.is_backup_operator,n.is_server_operator,n.is_print_operator,n.gpolinks_count,n.has_links,n.dangerous_inbound, n.is_adminsdholder,n.is_dnsadmin,n.da_types,n.vulnerable_ou,n.can_abuse_adcs,n.dac,n.dac_types,n.is_adcs,n.target_kud,n.is_gag,n.is_msol,n.is_rbcd_target,n.is_dcg", + "request": "MATCH (n) REMOVE n.is_server,n.is_dc,n.is_da,n.is_dag,n.can_dcsync,n.path_candidate,n.ou_candidate,n.contains_da_dc,n.is_da_dc,n.ghost_computer,n.has_path_to_da,n.is_admin,n.is_group_operator,n.members_count,n.has_members,n.user_members_count,n.is_operator_member,n.is_group_account_operator,n.is_group_backup_operator,n.is_group_server_operator,n.is_group_print_operator,n.is_account_operator,n.is_backup_operator,n.is_server_operator,n.is_print_operator,n.gpolinks_count,n.has_links,n.dangerous_inbound, 
n.is_adminsdholder,n.is_dnsadmin,n.da_types,n.vulnerable_ou,n.can_abuse_adcs,n.dac,n.dac_types,n.is_adcs,n.target_kud,n.is_gag,n.is_msol,n.is_rbcd_target,n.is_dcg,n.esc7", "output_type": "list", "is_a_write_request": "true" }, @@ -40,12 +40,6 @@ "output_type": "list", "is_a_write_request": "true" }, - "delete_ADLocalGroup": { - "name": "Delete ADLocalGroup objects", - "request": "MATCH (d:ADLocalGroup) DETACH DELETE d", - "output_type": "list", - "is_a_write_request": "true" - }, "check_relation_types": { "name": "Checking relation types", "request": "MATCH ()-[r]->() RETURN DISTINCT type(r) as relationType", @@ -137,12 +131,6 @@ "output_type": "list", "is_a_write_request": "true" }, - "set_unconstrained_delegations": { - "name": "ADD UnconstrainedDelegations relation from objects with KUD to the corresponding domain", - "request": "MATCH (m{unconstraineddelegation:true,is_dc:false}) MATCH (d:Domain) WHERE m.domain = d.domain MERGE (m)-[:UnconstrainedDelegations]->(d) ", - "output_type": "list", - "is_a_write_request": "true" - }, "set_is_adminsdholder": { "name": "Set is_adminsdholder to Container with AdminSDHOLDER in name", "request": "MATCH (c:Container) WHERE c.name STARTS WITH \"ADMINSDHOLDER@\" SET c.is_adminsdholder=true ", @@ -254,12 +242,6 @@ "output_type": "list", "is_a_write_request": "true" }, - "del_fake_dc_admins": { - "name": "Delete AdminTo edges from non-DA to DC", - "request": "MATCH (g{is_da:false})-[rr:AdminTo]->(c:Computer{is_dc:true}) DETACH DELETE rr ", - "output_type": "list", - "is_a_write_request": "true" - }, "set_is_group_operator": { "name": "Set is_group_operator to Operator Groups (cf: ACCOUNT OPERATORS, SERVER OPERATORS, BACKUP OPERATORS, PRINT OPERATORS)", "request": "MATCH (g:Group) WHERE g.objectid ENDS WITH \"-551\" OR g.objectid ENDS WITH \"-549\" OR g.objectid ENDS WITH \"-548\" OR g.objectid ENDS WITH \"-550\" SET g.is_group_operator=True SET g.is_group_account_operator = CASE WHEN g.objectid ENDS WITH \"-548\" THEN true END, 
g.is_group_backup_operator = CASE WHEN g.objectid ENDS WITH \"-551\" THEN true END, g.is_group_server_operator = CASE WHEN g.objectid ENDS WITH \"-549\" THEN true END, g.is_group_print_operator = CASE WHEN g.objectid ENDS WITH \"-550\" THEN true END ", @@ -276,14 +258,14 @@ }, "set_dcsync1": { "name": "Set dcsync=TRUE to nodes that can DCSync (GetChanges/GetChangesAll)", - "request": "MATCH (n1) WITH n1 ORDER BY n1.name SKIP PARAM1 LIMIT PARAM2 MATCH p=allShortestPaths((n1)-[:MemberOf|GetChanges*1..5]->(u:Domain)) WHERE n1 <> u WITH n1 MATCH p2=(n1)-[:MemberOf|GetChangesAll*1..5]->(u:Domain) WHERE n1 <> u AND NOT n1.name IS NULL AND (((n1.is_da IS NULL OR n1.is_da=FALSE) AND (n1.is_dc IS NULL OR n1.is_dc=FALSE)) OR (NOT u.domain CONTAINS '.' + n1.domain AND n1.domain <> u.domain)) SET n1.can_dcsync=TRUE RETURN DISTINCT p2 as p", + "request": "MATCH (n1) WITH n1 ORDER BY ID(n1) SKIP PARAM1 LIMIT PARAM2 MATCH p=allShortestPaths((n1)-[:MemberOf|GetChanges*1..5]->(u:Domain)) WHERE n1 <> u WITH n1 MATCH p2=(n1)-[:MemberOf|GetChangesAll*1..5]->(u:Domain) WHERE n1 <> u AND NOT n1.name IS NULL AND (((n1.is_da IS NULL OR n1.is_da=FALSE) AND (n1.is_dc IS NULL OR n1.is_dc=FALSE)) OR (NOT u.domain CONTAINS '.' + n1.domain AND n1.domain <> u.domain)) SET n1.can_dcsync=TRUE RETURN DISTINCT p2 as p", "output_type": "Graph", "scope_query": "MATCH (n1) return count(n1)", "is_a_write_request": "true" }, "set_dcsync2": { "name": "Set dcsync=TRUE to nodes that can DCSync (GenericAll/AllExtendedRights)", - "request": "MATCH (n2) WITH n2 ORDER BY n2.name SKIP PARAM1 LIMIT PARAM2 MATCH p3=allShortestPaths((n2)-[:MemberOf|GenericAll|AllExtendedRights*1..5]->(u:Domain)) WHERE n2 <> u AND NOT n2.name IS NULL AND (((n2.is_da IS NULL OR n2.is_da=FALSE) AND (n2.is_dc IS NULL OR n2.is_dc=FALSE)) OR (NOT u.domain CONTAINS '.' 
+ n2.domain AND n2.domain <> u.domain)) SET n2.can_dcsync=TRUE RETURN DISTINCT p3 as p", + "request": "MATCH (n2) WITH n2 ORDER BY ID(n2) SKIP PARAM1 LIMIT PARAM2 MATCH p3=allShortestPaths((n2)-[:MemberOf|GenericAll|AllExtendedRights*1..5]->(u:Domain)) WHERE n2 <> u AND NOT n2.name IS NULL AND (((n2.is_da IS NULL OR n2.is_da=FALSE) AND (n2.is_dc IS NULL OR n2.is_dc=FALSE)) OR (NOT u.domain CONTAINS '.' + n2.domain AND n2.domain <> u.domain)) SET n2.can_dcsync=TRUE RETURN DISTINCT p3 as p", "output_type": "Graph", "scope_query": "MATCH (n1) return count(n1)", "is_a_write_request": "true" @@ -295,7 +277,7 @@ }, "set_ou_candidate": { "name": "Set ou_candidate=TRUE to candidates eligible to shortestou to DA", - "request": "MATCH (m) WHERE NOT m.name IS NULL AND ((m:Computer AND (m.is_dc=false OR m.is_dc IS NULL)) OR (m:User AND (m.is_da=false OR m.is_da IS NULL))) SET m.ou_candidate=TRUE", + "request": "MATCH (m) WHERE NOT m.name IS NULL AND ((m:Computer AND m.enabled AND (m.is_dc=false OR m.is_dc IS NULL)) OR (m:User AND m.enabled AND (m.is_da=false OR m.is_da IS NULL))) SET m.ou_candidate=TRUE", "output_type": "list", "is_a_write_request": "true" }, @@ -323,6 +305,12 @@ "output_type": "list", "is_a_write_request": "true" }, + "set_is_adcs": { + "name": "Set is_adcs to ADCS servers", + "request": "MATCH (g:Group) WHERE g.objectid ENDS WITH '-517' MATCH (c:Computer)-[r:MemberOf*1..4]->(g) SET c.is_adcs=TRUE RETURN c.domain AS domain, c.name AS name", + "output_type": "dict", + "is_a_write_request": "true" + }, "set_path_candidate": { "name": "Set path_candidate=TRUE to candidates eligible to shortestPath to DA", "request": "MATCH (o{is_da_dc:false}) WHERE NOT o:Domain AND ((o.enabled=True AND o:User) OR NOT o:User) AND (NOT o.is_adcs OR o.is_adcs is null) SET o.path_candidate=TRUE", @@ -363,12 +351,6 @@ "output_type": "list", "is_a_write_request": "true" }, - "set_is_adcs": { - "name": "Set is_adcs to ADCS servers", - "request": "MATCH (g:Group) WHERE g.objectid ENDS 
WITH '-517' MATCH (c:Computer)-[r:MemberOf*1..4]->(g) SET c.is_adcs=TRUE RETURN c.domain AS domain, c.name AS name", - "output_type": "dict", - "is_a_write_request": "true" - }, "set_groups_direct_admin": { "name": "Set groups which are direct admins of computers", "request": "MATCH (g:Group)-[r:AdminTo]->(c:Computer) SET g.is_admin=true RETURN DISTINCT g", @@ -400,6 +382,18 @@ "output_type": "list", "is_a_write_request": "true" }, + "set_user_indirect_admin":{ + "name": "Set is_admin=True to users members of groups with is_admin=True", + "request": "MATCH (u:User)-[:MemberOf]->(:Group{is_admin:true}) SET u.is_admin=true", + "output_type": "list", + "is_a_write_request": "true" + }, + "set_users_direct_admin":{ + "name": "Set is_admin=True to users with ", + "request": "MATCH (u:User)-[:AdminTo]->() SET u.is_admin=true", + "output_type": "list", + "is_a_write_request": "true" + }, "set_target_kud": { "name":"Set target_kud attribute on nodes that are configured for KUD", "request": "MATCH (o{unconstraineddelegation:true}) WHERE ((o:User AND o.enabled=true) OR (o:Computer AND o.is_dc=false)) SET o.target_kud=TRUE", @@ -421,53 +415,33 @@ "azure_set_apps_name": { "name": "Set Azure applications names", "request": "MATCH (a:AZApp) WHERE a.name IS NULL AND a.displayname IS NOT NULL SET a.name = a.displayname", - "output_type": "List", + "output_type": "list", "is_a_write_request": "true" }, "nb_domain_collected": { "name": "Count number of domains collected", - "request": "MATCH (m:Domain)-[r]->() RETURN distinct(COALESCE(m.domain, m.name))", + "request": "MATCH (m:Domain{collected:true}) RETURN m.name", "output_type": "list" }, - "get_count_of_member_admin_group": { - "name": "Count number of users in group", - "request": "MATCH (u:User{enabled:true})-[r:MemberOf]->(gg:Group{is_admin:true}) WHERE NOT u.name IS NULL and NOT gg.name IS NULL WITH count(u) as count, gg as g MATCH (g) SET g.user_members_count=count", - "output_type": "list", - "is_a_write_request": "true" - 
}, - "get_users_linked_admin_group": { - "name": "Returns all users member of an admin group", - "request": "MATCH (u:User{enabled:true})-[r:MemberOf]->(gg:Group{is_admin:true}) WHERE NOT u.name IS NULL and NOT gg.name IS NULL SET u.is_admin=true RETURN u, gg, ID(u) as idu, ID(gg) as idg", - "output_type": "dict", - "is_a_write_request": "true" - }, - "get_groups_linked_admin_group": { - "name": "Returns all groups member of an admin group", - "request": "MATCH (g:Group)-[r:MemberOf]->(gg:Group{is_admin:true}) WHERE NOT g.name IS NULL and NOT gg.name IS NULL RETURN g, gg, ID(g) as idg, ID(gg) as idgg", - "output_type": "dict" - }, - "get_computers_linked_admin_group": { - "name": "Returns all computers administrated by an admin group", - "request": "MATCH (g:Group{is_admin:true})-[r:AdminTo]->(c:Computer) WHERE NOT c.name IS NULL and NOT g.name IS NULL RETURN g, c, ID(g) as idg, ID(c) as idc", - "output_type": "dict" - }, - "get_users_direct_admin": { - "name": "Return direct admin users", - "request": "MATCH (g:User{enabled:true})-[r:AdminTo]->(c:Computer) WHERE NOT g.name IS NULL and NOT c.name IS NULL SET g.is_admin=True RETURN g, c, ID(g) as idg, ID(c) as idc", - "output_type": "dict", - "is_a_write_request": "true" - }, "set_ghost_computer": { "name": "Set ghost_computer=TRUE to computers that did not login for more than 90 days", - "request": "MATCH (n:Computer) WHERE toInteger(($extract_date$ - n.lastlogontimestamp)/86400)>$password_renewal$ SET n.ghost_computer=TRUE", + "request": "MATCH (n:Computer{enabled:true}) WHERE toInteger(($extract_date$ - n.lastlogontimestamp)/86400)>$password_renewal$ SET n.ghost_computer=TRUE", "output_type": "list", "is_a_write_request": "true" }, "set_default_exploitability_rating" : { "name": "Set default exploitability rating (r.cost=100) to all relations", - "request": "MATCH ()-[r]->() SET r.cost=100", + "request": "MATCH ()-[r]->() WITH r SKIP PARAM1 LIMIT PARAM2 MATCH ()-[r]->() SET r.cost=100", + "scope_query": "MATCH 
()-[r]->() RETURN count(r)", "output_type": "list" }, + "get_all_nodes" : { + "name": "Retrieving all nodes for fastGDS mode", + "request": "MATCH (o) WITH o ORDER BY ID(o) SKIP PARAM1 LIMIT PARAM2 MATCH (o) RETURN ID(o) AS id, LABELS(o) AS labels, o.name AS name, o.domain AS domain, o.tenant_id AS tenant_id", + "scope_query": "MATCH (o) return count(o)", + "output_type": "dict", + "postProcessing": "Neo4j.create_nodes_cache" + }, "check_unknown_relations" : { "name": "Checking for unknown relations", "request": "MATCH ()-[r]->() RETURN DISTINCT type(r) as relationType", @@ -491,18 +465,18 @@ }, "users_shadow_credentials": { "name": "Non privileged users that can impersonate privileged users", - "request": "MATCH (u:User{enabled:true,is_da:false}) WITH u ORDER BY u.name SKIP PARAM1 LIMIT PARAM2 MATCH p=allShortestPaths((u)-[r:MemberOf|AddKeyCredentialLink|WriteProperty|GenericAll|GenericWrite|Owns|WriteDacl*1..3]->(m:User{is_da:true,enabled:true})) RETURN p ", + "request": "MATCH (u:User{enabled:true,is_da:false}) WITH u ORDER BY ID(u) SKIP PARAM1 LIMIT PARAM2 MATCH p=(u)-[:MemberOf*0..3]->()-[r:AddKeyCredentialLink|WriteProperty|GenericAll|GenericWrite|Owns|WriteDacl]->(m:User{is_da:true,enabled:true}) RETURN p ", "scope_query": "MATCH (u:User{is_da:false, enabled:true}) return count(u)", "output_type": "Graph" }, "users_shadow_credentials_to_non_admins": { "name": "Non privileged users that can be impersonated by non privileged users", "is_a_gds_request": "true", - "request": "CALL {MATCH (s:User{enabled:true, is_da:false}) RETURN s UNION ALL MATCH (s:Group{is_dag:false,is_da:false}) RETURN s} WITH s ORDER BY s.name SKIP PARAM1 LIMIT PARAM2 MATCH p=shortestPath((s)-[r:AddKeyCredentialLink|WriteProperty|GenericAll|GenericWrite|Owns|WriteDacl*1..3]->(t:User{enabled:true})) WHERE s <> t AND s.is_group_account_operator IS NULL RETURN p", + "request": "MATCH (s) WHERE (s:User AND s.enabled AND NOT s.is_da) OR (s:Group AND NOT s.is_dag AND NOT s.is_da) WITH s ORDER BY 
ID(s) SKIP PARAM1 LIMIT PARAM2 MATCH p=shortestPath((s)-[r:AddKeyCredentialLink|WriteProperty|GenericAll|GenericWrite|Owns|WriteDacl*1..3]->(t:User{enabled:true})) WHERE s <> t AND s.is_group_account_operator IS NULL RETURN p", "create_gds_graph": "CALL gds.graph.project.cypher('graph_users_shadow_credentials_to_non_admins', 'MATCH (n) RETURN id(n) AS id', 'MATCH (n)-[r:AddKeyCredentialLink|WriteProperty|GenericAll|GenericWrite|Owns|WriteDacl]->(m) RETURN id(m) as source, id(n) AS target, r.cost as cost', {validateRelationships: false})", "gds_request": "MATCH (target:User{enabled:true}) CALL gds.allShortestPaths.dijkstra.stream('graph_users_shadow_credentials_to_non_admins', {sourceNode: target, relationshipWeightProperty: 'cost', logProgress: false}) YIELD path WITH nodes(path)[-1] AS starting_node, path WHERE starting_node <> target AND starting_node.is_group_account_operator IS NULL AND starting_node.is_account_operator IS NULL AND ((starting_node:User AND starting_node.enabled AND NOT starting_node.is_da) OR (starting_node:Group AND NOT starting_node.is_dag AND NOT starting_node.is_da)) RETURN path as p", "drop_gds_graph": "CALL gds.graph.drop('graph_users_shadow_credentials_to_non_admins', false) YIELD graphName", - "scope_query": "CALL {MATCH (s:User{enabled:true, is_da:false}) RETURN s UNION ALL MATCH (s:Group{is_dag:false,is_da:false}) RETURN s} WITH s ORDER BY s.name RETURN count(s)", + "scope_query": "MATCH (s) WHERE (s:User AND s.enabled AND NOT s.is_da) OR (s:Group AND NOT s.is_dag AND NOT s.is_da) WITH s ORDER BY ID(s) RETURN count(s)", "output_type": "Graph", "reverse_path": true }, @@ -523,12 +497,12 @@ }, "nb_computers": { "name": "Number of computers", - "request": "MATCH (c:Computer) WHERE NOT c.name IS NULL RETURN DISTINCT(c.domain) AS domain, c.name AS name, c.operatingsystem AS os, c.ghost_computer AS ghost ORDER BY c.domain", + "request": "MATCH (c:Computer) WHERE NOT c.name IS NULL RETURN DISTINCT(c.domain) AS domain, c.name AS name, 
c.operatingsystem AS os, c.ghost_computer AS ghost, c.enabled as enabled ORDER BY c.domain", "output_type": "dict" }, "computers_not_connected_since": { "name": "Computers not connected since", - "request": "MATCH (c:Computer) WHERE NOT c.lastlogontimestamp IS NULL AND c.name IS NOT NULL RETURN c.name AS name, toInteger(($extract_date$ - c.lastlogontimestamp)/86400) as days, toInteger(($extract_date$ - c.pwdlastset)/86400) as pwdlastset, c.enabled as enabled ORDER BY days DESC ", + "request": "MATCH (c:Computer) WHERE NOT c.lastlogontimestamp IS NULL AND c.name IS NOT NULL AND c.enabled RETURN c.name AS name, toInteger(($extract_date$ - c.lastlogontimestamp)/86400) as days, toInteger(($extract_date$ - c.pwdlastset)/86400) as pwdlastset, c.enabled as enabled ORDER BY days DESC ", "output_type": "dict" }, "nb_domain_admins": { @@ -573,7 +547,7 @@ }, "dormant_accounts": { "name": "Dormant accounts", - "request": "MATCH (n:User{enabled:true}) WHERE toInteger(($extract_date$ - n.lastlogontimestamp)/86400)>$password_renewal$ RETURN n.domain as domain, n.name as name,toInteger(($extract_date$ - n.lastlogontimestamp)/86400) AS days, toInteger(($extract_date$ - n.whencreated)/86400) AS accountCreationDate ORDER BY days DESC", + "request": "MATCH (n:User{enabled:true}) WHERE toInteger(($extract_date$ - n.lastlogontimestamp)/86400)>$password_renewal$ RETURN n.domain as domain, n.name as name, n.displayname as displayname, toInteger(($extract_date$ - n.lastlogontimestamp)/86400) AS days, toInteger(($extract_date$ - n.whencreated)/86400) AS accountCreationDate, n.distinguishedname as distinguishedname ORDER BY days DESC", "output_type": "dict" }, "password_last_change": { @@ -613,26 +587,27 @@ "create_gds_graph": "CALL gds.graph.project.cypher('graph_objects_to_domain_admin', 'MATCH (n) RETURN id(n) AS id', 'MATCH (n)-[r:$properties$]->(m) RETURN id(m) as source, id(n) AS target, r.cost as cost', {validateRelationships: false})", "drop_gds_graph": "CALL 
gds.graph.drop('graph_objects_to_domain_admin', false) YIELD graphName", "request": "MATCH (m{path_candidate:true}) WHERE NOT m.name IS NULL WITH m ORDER BY ID(m) SKIP PARAM1 LIMIT PARAM2 MATCH p = shortestPath((m)-[r:$properties$*1..$recursive_level$]->(g:Group{is_dag:true})) WHERE m<>g SET m.has_path_to_da=true RETURN DISTINCT(p) as p", - "gds_request" : "MATCH (target:Group {is_dag: true}) CALL gds.allShortestPaths.dijkstra.stream('graph_objects_to_domain_admin', {sourceNode: target, relationshipWeightProperty: 'cost', logProgress: false}) YIELD path WITH nodes(path)[-1] AS starting_node, path WHERE starting_node.path_candidate = TRUE SET starting_node.has_path_to_da=true RETURN path as p", - "output_type": "Graph", + "gds_request" : "MATCH (target:Group {is_dag: true}) CALL gds.allShortestPaths.dijkstra.stream('graph_objects_to_domain_admin', {sourceNode: target, relationshipWeightProperty: 'cost', logProgress: false}) YIELD path, costs WITH nodes(path)[-1] AS starting_node, path, costs WHERE starting_node.path_candidate = TRUE SET starting_node.has_path_to_da=true RETURN [n in nodes(path) | ID(n)] AS nodeIds, costs", + "output_type": "fastGDS", "scope_query": "MATCH (m{path_candidate:true}) WHERE NOT m.name IS NULL RETURN count(m)", "reverse_path": true, "is_a_write_request": "true" }, "objects_to_adcs": { "name": "Objects with path to ADCS servers", - "request": "MATCH (o{path_candidate:true}) WHERE NOT o.name IS NULL WITH o ORDER BY o.name SKIP PARAM1 LIMIT PARAM2 MATCH p=(o)-[rrr:MemberOf*0..4]->()-[rr:AdminTo]->(c{is_adcs:true}) RETURN DISTINCT(p) as p", + "request": "MATCH (o{path_candidate:true}) WHERE NOT o:Group AND NOT o.name IS NULL WITH o ORDER BY o.name SKIP PARAM1 LIMIT PARAM2 MATCH p=(o)-[rrr:MemberOf*0..4]->()-[rr:AdminTo]->(c{is_adcs:true}) RETURN DISTINCT(p) as p", "output_type": "Graph", "scope_query": "MATCH (m{path_candidate:true}) WHERE NOT m.name IS NULL RETURN count(m)" }, "users_admin_on_computers": { "name": "Users admin on machines", 
- "request": "MATCH p=(n:User{enabled:true})-[r:MemberOf|AdminTo*1..4]->(m:Computer) WHERE n:User AND n.enabled=true RETURN distinct(n.name) AS user,m.name AS computer,m.has_path_to_da AS has_path_to_da, ID(n) as user_id", - "output_type": "dict" + "request": "MATCH (u:User{enabled:true}) WITH u ORDER BY ID(u) SKIP PARAM1 LIMIT PARAM2 MATCH p=(u)-[:MemberOf*0..3]->()-[r:AdminTo]->(c:Computer) RETURN u.name AS user, u.displayname as displayname, c.name AS computer, c.has_path_to_da AS has_path_to_da, ID(u) as user_id, u.distinguishedname AS distinguishedname, p", + "output_type": "mixed", + "scope_query": "MATCH (u:User{enabled:true}) RETURN count(u)" }, "users_admin_on_servers_1": { "name": "Users admin on servers n\u00b01", - "request": "MATCH (n:User{enabled:true,is_da:false}) WHERE NOT n.name IS NULL WITH n ORDER BY ID(n) SKIP PARAM1 LIMIT PARAM2 MATCH p=(n)-[r:MemberOf*1..4]->(g:Group)-[r1:$properties$]->(u:Computer) WITH LENGTH(p) as pathLength, p, n, u WHERE NONE (x in NODES(p)[1..(pathLength-1)] WHERE x.objectid = u.objectid) AND NOT n.objectid = u.objectid RETURN n.name AS user, u.name AS computer, u.has_path_to_da as has_path_to_da", + "request": "MATCH (n:User{enabled:true,is_da:false}) WHERE NOT n.name IS NULL WITH n ORDER BY ID(n) SKIP PARAM1 LIMIT PARAM2 MATCH p=(n)-[r:MemberOf*1..2]->(g:Group)-[r1:$properties$]->(u:Computer) WITH LENGTH(p) as pathLength, p, n, u WHERE NONE (x in NODES(p)[1..(pathLength-1)] WHERE x.objectid = u.objectid) AND NOT n.objectid = u.objectid RETURN n.name AS user, u.name AS computer, u.has_path_to_da as has_path_to_da", "scope_query": "MATCH (n:User{enabled:true,is_da:false}) WHERE NOT n.name IS NULL RETURN count(n)", "output_type": "dict" }, @@ -644,23 +619,23 @@ }, "computers_admin_on_computers": { "name": "Number of computers admin of computers", - "request": "CALL{MATCH (c1:Computer)-[r1:AdminTo]->(c2:Computer) WHERE c1.name IS NOT NULL AND c2.name IS NOT NULL AND c1 <> c2 RETURN c1.name AS source_computer, c2.name AS 
target_computer, c2.has_path_to_da AS has_path_to_da UNION ALL MATCH (c1:Computer)-[r2:MemberOf*1..4]->(g:Group)-[r3:AdminTo]->(c2:Computer) WHERE c1.name IS NOT NULL AND c2.name IS NOT NULL AND c1 <> c2 RETURN c1.name AS source_computer, c2.name AS target_computer, c2.has_path_to_da AS has_path_to_da} RETURN distinct(source_computer), target_computer, has_path_to_da", + "request": "MATCH (c1:Computer)-[:MemberOf*0..]->()-[:AdminTo]->(c2:Computer) WHERE c1 <> c2 RETURN DISTINCT c1.name AS source_computer, c2.name AS target_computer, c2.has_path_to_da AS has_path_to_da, c2.smbsigning AS smbsigning", "output_type": "dict" }, "domain_map_trust": { "name": "Domain map trust", - "request": "MATCH p=shortestpath((d:Domain)-[:TrustedBy]->(m:Domain)) WHERE d<>m RETURN DISTINCT(p)", + "request": "MATCH p=shortestpath((d:Domain)-[:TrustedBy|AbuseTGTDelegation|SameForestTrust|SpoofSIDHistory|CrossForestTrust]->(m:Domain)) WHERE d<>m RETURN DISTINCT(p)", "output_type": "Graph" }, "kud": { "name": "Shortest paths to objects configured for KUD", "is_a_gds_request": "true", - "create_gds_graph": "CALL gds.graph.project.cypher('graph_kud', 'MATCH (n) RETURN id(n) AS id', 'MATCH (n)-[r:$properties$]->(m) RETURN id(m) as source, id(n) AS target, r.cost as cost', {validateRelationships: false})", + "create_gds_graph": "CALL gds.graph.project.cypher('graph_kud', 'MATCH (n) RETURN id(n) AS id', 'MATCH (n)-[r:$properties$]->(m) WHERE NOT (m:Domain OR n:Domain) AND NOT (n.is_dag=true or m.is_dag=true) AND NOT (n.is_da=true or m.is_da=true) RETURN id(m) as source, id(n) AS target, r.cost as cost', {validateRelationships: false})", "drop_gds_graph": "CALL gds.graph.drop('graph_kud', false) YIELD graphName", "request": "MATCH (n) WHERE (n:Computer OR (n:User AND n.enabled=true)) AND (n.is_da IS NULL OR n.is_da=FALSE) AND (n.is_dc IS NULL OR n.is_dc=FALSE) WITH n ORDER BY n.name SKIP PARAM1 LIMIT PARAM2 MATCH p=shortestPath((n)-[:$properties$*1..$recursive_level$]->(m{target_kud:true})) 
WHERE NOT n=m AND (((n.is_da IS NULL OR n.is_da=FALSE) AND (n.is_dc IS NULL OR n.is_dc=FALSE)) OR (NOT m.domain CONTAINS '.' + n.domain AND n.domain <> m.domain)) RETURN DISTINCT(p)", - "gds_request": "MATCH (target{target_kud:true}) CALL gds.allShortestPaths.dijkstra.stream('graph_kud', {sourceNode: target, relationshipWeightProperty: 'cost', logProgress: false}) YIELD path WITH nodes(path)[-1] AS starting_node, path WHERE ((starting_node:Computer OR (starting_node:User AND starting_node.enabled=true)) AND (starting_node.is_da IS NULL OR starting_node.is_da=FALSE) AND (starting_node.is_dc IS NULL OR starting_node.is_dc=FALSE)) AND (target <> starting_node AND (((starting_node.is_da IS NULL OR starting_node.is_da=FALSE) AND (starting_node.is_dc IS NULL OR starting_node.is_dc=FALSE)) OR (NOT target.domain CONTAINS '.' + starting_node.domain AND starting_node.domain <> target.domain))) RETURN path as p", + "gds_request": "MATCH (target{target_kud:true}) CALL gds.allShortestPaths.dijkstra.stream('graph_kud', {sourceNode: target, relationshipWeightProperty: 'cost', logProgress: false}) YIELD path, costs WITH nodes(path)[-1] AS starting_node, path, costs WHERE ((starting_node:Computer OR (starting_node:User AND starting_node.enabled=true)) AND (starting_node.is_da IS NULL OR starting_node.is_da=FALSE) AND (starting_node.is_dc IS NULL OR starting_node.is_dc=FALSE)) AND (target <> starting_node AND (((starting_node.is_da IS NULL OR starting_node.is_da=FALSE) AND (starting_node.is_dc IS NULL OR starting_node.is_dc=FALSE)) OR (NOT target.domain CONTAINS '.' 
+ starting_node.domain AND starting_node.domain <> target.domain))) RETURN [n in nodes(path) | ID(n)] AS nodeIds, costs", "reverse_path": true, - "output_type": "Graph", + "output_type": "fastGDS", "scope_query": "MATCH (n) WHERE (n:Computer OR (n:User AND n.enabled=true)) AND (n.is_da IS NULL OR n.is_da=FALSE) AND (n.is_dc IS NULL OR n.is_dc=FALSE) RETURN count(n)" }, "nb_computers_laps": { @@ -670,45 +645,46 @@ }, "can_read_laps": { "name": "Objects allowed to read LAPS", - "request": "MATCH p = (n{path_candidate:true})-[r1:MemberOf*0..]->()-[r2:GenericAll|ReadLAPSPassword|AllExtendedRights|SyncLAPSPassword]->(t:Computer {haslaps:true}) RETURN distinct(n.domain) AS domain, n.name AS name", - "output_type": "dict" + "request": "MATCH (n{path_candidate:true}) WHERE n:User OR n:Group OR n:Computer WITH n ORDER BY ID(n) SKIP PARAM1 LIMIT PARAM2 MATCH p = (n)-[r1:MemberOf*0..3]->()-[r2:GenericAll|ReadLAPSPassword|AllExtendedRights|SyncLAPSPassword]->(t:Computer{haslaps:true}) WHERE NOT (n)-[:MemberOf*0..3]->()-[:AdminTo]->(t) RETURN DISTINCT n.domain AS source_domain, n.name AS source_name, labels(n) as source_labels, t.domain as target_domain, t.name as target_name", + "output_type": "dict", + "scope_query": "MATCH (n{path_candidate:true}) WHERE n:User OR n:Group OR n:Computer RETURN count(n)" }, "objects_to_dcsync": { "name": "Objects to dcsync", "is_a_gds_request": "true", - "create_gds_graph": "CALL gds.graph.project.cypher('graph_objects_to_dcsync', 'MATCH (n) RETURN id(n) AS id', 'MATCH (n)-[r:$properties$]->(m) RETURN id(m) as source, id(n) AS target, r.cost as cost', {validateRelationships: false})", + "create_gds_graph": "CALL gds.graph.project.cypher('graph_objects_to_dcsync', 'MATCH (n) RETURN id(n) AS id', 'MATCH (n)-[r:$properties$]->(m) WHERE NOT (m:Domain OR n:Domain) AND NOT (n.is_dag=true or m.is_dag=true) AND NOT (n.is_da=true or m.is_da=true) RETURN id(m) as source, id(n) AS target, r.cost as cost', {validateRelationships: false})", 
"drop_gds_graph": "CALL gds.graph.drop('graph_objects_to_dcsync', false) YIELD graphName", "request": "MATCH (n{path_candidate:true}) WHERE n.can_dcsync IS NULL AND NOT n.name IS NULL WITH n ORDER BY n.name SKIP PARAM1 LIMIT PARAM2 MATCH p = shortestPath((n)-[r:$properties$*1..$recursive_level$]->(target{can_dcsync:TRUE})) WHERE n<>target RETURN distinct(p) AS p", - "gds_request": "MATCH (target{can_dcsync:TRUE}) CALL gds.allShortestPaths.dijkstra.stream('graph_objects_to_dcsync', {sourceNode: target, relationshipWeightProperty: 'cost', logProgress: false}) YIELD path WITH nodes(path)[-1] AS starting_node, path WHERE target <> starting_node AND starting_node.path_candidate = TRUE AND starting_node:User RETURN path as p", - "output_type": "Graph", + "gds_request": "MATCH (target{can_dcsync:TRUE}) CALL gds.allShortestPaths.dijkstra.stream('graph_objects_to_dcsync', {sourceNode: target, relationshipWeightProperty: 'cost', logProgress: false}) YIELD path, costs WITH nodes(path)[-1] AS starting_node, path, costs WHERE target <> starting_node AND starting_node.path_candidate = TRUE AND starting_node:User RETURN [n in nodes(path) | ID(n)] AS nodeIds, costs", + "output_type": "fastGDS", "scope_query": "MATCH (n{path_candidate:true}) WHERE n.can_dcsync IS NULL AND NOT n.name IS NULL RETURN count(n)", "reverse_path": true }, "dom_admin_on_non_dc": { "name": "Domain admin with session on non DC computers", - "request": "MATCH p=(c:Computer)-[r:HasSession]->(u:User{enabled:true, is_da:true}) WHERE NOT c.name IS NULL and NOT u.name IS NULL and NOT c.is_dc=True RETURN distinct(p) AS p", + "request": "MATCH p=(c:Computer{path_candidate:true})-[r:HasSession]->(u:User{enabled:true, is_da:true}) WHERE NOT c.name IS NULL and NOT u.name IS NULL and NOT c.is_dc=True RETURN distinct(p) AS p", "output_type": "Graph" }, "unpriv_to_dnsadmins": { "name": "Unprivileged users with path to DNSAdmins", "is_a_gds_request": "true", - "create_gds_graph": "CALL 
gds.graph.project.cypher('graph_unpriv_to_dnsadmins', 'MATCH (n) RETURN id(n) AS id', 'MATCH (n)-[r:MemberOf]->(m) RETURN id(m) as source, id(n) AS target, r.cost as cost', {validateRelationships: false})", + "create_gds_graph": "CALL gds.graph.project.cypher('graph_unpriv_to_dnsadmins', 'MATCH (n) RETURN id(n) AS id', 'MATCH (n)-[r:MemberOf]->(m) WHERE NOT (m:Domain OR n:Domain) AND NOT (n.is_dag=true or m.is_dag=true) AND NOT (n.is_da=true or m.is_da=true) RETURN id(m) as source, id(n) AS target, r.cost as cost', {validateRelationships: false})", "drop_gds_graph": "CALL gds.graph.drop('graph_unpriv_to_dnsadmins', false) YIELD graphName", "request": "MATCH (u:User{path_candidate:true}) WITH u ORDER BY u.name SKIP PARAM1 LIMIT PARAM2 MATCH p=(u)-[r:MemberOf*1..$recursive_level$]->(g:Group{is_dnsadmin:true}) RETURN distinct(p) AS p", - "gds_request": "MATCH (target:Group{is_dnsadmin:true}) CALL gds.allShortestPaths.dijkstra.stream('graph_unpriv_to_dnsadmins', {sourceNode: target, relationshipWeightProperty: 'cost', logProgress: false}) YIELD path WITH nodes(path)[-1] AS starting_node, path WHERE target <> starting_node AND starting_node.path_candidate = TRUE AND starting_node:User RETURN path as p", - "output_type": "Graph", + "gds_request": "MATCH (target:Group{is_dnsadmin:true}) CALL gds.allShortestPaths.dijkstra.stream('graph_unpriv_to_dnsadmins', {sourceNode: target, relationshipWeightProperty: 'cost', logProgress: false}) YIELD path, costs WITH nodes(path)[-1] AS starting_node, path, costs WHERE target <> starting_node AND starting_node.path_candidate = TRUE AND starting_node:User RETURN [n in nodes(path) | ID(n)] AS nodeIds, costs", + "output_type": "fastGDS", "reverse_path": true, "scope_query": "MATCH (u:User{path_candidate:true}) RETURN count(u)" }, "rdp_access": { "name": "Users with RDP-access to Computers ", - "request": "MATCH (u:User{enabled:true,is_da:false}) WITH u ORDER BY u.name SKIP PARAM1 LIMIT PARAM2 CALL {WITH u MATCH 
p=(u)-[r1:MemberOf*1..5]->(m:Group)-[r2:CanRDP]->(c:Computer) RETURN u.name as user, c.name as computer UNION ALL WITH u MATCH p=(u)-[r2:CanRDP]->(c:Computer) RETURN u.name as user, c.name as computer} RETURN DISTINCT user, computer", + "request": "MATCH (u:User{enabled:true,is_da:false}) WITH u ORDER BY ID(u) SKIP PARAM1 LIMIT PARAM2 MATCH p=(u)-[r1:MemberOf*0..5]->()-[r2:CanRDP]->(c:Computer) RETURN u.name as user, c.name as computer", "scope_query": "MATCH (u:User{enabled:true,is_da:false}) RETURN count(u)", "output_type": "dict" }, "dc_impersonation": { "name": "Non-domain admins that can directly or indirectly impersonate a Domain Controller ", - "request": "MATCH (u{ou_candidate:true}) WITH u ORDER BY u.name SKIP PARAM1 LIMIT PARAM2 CALL{WITH u MATCH p=(u)-[r:MemberOf*1..5]->(g:Group)-[r3:AddKeyCredentialLink|WriteProperty|GenericAll|GenericWrite|Owns|WriteDacl]->(m:Computer{is_dc:true}) RETURN p UNION ALL WITH u MATCH p=(u)-[r3:AddKeyCredentialLink|WriteProperty|GenericAll|GenericWrite|Owns|WriteDacl]->(m:Computer{is_dc:true}) RETURN p }RETURN DISTINCT p", + "request": "MATCH (u{ou_candidate:true}) WITH u ORDER BY ID(u) SKIP PARAM1 LIMIT PARAM2 MATCH p=(u)-[r:MemberOf*0..3]->()-[r3:AddKeyCredentialLink|WriteProperty|GenericAll|GenericWrite|Owns|WriteDacl]->(m:Computer{is_dc:true}) RETURN DISTINCT p", "scope_query": "MATCH (u{ou_candidate:true}) RETURN count(u)", "output_type": "Graph" }, @@ -731,8 +707,8 @@ "create_gds_graph": "CALL gds.graph.project.cypher('graph_compromise_paths_of_OUs', 'MATCH (n) RETURN id(n) AS id', 'MATCH (n)-[r:MemberOf|GenericAll|GenericWrite|Owns|WriteOwner|WriteDacl|WriteGPLink]->(m) RETURN id(m) as source, id(n) AS target, r.cost as cost', {validateRelationships: false})", "drop_gds_graph": "CALL gds.graph.drop('graph_compromise_paths_of_OUs', false) YIELD graphName", "request": "MATCH (o:OU) WITH o ORDER BY ID(o) SKIP PARAM1 LIMIT PARAM2 MATCH 
p=shortestPath((u{ou_candidate:true})-[:MemberOf|GenericAll|GenericWrite|Owns|WriteOwner|WriteDacl|WriteGPLink*1..8]->(o:OU)) SET o.vulnerable_OU = TRUE RETURN p", - "gds_request": "MATCH (target:OU) CALL gds.allShortestPaths.dijkstra.stream('graph_compromise_paths_of_OUs', {sourceNode: target, relationshipWeightProperty: 'cost', logProgress: false}) YIELD path WITH nodes(path)[-1] AS starting_node, path WHERE starting_node.ou_candidate = TRUE SET starting_node.vulnerable_OU=true RETURN path as p", - "output_type": "Graph", + "gds_request": "MATCH (target:OU) CALL gds.allShortestPaths.dijkstra.stream('graph_compromise_paths_of_OUs', {sourceNode: target, relationshipWeightProperty: 'cost', logProgress: false}) YIELD path, costs WITH nodes(path)[-1] AS starting_node, path, costs WHERE starting_node.ou_candidate = TRUE SET starting_node.vulnerable_OU=true RETURN [n in nodes(path) | ID(n)] AS nodeIds, costs", + "output_type": "fastGDS", "reverse_path": true, "scope_query": "MATCH (o:OU) RETURN count(o)" }, @@ -742,8 +718,8 @@ "create_gds_graph": "CALL gds.graph.project.cypher('graph_vulnerable_OU_impact', 'MATCH (n) RETURN id(n) AS id', 'MATCH (n)-[r:Contains|MemberOf]->(m) RETURN id(n) as source, id(m) AS target, r.cost as cost', {validateRelationships: false})", "drop_gds_graph": "CALL gds.graph.drop('graph_vulnerable_OU_impact', false) YIELD graphName", "request": "MATCH (o:OU{vulnerable_OU:true}) WITH o ORDER BY o.name SKIP PARAM1 LIMIT PARAM2 MATCH p=shortestPath((o)-[:Contains|MemberOf*1..]->(e)) WHERE o <> e AND (e:User OR e:Computer) RETURN p", - "gds_request": "MATCH (source:OU{vulnerable_OU:true}) CALL gds.allShortestPaths.dijkstra.stream('graph_vulnerable_OU_impact', {sourceNode: source, relationshipWeightProperty: 'cost', logProgress: false}) YIELD path WITH nodes(path)[-1] AS target_node, path WHERE target_node:User OR target_node:Computer RETURN path as p", - "output_type": "Graph", + "gds_request": "MATCH (source:OU{vulnerable_OU:true}) CALL 
gds.allShortestPaths.dijkstra.stream('graph_vulnerable_OU_impact', {sourceNode: source, relationshipWeightProperty: 'cost', logProgress: false}) YIELD path, costs WITH nodes(path)[-1] AS target_node, path, costs WHERE target_node:User OR target_node:Computer RETURN [n in nodes(path) | ID(n)] AS nodeIds, costs", + "output_type": "fastGDS", "scope_query": "MATCH (o:OU{vulnerable_OU:true}) RETURN count(o)" }, "vuln_functional_level": { @@ -758,25 +734,25 @@ }, "can_read_gmsapassword_of_adm": { "name": "Objects allowed to read the GMSA of objects with admincount=True", - "request": "CALL {MATCH (o{path_candidate:true}) WITH o ORDER BY o.name SKIP PARAM1 LIMIT PARAM2 MATCH p=((o)-[:MemberOf*1..7]->(g:Group)-[:ReadGMSAPassword]->(u:User{is_admin:true})) WHERE o.name<>u.name RETURN DISTINCT(p) UNION ALL MATCH (o{path_candidate:true}) WITH o ORDER BY o.name SKIP PARAM1 LIMIT PARAM2 MATCH p=((o)-[:ReadGMSAPassword]->(u:User{is_admin:true})) WHERE o.name<>u.name RETURN DISTINCT(p) } RETURN p", + "request": "MATCH (o{path_candidate:true}) WITH o ORDER BY ID(o) SKIP PARAM1 LIMIT PARAM2 MATCH p=((o)-[:MemberOf*0..5]->()-[:ReadGMSAPassword]->(u:User{is_admin:true})) WHERE o.name<>u.name RETURN DISTINCT(p)", "output_type": "Graph", "scope_query": "MATCH (o{path_candidate:true}) RETURN count(o)" }, "objects_to_operators_member": { "name": "Unprivileged users with path to an Operator Member", "request": "MATCH (m:User{path_candidate:true}) WITH m ORDER BY m.name SKIP PARAM1 LIMIT PARAM2 MATCH p = shortestPath((m)-[r:$path_to_group_operators_props$*1..$recursive_level$]->(o:User{is_operator_member:true})) WHERE m<>o AND ((o.is_da=true AND o.domain<>m.domain) OR (o.is_da=false)) RETURN DISTINCT(p) as p", - "output_type": "Graph", + "output_type": "fastGDS", "scope_query": "MATCH (m:User{path_candidate:true}) RETURN count(m)", "is_a_gds_request": "true", - "create_gds_graph": "CALL gds.graph.project.cypher('graph_objects_to_operators_member', 'MATCH (n) RETURN id(n) AS id', 'MATCH 
(n)-[r:$path_to_group_operators_props$]->(m) RETURN id(m) as source, id(n) AS target, r.cost as cost', {validateRelationships: false})", + "create_gds_graph": "CALL gds.graph.project.cypher('graph_objects_to_operators_member', 'MATCH (n) RETURN id(n) AS id', 'MATCH (n)-[r:$path_to_group_operators_props$]->(m) WHERE NOT (m:Domain OR n:Domain) AND NOT (n.is_dag=true or m.is_dag=true) AND NOT (n.is_da=true or m.is_da=true) RETURN id(m) as source, id(n) AS target, r.cost as cost', {validateRelationships: false})", "drop_gds_graph": "CALL gds.graph.drop('graph_objects_to_operators_member', false) YIELD graphName", - "gds_request": "MATCH (target:User{is_operator_member:true}) CALL gds.allShortestPaths.dijkstra.stream('graph_objects_to_operators_member', {sourceNode: target, relationshipWeightProperty: 'cost', logProgress: false}) YIELD path WITH nodes(path)[-1] AS starting_node, path WHERE starting_node:User AND target <> starting_node AND starting_node.path_candidate = TRUE AND ((target.is_da=true AND target.domain<>starting_node.domain) OR (target.is_da=false)) RETURN path as p", + "gds_request": "MATCH (target:User{is_operator_member:true}) CALL gds.allShortestPaths.dijkstra.stream('graph_objects_to_operators_member', {sourceNode: target, relationshipWeightProperty: 'cost', logProgress: false}) YIELD path, costs WITH nodes(path)[-1] AS starting_node, path, costs WHERE starting_node:User AND target <> starting_node AND starting_node.path_candidate = TRUE AND ((target.is_da=true AND target.domain<>starting_node.domain) OR (target.is_da=false)) RETURN [n in nodes(path) | ID(n)] AS nodeIds, costs", "reverse_path": true, "_comment": "TODO: table with type, account name, is_da (star) and the number of path towards it" }, "objects_to_operators_groups": { "name": "Operator Member path to Operators Groups", - "request": "MATCH (m:User{is_operator_member:true}) WITH m ORDER BY m.name SKIP PARAM1 LIMIT PARAM2 MATCH p = 
shortestPath((m)-[r:MemberOf*1..$recursive_level$]->(o:Group{is_group_operator:true})) WHERE (m.is_da=true AND o.domain<>m.domain) OR (m.is_da=false) RETURN DISTINCT(p) as p", + "request": "MATCH (m:User{is_operator_member:true}) WITH m ORDER BY ID(m) SKIP PARAM1 LIMIT PARAM2 MATCH p = shortestPath((m)-[r:MemberOf*1..$recursive_level$]->(o:Group{is_group_operator:true})) WHERE (m.is_da=true AND o.domain<>m.domain) OR (m.is_da=false) RETURN DISTINCT(p) as p", "output_type": "Graph", "scope_query": "MATCH (m:User{is_operator_member:true}) RETURN count(m)", "_comment": "TODO: table with type, account name, is_da (star) and the number of path towards it" @@ -784,23 +760,23 @@ "vuln_permissions_adminsdholder": { "name": "Dangerous permissions on the adminSDHolder object", "is_a_gds_request": "true", - "create_gds_graph": "CALL gds.graph.project.cypher('graph_vuln_permissions_adminsdholder', 'MATCH (n) RETURN id(n) AS id', 'MATCH (n)-[r:$properties$]->(m) RETURN id(m) as source, id(n) AS target, r.cost as cost', {validateRelationships: false})", + "create_gds_graph": "CALL gds.graph.project.cypher('graph_vuln_permissions_adminsdholder', 'MATCH (n) RETURN id(n) AS id', 'MATCH (n)-[r:$properties$]->(m) WHERE NOT (m:Domain OR n:Domain) AND NOT (n.is_dag=true or m.is_dag=true) AND NOT (n.is_da=true or m.is_da=true) RETURN id(m) as source, id(n) AS target, r.cost as cost', {validateRelationships: false})", "drop_gds_graph": "CALL gds.graph.drop('graph_vuln_permissions_adminsdholder', false) YIELD graphName", "request": "MATCH (n:User{path_candidate:true}) WITH n ORDER BY n.name SKIP PARAM1 LIMIT PARAM2 MATCH p = shortestPath((n)-[r:$properties$*1..4]->(target1{is_adminsdholder:true})) WHERE n<>target1 AND NOT ANY(no in nodes(p) WHERE (no.is_da=true AND (no.domain=target1.domain OR target1.domain CONTAINS \".\" + no.domain))) RETURN distinct(p) AS p", - "gds_request": "MATCH (target{is_adminsdholder:true}) CALL 
gds.allShortestPaths.dijkstra.stream('graph_vuln_permissions_adminsdholder', {sourceNode: target, relationshipWeightProperty: 'cost', logProgress: false}) YIELD path WITH nodes(path)[-1] AS starting_node, path WHERE starting_node:User AND target <> starting_node AND starting_node.path_candidate = TRUE AND NOT ANY(no in nodes(path) WHERE (no.is_da=true AND (no.domain=target.domain OR target.domain CONTAINS \".\" + no.domain))) RETURN path as p", - "output_type": "Graph", + "gds_request": "MATCH (target{is_adminsdholder:true}) CALL gds.allShortestPaths.dijkstra.stream('graph_vuln_permissions_adminsdholder', {sourceNode: target, relationshipWeightProperty: 'cost', logProgress: false}) YIELD path, costs WITH nodes(path)[-1] AS starting_node, path, costs WHERE starting_node:User AND target <> starting_node AND starting_node.path_candidate = TRUE AND NOT ANY(no in nodes(path) WHERE (no.is_da=true AND (no.domain=target.domain OR target.domain CONTAINS \".\" + no.domain))) RETURN [n in nodes(path) | ID(n)] AS nodeIds, costs", + "output_type": "fastGDS", "scope_query": "MATCH (n:User{path_candidate:true}) RETURN count(n)", "reverse_path": true, "_comment": "TODO : table with les adminsdholder + path with => X objects to SDHolder" }, "da_to_da": { "name": "Paths between two domain admins belonging to different domains", - "request": "MATCH p=allShortestPaths((g:Group{is_dag:true})-[r:$properties$*1..$recursive_level$]->(gg:Group{is_dag:true})) WHERE g<>gg AND g.domain <> gg.domain RETURN p", + "request": "MATCH p=shortestPath((g:Group{is_dag:true})-[r:$properties$*1..$recursive_level$]->(gg:Group{is_dag:true})) WHERE g<>gg AND g.domain <> gg.domain RETURN p", "output_type": "Graph" }, "anomaly_acl_1": { "name": "anomaly_acl_1", - "request": "MATCH (gg) WHERE NOT gg:Group with gg as g MATCH (g)-[r2{isacl:true}]->(n) WHERE ((g.is_da IS NULL OR g.is_da=FALSE) AND (g.is_dc IS NULL OR g.is_dc=FALSE) AND (NOT g.is_adcs OR g.is_adcs IS NULL)) OR (NOT n.domain CONTAINS '.' 
+ g.domain AND n.domain <> g.domain) RETURN n.name,g.name,type(r2),LABELS(g),labels(n),ID(n)", + "request": "MATCH (gg) WHERE NOT gg:Group AND ((gg:User AND gg.enabled) OR (gg:Computer AND gg.enabled) OR (NOT (gg:User OR gg:Computer))) WITH gg as g MATCH (g)-[r2{isacl:true}]->(n) WHERE ((g.is_da IS NULL OR g.is_da=FALSE) AND (g.is_dc IS NULL OR g.is_dc=FALSE) AND (NOT g.is_adcs OR g.is_adcs IS NULL)) OR (NOT n.domain CONTAINS '.' + g.domain AND n.domain <> g.domain) RETURN n.name,g.name,type(r2),LABELS(g),labels(n),ID(n)", "output_type": "dict" }, "anomaly_acl_2": { @@ -827,10 +803,10 @@ "name": "Initialization request for GPOs [WARNING: If this query is too slow, you can use --gpo_low]", "request": "MATCH (n:User{path_candidate:true}) WITH n ORDER BY n.name SKIP PARAM1 LIMIT PARAM2 MATCH p = shortestPath((n)-[r:MemberOf|AddSelf|WriteSPN|AddKeyCredentialLink|AddMember|AllExtendedRights|ForceChangePassword|GenericAll|GenericWrite|WriteDacl|WriteOwner|Owns*1..]->(g:GPO)) WHERE NOT n=g AND NOT g.name IS NULL RETURN p ", "is_a_gds_request": "true", - "create_gds_graph": "CALL gds.graph.project.cypher('graph_unpriv_users_to_GPO_init', 'MATCH (n) RETURN id(n) AS id', 'MATCH (n)-[r:MemberOf|AddSelf|WriteSPN|AddKeyCredentialLink|AddMember|AllExtendedRights|ForceChangePassword|GenericAll|GenericWrite|WriteDacl|WriteOwner|Owns]->(m) RETURN id(m) as source, id(n) AS target, r.cost as cost', {validateRelationships: false})", + "create_gds_graph": "CALL gds.graph.project.cypher('graph_unpriv_users_to_GPO_init', 'MATCH (n) RETURN id(n) AS id', 'MATCH (n)-[r:MemberOf|AddSelf|WriteSPN|AddKeyCredentialLink|AddMember|AllExtendedRights|ForceChangePassword|GenericAll|GenericWrite|WriteDacl|WriteOwner|Owns]->(m) WHERE NOT (m:Domain OR n:Domain) AND NOT (n.is_dag=true or m.is_dag=true) AND NOT (n.is_da=true or m.is_da=true) RETURN id(m) as source, id(n) AS target, r.cost as cost', {validateRelationships: false})", "drop_gds_graph": "CALL gds.graph.drop('graph_unpriv_users_to_GPO_init', 
false) YIELD graphName", - "gds_request": "MATCH (target:GPO) CALL gds.allShortestPaths.dijkstra.stream('graph_unpriv_users_to_GPO_init', {sourceNode: target, relationshipWeightProperty: 'cost', logProgress: false}) YIELD path WITH nodes(path)[-1] AS starting_node, path WHERE starting_node:User AND target <> starting_node AND starting_node.path_candidate = TRUE RETURN path as p", - "output_type": "Graph", + "gds_request": "MATCH (target:GPO) CALL gds.allShortestPaths.dijkstra.stream('graph_unpriv_users_to_GPO_init', {sourceNode: target, relationshipWeightProperty: 'cost', logProgress: false}) YIELD path, costs WITH nodes(path)[-1] AS starting_node, path, costs WHERE starting_node:User AND target <> starting_node AND starting_node.path_candidate = TRUE RETURN [n in nodes(path) | ID(n)] AS nodeIds, costs", + "output_type": "fastGDS", "scope_query": "MATCH (n:User{path_candidate:true}) RETURN count(n)", "postProcessing": "Neo4j.setDangerousInboundOnGPOs", "reverse_path": true, @@ -838,35 +814,35 @@ }, "unpriv_users_to_GPO_user_enforced": { "name": "Compromisable GPOs to users (enforced)", - "request": "MATCH (n:User{enabled:true}) WHERE n.name IS NOT NULL WITH n ORDER BY n.name SKIP PARAM1 LIMIT PARAM2 MATCH p = (g:GPO{dangerous_inbound:true})-[r1:GPLink {enforced:true}]->(container2)-[r2:Contains*1..]->(n) RETURN p", + "request": "MATCH (n:User{enabled:true}) WHERE n.name IS NOT NULL WITH n ORDER BY ID(n) SKIP PARAM1 LIMIT PARAM2 MATCH p = (g:GPO{dangerous_inbound:true})-[r1:GPLink {enforced:true}]->(container2)-[r2:Contains*1..]->(n) RETURN p", "output_type": "Graph", "scope_query": "MATCH (n:User{enabled:true}) WHERE n.name IS NOT NULL RETURN count(n)", "_comment": "This is a request for the --gpo_low option" }, "unpriv_users_to_GPO_user_not_enforced": { "name": "Compromisable GPOs to users (not enforced)", - "request": "MATCH (n:User{enabled:true}) WHERE n.name IS NOT NULL WITH n ORDER BY n.name SKIP PARAM1 LIMIT PARAM2 MATCH p = 
(g:GPO{dangerous_inbound:true})-[r1:GPLink{enforced:false}]->(container1)-[r2:Contains*1..]->(n) WHERE NONE(x in NODES(p) WHERE x.blocksinheritance = true AND (x:OU)) RETURN p", + "request": "MATCH (n:User{enabled:true}) WHERE n.name IS NOT NULL WITH n ORDER BY ID(n) SKIP PARAM1 LIMIT PARAM2 MATCH p = (g:GPO{dangerous_inbound:true})-[r1:GPLink{enforced:false}]->(container1)-[r2:Contains*1..]->(n) WHERE NONE(x in NODES(p) WHERE x.blocksinheritance = true AND (x:OU)) RETURN p", "output_type": "Graph", "scope_query": "MATCH (n:User{enabled:true}) WHERE n.name IS NOT NULL RETURN count(n)", "_comment": "This is a request for the --gpo_low option" }, "unpriv_users_to_GPO_computer_enforced": { "name": "Compromisable GPOs to computers (enforced)", - "request": "MATCH (n:Computer) WITH n ORDER BY n.name WITH n SKIP PARAM1 LIMIT PARAM2 MATCH p = (g:GPO{dangerous_inbound:true})-[r1:GPLink {enforced:true}]->(container2)-[r2:Contains*1..]->(n) RETURN p", + "request": "MATCH (n:Computer) WITH n ORDER BY ID(n) WITH n SKIP PARAM1 LIMIT PARAM2 MATCH p = (g:GPO{dangerous_inbound:true})-[r1:GPLink {enforced:true}]->(container2)-[r2:Contains*1..]->(n) RETURN p", "output_type": "Graph", "scope_query": "MATCH (n:Computer) RETURN count(n)", "_comment": "This is a request for the --gpo_low option" }, "unpriv_users_to_GPO_computer_not_enforced": { "name": "Compromisable GPOs to computers (not enforced)", - "request": "MATCH (n:Computer) WITH n ORDER BY n.name WITH n SKIP PARAM1 LIMIT PARAM2 MATCH p = (g:GPO{dangerous_inbound:true})-[r1:GPLink{enforced:false}]->(container1)-[r2:Contains*1..]->(n) WHERE NONE(x in NODES(p) WHERE x.blocksinheritance = true AND (x:OU)) RETURN p", + "request": "MATCH (n:Computer) WITH n ORDER BY ID(n) WITH n SKIP PARAM1 LIMIT PARAM2 MATCH p = (g:GPO{dangerous_inbound:true})-[r1:GPLink{enforced:false}]->(container1)-[r2:Contains*1..]->(n) WHERE NONE(x in NODES(p) WHERE x.blocksinheritance = true AND (x:OU)) RETURN p", "output_type": "Graph", "scope_query": "MATCH 
(n:Computer) RETURN count(n)", "_comment": "This is a request for the --gpo_low option" }, "unpriv_users_to_GPO": { "name": "Non privileged users to GPO", - "request": "MATCH (g:GPO) WITH g ORDER BY g.name SKIP PARAM1 LIMIT PARAM2 OPTIONAL MATCH (g)-[r1:GPLink {enforced:false}]->(container1) WITH g,container1 OPTIONAL MATCH (g)-[r2:GPLink {enforced:true}]->(container2) WITH g,container1,container2 OPTIONAL MATCH p = (g)-[r1:GPLink]->(container1)-[r2:Contains*1..8]->(n1:Computer) WHERE NONE(x in NODES(p) WHERE x.blocksinheritance = true AND (x:OU)) WITH g,p,container2,n1 OPTIONAL MATCH p2 = (g)-[r1:GPLink]->(container2)-[r2:Contains*1..8]->(n2:Computer) RETURN p", + "request": "MATCH (g:GPO) WITH g ORDER BY ID(g) SKIP PARAM1 LIMIT PARAM2 OPTIONAL MATCH (g)-[r1:GPLink {enforced:false}]->(container1) WITH g,container1 OPTIONAL MATCH (g)-[r2:GPLink {enforced:true}]->(container2) WITH g,container1,container2 OPTIONAL MATCH p = (g)-[r1:GPLink]->(container1)-[r2:Contains*1..8]->(n1:Computer) WHERE NONE(x in NODES(p) WHERE x.blocksinheritance = true AND (x:OU)) WITH g,p,container2,n1 OPTIONAL MATCH p2 = (g)-[r1:GPLink]->(container2)-[r2:Contains*1..8]->(n2:Computer) RETURN p", "output_type": "Graph", "scope_query": "MATCH (g:GPO) RETURN COUNT(g)", "_comment": "this is the normal version of the GPO request" @@ -906,6 +882,22 @@ "request": "MATCH (u:User) WHERE u.fgpp_name IS NOT NULL RETURN u.fgpp_msds_psoappliesto, u.fgpp_name, u.fgpp_msds_minimumpasswordlength, u.fgpp_msds_minimumpasswordage, u.fgpp_msds_maximumpasswordage, u.fgpp_msds_passwordreversibleencryptionenabled, u.fgpp_msds_passwordhistorylength, u.fgpp_msds_passwordcomplexityenabled, u.fgpp_msds_lockoutduration, u.fgpp_msds_lockoutthreshold, u.fgpp_msds_lockoutobservationwindow", "output_type": "list" }, + "esc15_adcs_privilege_escalation": { + "name": "Check for potential ESC15 attacks (ADCS privilege escalation) and create associated edge", + "request": "MATCH 
p=(x:Base)-[:MemberOf*0..]->()-[:Enroll|AllExtendedRights]->(ct:CertTemplate)-[:PublishedTo]->(:EnterpriseCA)-[:TrustedForNTAuth]->(:NTAuthStore)-[:NTAuthStoreFor]->(d:Domain) WHERE ct.enrolleesuppliessubject = True AND ct.authenticationenabled = False AND ct.requiresmanagerapproval = False AND ct.schemaversion = 1 CREATE (x)-[:ADCSESC15]->(d)", + "output_type": "list", + "is_a_write_request": "true" + }, + "smb_signing": { + "name": "SMB signing", + "request": "MATCH (c:Computer) RETURN c.name AS name, c.domain AS domain, c.smbsigning AS smbsigning, c.is_dc AS dc, c.is_server AS server, toInteger(($extract_date$ - c.lastlogontimestamp)/86400) AS lastlogontimestamp", + "output_type": "dict" + }, + "ldap_server_configuration": { + "name": "LDAP and LDAPS configuration", + "request":"MATCH (c) WHERE c.ldapavailable OR c.ldapsavailable RETURN c.name AS name, c.domain AS domain, c.ldapavailable AS ldap, c.ldapsavailable AS ldaps, c.ldapsigning AS ldapsigning, c.ldapsepa AS ldapsepa", + "output_type": "dict" + }, "azure_set_gag": { "name": "Set gag=TRUE to Global admin group", "request": "MATCH (a:AZRole) WHERE a.name STARTS WITH 'GLOBAL ADMINISTRATOR@' SET a.is_gag=TRUE", diff --git a/ad_miner/sources/modules/smolcard_class.py b/ad_miner/sources/modules/smolcard_class.py index 0fdf36f2..525c49d0 100644 --- a/ad_miner/sources/modules/smolcard_class.py +++ b/ad_miner/sources/modules/smolcard_class.py @@ -52,7 +52,7 @@ def fillTemplate(self, template_raw: str, dict_of_value: dict) -> str: """ Fill the smolcard template with the data in dict_of_value. It extracts the {{something}} variables in the html template and replaces them with their value in the dict_of_value dictionnary. - Every ` char will be skipped. + Every \` char will be skipped. 
""" original = template_raw content = "" diff --git a/ad_miner/sources/modules/utils.py b/ad_miner/sources/modules/utils.py index 3468b7bb..591b5f9f 100755 --- a/ad_miner/sources/modules/utils.py +++ b/ad_miner/sources/modules/utils.py @@ -119,6 +119,12 @@ def args(): default="", help="Nodes of the cluster to run parallel neo4j queries. ex : host1:port1:nCore1,host2:port2:nCore2,...", ) + parser.add_argument( + "--previous_prefix", + type=str, + default="", + help="Last AD Miner cache prefix (used to highlight new objects)", + ) return parser.parse_args() @@ -144,9 +150,9 @@ def days_format(nb_days: int, critical_time=90) -> str: sortClass = str(nb_days).zfill(6) if nb_days is None: - return f" Unknown" + return f"Unknown" if nb_days > 19000: - return f" Never" + return f"Never" y = nb_days // 365 m = (nb_days % 365) // 30 d = (nb_days % 365) % 30 @@ -154,11 +160,11 @@ def days_format(nb_days: int, critical_time=90) -> str: color = "#b00404" if nb_days > 2 * critical_time else "#e36402" if nb_days > critical_time else "#0a6e01" if y > 0: - return f" {y} year{'s' if y > 1 else ''}, {m} month{'s' if m > 1 else ''} and {d} day{'s' if d > 1 else ''}" + return f"{y} year{'s' if y > 1 else ''}, {m} month{'s' if m > 1 else ''} and {d} day{'s' if d > 1 else ''}" elif m > 0: - return f" {m} month{'s' if m > 1 else ''} and {d} day{'s' if d > 1 else ''}" + return f"{m} month{'s' if m > 1 else ''} and {d} day{'s' if d > 1 else ''}" else: - return f" {d} day{'s' if d > 1 else ''}" + return f"{d} day{'s' if d > 1 else ''}" def grid_data_stringify(raw_data: dict) -> str: diff --git a/poetry.lock b/poetry.lock index d3693926..65be45d9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,137 @@ -# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
+ +[[package]] +name = "certifi" +version = "2026.1.4" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.7" +files = [ + {file = "certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c"}, + {file = "certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.4" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7" +files = [ + {file = "charset_normalizer-3.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4bd5d4137d500351a30687c2d3971758aac9a19208fc110ccb9d7188fbe709e8"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:027f6de494925c0ab2a55eab46ae5129951638a49a34d87f4c3eda90f696b4ad"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f820802628d2694cb7e56db99213f930856014862f3fd943d290ea8438d07ca8"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:798d75d81754988d2565bff1b97ba5a44411867c0cf32b77a7e8f8d84796b10d"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d1bb833febdff5c8927f922386db610b49db6e0d4f4ee29601d71e7c2694313"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = 
"sha256:9cd98cdc06614a2f768d2b7286d66805f94c48cde050acdbbb7db2600ab3197e"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:077fbb858e903c73f6c9db43374fd213b0b6a778106bc7032446a8e8b5b38b93"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:244bfb999c71b35de57821b8ea746b24e863398194a4014e4c76adc2bbdfeff0"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:64b55f9dce520635f018f907ff1b0df1fdc31f2795a922fb49dd14fbcdf48c84"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:faa3a41b2b66b6e50f84ae4a68c64fcd0c44355741c6374813a800cd6695db9e"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6515f3182dbe4ea06ced2d9e8666d97b46ef4c75e326b79bb624110f122551db"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc00f04ed596e9dc0da42ed17ac5e596c6ccba999ba6bd92b0e0aef2f170f2d6"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-win32.whl", hash = "sha256:f34be2938726fc13801220747472850852fe6b1ea75869a048d6f896838c896f"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:a61900df84c667873b292c3de315a786dd8dac506704dea57bc957bd31e22c7d"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-win_arm64.whl", hash = "sha256:cead0978fc57397645f12578bfd2d5ea9138ea0fac82b2f63f7f7c6877986a69"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e1fcf0720908f200cd21aa4e6750a48ff6ce4afe7ff5a79a90d5ed8a08296f8"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f819d5fe9234f9f82d75bdfa9aef3a3d72c4d24a6e57aeaebba32a704553aa0"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = 
"sha256:a59cb51917aa591b1c4e6a43c132f0cdc3c76dbad6155df4e28ee626cc77a0a3"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8ef3c867360f88ac904fd3f5e1f902f13307af9052646963ee08ff4f131adafc"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d9e45d7faa48ee908174d8fe84854479ef838fc6a705c9315372eacbc2f02897"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:840c25fb618a231545cbab0564a799f101b63b9901f2569faecd6b222ac72381"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ca5862d5b3928c4940729dacc329aa9102900382fea192fc5e52eb69d6093815"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9c7f57c3d666a53421049053eaacdd14bbd0a528e2186fcb2e672effd053bb0"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:277e970e750505ed74c832b4bf75dac7476262ee2a013f5574dd49075879e161"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:31fd66405eaf47bb62e8cd575dc621c56c668f27d46a61d975a249930dd5e2a4"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:0d3d8f15c07f86e9ff82319b3d9ef6f4bf907608f53fe9d92b28ea9ae3d1fd89"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:9f7fcd74d410a36883701fafa2482a6af2ff5ba96b9a620e9e0721e28ead5569"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ebf3e58c7ec8a8bed6d66a75d7fb37b55e5015b03ceae72a8e7c74495551e224"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-win32.whl", hash = "sha256:eecbc200c7fd5ddb9a7f16c7decb07b566c29fa2161a16cf67b8d068bd21690a"}, + {file = 
"charset_normalizer-3.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:5ae497466c7901d54b639cf42d5b8c1b6a4fead55215500d2f486d34db48d016"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-win_arm64.whl", hash = "sha256:65e2befcd84bc6f37095f5961e68a6f077bf44946771354a28ad434c2cce0ae1"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc"}, + {file = 
"charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = 
"sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = 
"sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ce8a0633f41a967713a59c4139d29110c07e826d131a316b50ce11b1d79b4f84"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eaabd426fe94daf8fd157c32e571c85cb12e66692f15516a83a03264b08d06c3"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c4ef880e27901b6cc782f1b95f82da9313c0eb95c3af699103088fa0ac3ce9ac"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2aaba3b0819274cc41757a1da876f810a3e4d7b6eb25699253a4effef9e8e4af"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:778d2e08eda00f4256d7f672ca9fef386071c9202f5e4607920b86d7803387f2"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f155a433c2ec037d4e8df17d18922c3a0d9b3232a396690f17175d2946f0218d"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = 
"sha256:a8bf8d0f749c5757af2142fe7903a9df1d2e8aa3841559b2bad34b08d0e2bcf3"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:194f08cbb32dc406d6e1aea671a68be0823673db2832b38405deba2fb0d88f63"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:6aee717dcfead04c6eb1ce3bd29ac1e22663cdea57f943c87d1eab9a025438d7"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:cd4b7ca9984e5e7985c12bc60a6f173f3c958eae74f3ef6624bb6b26e2abbae4"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_riscv64.whl", hash = "sha256:b7cf1017d601aa35e6bb650b6ad28652c9cd78ee6caff19f3c28d03e1c80acbf"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:e912091979546adf63357d7e2ccff9b44f026c075aeaf25a52d0e95ad2281074"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:5cb4d72eea50c8868f5288b7f7f33ed276118325c1dfd3957089f6b519e1382a"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-win32.whl", hash = "sha256:837c2ce8c5a65a2035be9b3569c684358dfbf109fd3b6969630a87535495ceaa"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:44c2a8734b333e0578090c4cd6b16f275e07aa6614ca8715e6c038e865e70576"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a9768c477b9d7bd54bc0c86dbaebdec6f03306675526c9927c0e8a04e8f94af9"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1bee1e43c28aa63cb16e5c14e582580546b08e535299b8b6158a7c9c768a1f3d"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:fd44c878ea55ba351104cb93cc85e74916eb8fa440ca7903e57575e97394f608"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = 
"sha256:0f04b14ffe5fdc8c4933862d8306109a2c51e0704acfa35d51598eb45a1e89fc"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:cd09d08005f958f370f539f186d10aec3377d55b9eeb0d796025d4886119d76e"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4fe7859a4e3e8457458e2ff592f15ccb02f3da787fcd31e0183879c3ad4692a1"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fa09f53c465e532f4d3db095e0c55b615f010ad81803d383195b6b5ca6cbf5f3"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7fa17817dc5625de8a027cb8b26d9fefa3ea28c8253929b8d6649e705d2835b6"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:5947809c8a2417be3267efc979c47d76a079758166f7d43ef5ae8e9f92751f88"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:4902828217069c3c5c71094537a8e623f5d097858ac6ca8252f7b4d10b7560f1"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:7c308f7e26e4363d79df40ca5b2be1c6ba9f02bdbccfed5abddb7859a6ce72cf"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:2c9d3c380143a1fedbff95a312aa798578371eb29da42106a29019368a475318"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cb01158d8b88ee68f15949894ccc6712278243d95f344770fa7593fa2d94410c"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-win32.whl", hash = "sha256:2677acec1a2f8ef614c6888b5b4ae4060cc184174a938ed4e8ef690e15d3e505"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:f8e160feb2aed042cd657a72acc0b481212ed28b1b9a95c0cee1621b524e1966"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-win_arm64.whl", hash = 
"sha256:b5d84d37db046c5ca74ee7bb47dd6cbc13f80665fdde3e8040bdd3fb015ecb50"}, + {file = "charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f"}, + {file = "charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a"}, +] [[package]] name = "colorama" @@ -11,6 +144,20 @@ files = [ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +[[package]] +name = "idna" +version = "3.11" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea"}, + {file = "idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + [[package]] name = "neo4j" version = "5.24.0" @@ -103,6 +250,27 @@ files = [ {file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"}, ] +[[package]] +name = "requests" +version = "2.32.5" +description = "Python HTTP for Humans." 
+optional = false +python-versions = ">=3.9" +files = [ + {file = "requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6"}, + {file = "requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset_normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + [[package]] name = "tqdm" version = "4.66.4" @@ -123,7 +291,24 @@ notebook = ["ipywidgets (>=6)"] slack = ["slack-sdk"] telegram = ["requests"] +[[package]] +name = "urllib3" +version = "2.6.3" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.9" +files = [ + {file = "urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4"}, + {file = "urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed"}, +] + +[package.extras] +brotli = ["brotli (>=1.2.0)", "brotlicffi (>=1.2.0.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["backports-zstd (>=1.0.0)"] + [metadata] lock-version = "2.0" python-versions = ">=3.10" -content-hash = "7b87590c2c9b8cfe2e1a0543382806c2e2982b8c0c8ae7b981733c169c1ddf2f" +content-hash = "fad5947e08ae828864771a7835ddb90d86bba4acd36b407ea9b5e65ecdbc16cd" diff --git a/pyproject.toml b/pyproject.toml index 0e4d433a..45270b24 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,6 +12,7 @@ dev-dependencies = { } numpy = "==2.1.1" pytz = "==2022.7.1" tqdm = "==4.66.4" + requests = "^2.32.5" [tool.poetry.scripts] AD-miner = "ad_miner.__main__:main" diff --git a/requirements.txt b/requirements.txt index e89531d0..6bb571f6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,3 +2,4 @@ neo4j==5.24.0 numpy==2.1.1 
pytz==2022.7.1 tqdm==4.66.4 +requests==2.32.5