From b8f4c19d01135b85175f2848228e57a59965b145 Mon Sep 17 00:00:00 2001 From: John Walstra Date: Wed, 7 Jan 2026 11:04:36 -0600 Subject: [PATCH 01/24] DR-1166 Add ability to select configuration UID for PAM Allow configuration UID to be selected for gateways used by multiple PAM configuration records on commands. --- keepercommander/commands/discover/__init__.py | 149 ++++++- .../commands/discover/job_remove.py | 108 +++--- .../commands/discover/job_start.py | 22 +- .../commands/discover/job_status.py | 364 ++++++++++-------- .../commands/discover/result_process.py | 2 +- keepercommander/commands/discover/rule_add.py | 36 +- .../commands/discover/rule_list.py | 37 +- .../commands/discover/rule_remove.py | 19 +- .../commands/discover/rule_update.py | 29 +- keepercommander/commands/discoveryrotation.py | 6 +- keepercommander/commands/pam_debug/acl.py | 22 +- keepercommander/commands/pam_debug/gateway.py | 19 +- keepercommander/commands/pam_debug/graph.py | 90 +++-- keepercommander/commands/pam_debug/info.py | 10 +- keepercommander/commands/pam_debug/link.py | 21 +- .../commands/pam_debug/rotation_setting.py | 6 +- keepercommander/commands/pam_debug/verify.py | 19 +- keepercommander/commands/pam_debug/vertex.py | 30 +- keepercommander/commands/pam_saas/__init__.py | 4 +- keepercommander/commands/pam_saas/config.py | 29 +- keepercommander/commands/pam_saas/remove.py | 22 +- .../commands/pam_saas/{add.py => set.py} | 16 +- keepercommander/commands/pam_saas/update.py | 21 +- .../discovery_common/record_link.py | 2 +- keepercommander/discovery_common/types.py | 2 +- 25 files changed, 701 insertions(+), 384 deletions(-) rename keepercommander/commands/pam_saas/{add.py => set.py} (92%) diff --git a/keepercommander/commands/discover/__init__.py b/keepercommander/commands/discover/__init__.py index d73c744af..f9c6c653b 100644 --- a/keepercommander/commands/discover/__init__.py +++ b/keepercommander/commands/discover/__init__.py @@ -13,8 +13,7 @@ from 
...discovery_common.constants import PAM_USER, PAM_MACHINE, PAM_DATABASE, PAM_DIRECTORY import json import base64 - -from typing import List, Optional, Union, TYPE_CHECKING +from typing import List, Optional, Union, Callable, Tuple, Any, Dict, TYPE_CHECKING if TYPE_CHECKING: from ...params import KeeperParams @@ -22,7 +21,36 @@ from ...proto import pam_pb2 +class MultiConfigurationException(Exception): + """ + If the gateway has multiple configuration + """ + def __init__(self, items: List[Dict]): + super().__init__() + self.items = items + + def print_items(self): + for item in self.items: + record = item["configuration_record"] # type: KeeperRecord + print(f" * {record.record_uid} - {record.title}") + + class GatewayContext: + + """ + Context for a gateway and a configuration. + + In the configuration record, the gateway is selected. + This means multiple configuration can use the same gateway. + Commander is gateway centric, we need to treat gateway and configuration as a `primary key` + + Since we get the configuration record from the vault, go through each of them and see if that gateway + is only used by one configuration. + If it is, then that gateway and configuration pair are used. + If there are multiple configuration, we need to throw an MultiConfigurationException. + + """ + def __init__(self, configuration: KeeperRecord, facade: PamConfigurationRecordFacade, gateway: pam_pb2.PAMController, application: ApplicationRecord): self.configuration = configuration @@ -36,7 +64,42 @@ def all_gateways(params: KeeperParams): return get_all_gateways(params) @staticmethod - def from_configuration_uid(params: KeeperParams, configuration_uid: str, gateways: Optional[List] = None): + def find_gateway(params: KeeperParams, find_func: Callable, gateways: Optional[List] = None) \ + -> Tuple[Optional[GatewayContext], Any]: + + """ + Populate the context from matching using the function passed in. 
+ The function needs to return a non-None value to be considered a positive match. + + """ + + if gateways is None: + gateways = GatewayContext.all_gateways(params) + + configuration_records = list(vault_extensions.find_records(params, "pam.*Configuration")) + for configuration_record in configuration_records: + payload = find_func( + configuration_record=configuration_record + ) + if payload is not None: + return GatewayContext.from_configuration_uid( + params=params, + configuration_uid=configuration_record.record_uid, + gateways=gateways + ), payload + + return None, None + + @staticmethod + def from_configuration_uid(params: KeeperParams, configuration_uid: str, gateways: Optional[List] = None) \ + -> Optional[GatewayContext]: + + """ + Populate context using the configuration UID. + + From the configuration record, get the gateway from the settings. + + """ if gateways is None: gateways = GatewayContext.all_gateways(params) @@ -44,7 +107,7 @@ def from_configuration_uid(params: KeeperParams, configuration_uid: str, gateway configuration_record = vault.KeeperRecord.load(params, configuration_uid) if not isinstance(configuration_record, vault.TypedRecord): print(f'{bcolors.FAIL}PAM Configuration [{configuration_uid}] is not available.{bcolors.ENDC}') - return + return None configuration_facade = PamConfigurationRecordFacade() configuration_facade.record = configuration_record @@ -55,7 +118,7 @@ def from_configuration_uid(params: KeeperParams, configuration_uid: str, gateway None) if gateway is None: - return + return None application_id = utils.base64_url_encode(gateway.applicationUid) application = KSMCommand.get_app_record(params, application_id) @@ -68,35 +131,83 @@ def from_configuration_uid(params: KeeperParams, configuration_uid: str, gateway ) @staticmethod - def from_gateway(params: KeeperParams, gateway: str): - # Get all the PAM configuration records + def from_gateway(params: KeeperParams, gateway: str, configuration_uid: Optional[str] = None) \ + -> 
Optional[GatewayContext]: + + """ + Populate context use the gateway, and optional configuration UID. + + This will scan all configuration to find which ones use this gateway. + If there are multiple ones, a MultiConfigurationException is thrown. + If there is only one gateway, then that gateway is used. + + """ + # Get all the PAM configuration records in the Vault; not Application configuration_records = list(vault_extensions.find_records(params, "pam.*Configuration")) + + if configuration_uid: + logging.debug(f"find the gateway with configuration record {configuration_uid}") + + # You get this if the user has not setup any PAM related records. if len(configuration_records) == 0: print(f"{bcolors.FAIL}Cannot find any PAM configuration records in the Vault{bcolors.ENDC}") + return None all_gateways = get_all_gateways(params) + found_items = [] + for configuration_record in configuration_records: - for record in configuration_records: - - logging.debug(f"checking configuration record {record.title}") + logging.debug(f"checking configuration record {configuration_record.title}") # Load the configuration record and get the gateway_uid from the facade. 
- configuration_record = vault.KeeperRecord.load(params, record.record_uid) + configuration_record = vault.KeeperRecord.load(params, configuration_record.record_uid) configuration_facade = PamConfigurationRecordFacade() configuration_facade.record = configuration_record configuration_gateway_uid = configuration_facade.controller_uid if configuration_gateway_uid is None: - logging.debug(f"configuration {configuration_record.title} does not have a gateway set, skipping.") + logging.debug(f" * configuration {configuration_record.title} does not have a gateway set, skipping.") continue # Get the gateway for this configuration found_gateway = next((x for x in all_gateways if utils.base64_url_encode(x.controllerUid) == configuration_gateway_uid), None) if found_gateway is None: - logging.debug(f"cannot find gateway for configuration {configuration_record.title}, skipping.") + logging.debug(f" * configuration does not use desired gateway") continue + # If the configuration_uid was passed in, and we find it, just set the found items to this + # configuration and stop checking for more. 
+ if configuration_uid is not None and configuration_uid == configuration_record.record_uid: + logging.debug(f" * configuration record uses this gateway and matches desire configuration, " + "skipping the rest") + found_items = [{ + "configuration_facade": configuration_facade, + "configuration_record": configuration_record, + "gateway": found_gateway + }] + break + + if (utils.base64_url_encode(found_gateway.controllerUid) == gateway or + found_gateway.controllerName.lower() == gateway.lower()): + logging.debug(f" * configuration record uses this gateway") + found_items.append({ + "configuration_facade": configuration_facade, + "configuration_record": configuration_record, + "gateway": found_gateway + }) + + if len(found_items) > 1: + logging.debug(f"found {len(found_items)} configurations using this gateway") + raise MultiConfigurationException( + items=found_items + ) + + if len(found_items) == 1: + found_gateway = found_items[0]["gateway"] + configuration_record = found_items[0]["configuration_record"] + configuration_facade = found_items[0]["configuration_facade"] + application_id = utils.base64_url_encode(found_gateway.applicationUid) application = KSMCommand.get_app_record(params, application_id) if application is None: @@ -158,7 +269,7 @@ def get_shared_folders(self, params: KeeperParams) -> List[dict]: return self._shared_folders def decrypt(self, cipher_base64: bytes) -> dict: - ciphertext = base64.b64decode(cipher_base64) + ciphertext = base64.b64decode(cipher_base64.decode()) return json.loads(decrypt_aes_v2(ciphertext, self.configuration.record_key)) def encrypt(self, data: dict) -> str: @@ -255,3 +366,13 @@ def _p(msg): @staticmethod def _n(record_type): return PAMGatewayActionDiscoverCommandBase.type_name_map.get(record_type, "PAM Configuration") + + +def multi_conf_msg(gateway: str, err: MultiConfigurationException): + print("") + print(f"{bcolors.FAIL}Found multiple configuration records for gateway {gateway}.{bcolors.ENDC}") + print("") + 
print(f"Please use the --configuration-uid parameter to select the configuration.") + print(f"Available configurations are: ") + err.print_items() + print("") \ No newline at end of file diff --git a/keepercommander/commands/discover/job_remove.py b/keepercommander/commands/discover/job_remove.py index 2e772c492..fed280496 100644 --- a/keepercommander/commands/discover/job_remove.py +++ b/keepercommander/commands/discover/job_remove.py @@ -4,14 +4,23 @@ from . import PAMGatewayActionDiscoverCommandBase, GatewayContext from ..pam.pam_dto import GatewayActionDiscoverJobRemoveInputs, GatewayActionDiscoverJobRemove, GatewayAction from ..pam.router_helper import router_send_action_to_gateway, router_get_connected_gateways -from ... import vault_extensions from ...display import bcolors from ...discovery_common.jobs import Jobs from ...proto import pam_pb2 +from typing import Optional, Dict class PAMGatewayActionDiscoverJobRemoveCommand(PAMGatewayActionDiscoverCommandBase): - parser = argparse.ArgumentParser(prog='pam-action-discover-remove') + + """ + Remove a discovery job. + + This will attempt to remove the job from the gateway if running. + And it will remove the current job from the Jobs graph. + + """ + + parser = argparse.ArgumentParser(prog='pam action discover remove') parser.add_argument('--job-id', '-j', required=True, dest='job_id', action='store', help='Discovery job id.') @@ -25,54 +34,59 @@ def execute(self, params, **kwargs): job_id = kwargs.get("job_id") - # Get all the PAM configuration records - configuration_records = list(vault_extensions.find_records(params, "pam.*Configuration")) + # Get all the gateways here so we don't have to keep calling this method. + # It gets passed into find_gateway, and find_gateway will pass it around. 
all_gateways = GatewayContext.all_gateways(params) - for configuration_record in configuration_records: - - gateway_context = GatewayContext.from_configuration_uid(params=params, - configuration_uid=configuration_record.record_uid, - gateways=all_gateways) - if gateway_context is None: - continue - - jobs = Jobs(record=configuration_record, params=params) - job_item = jobs.get_job(job_id) + def _find_job(configuration_record) -> Optional[Dict]: + jobs_obj = Jobs(record=configuration_record, params=params) + job_item = jobs_obj.get_job(job_id) if job_item is not None: - - try: - # First, cancel the running discovery job if it is running. - logging.debug("cancel job on the gateway, if running") - action_inputs = GatewayActionDiscoverJobRemoveInputs( - configuration_uid=gateway_context.configuration_uid, - job_id=job_id - ) - - conversation_id = GatewayAction.generate_conversation_id() - router_response = router_send_action_to_gateway( - params=params, - gateway_action=GatewayActionDiscoverJobRemove( - inputs=action_inputs, - conversation_id=conversation_id), - message_type=pam_pb2.CMT_DISCOVERY, - is_streaming=False, - destination_gateway_uid_str=gateway_context.gateway_uid - ) - - data = self.get_response_data(router_response) - if data is None: - raise Exception("The router returned a failure.") - elif data.get("success") is False: - error = data.get("error") - raise Exception(f"Discovery job was not removed: {error}") - except Exception as err: - logging.debug(f"gateway return error removing discovery job: {err}") - - jobs.cancel(job_id) - - print(f"{bcolors.OKGREEN}Discovery job has been removed or cancelled.{bcolors.ENDC}") - return + return { + "jobs": jobs_obj, + } + return None + + gateway_context, payload = GatewayContext.find_gateway(params=params, + find_func=_find_job, + gateways=all_gateways) + + if gateway_context is not None: + jobs = payload["jobs"] + + try: + # First, cancel the running discovery job if it is running. 
+ logging.debug("cancel job on the gateway, if running") + action_inputs = GatewayActionDiscoverJobRemoveInputs( + configuration_uid=gateway_context.configuration_uid, + job_id=job_id + ) + + conversation_id = GatewayAction.generate_conversation_id() + router_response = router_send_action_to_gateway( + params=params, + gateway_action=GatewayActionDiscoverJobRemove( + inputs=action_inputs, + conversation_id=conversation_id), + message_type=pam_pb2.CMT_DISCOVERY, + is_streaming=False, + destination_gateway_uid_str=gateway_context.gateway_uid + ) + + data = self.get_response_data(router_response) + if data is None: + raise Exception("The router returned a failure.") + elif data.get("success") is False: + error = data.get("error") + raise Exception(f"Discovery job was not removed: {error}") + except Exception as err: + logging.debug(f"gateway return error removing discovery job: {err}") + + jobs.cancel(job_id) + jobs.close() + + print(f"{bcolors.OKGREEN}Discovery job has been removed or cancelled.{bcolors.ENDC}") + return print(f'{bcolors.FAIL}Discovery job not found. Cannot get remove the job.{bcolors.ENDC}') return diff --git a/keepercommander/commands/discover/job_start.py b/keepercommander/commands/discover/job_start.py index bfb324536..e7f701990 100644 --- a/keepercommander/commands/discover/job_start.py +++ b/keepercommander/commands/discover/job_start.py @@ -2,7 +2,7 @@ import argparse import logging import json -from . import PAMGatewayActionDiscoverCommandBase, GatewayContext +from . 
import PAMGatewayActionDiscoverCommandBase, GatewayContext, MultiConfigurationException, multi_conf_msg from .job_status import PAMGatewayActionDiscoverJobStatusCommand from ..pam.router_helper import router_send_action_to_gateway, print_router_response, router_get_connected_gateways from ..pam.user_facade import PamUserRecordFacade @@ -20,11 +20,14 @@ class PAMGatewayActionDiscoverJobStartCommand(PAMGatewayActionDiscoverCommandBase): - parser = argparse.ArgumentParser(prog='pam-action-discover-start') + parser = argparse.ArgumentParser(prog='pam action discover start') parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store', help='Gateway name of UID.') + parser.add_argument('--configuration-uid', '-c', required=False, dest='configuration_uid', + action='store', help='PAM configuration UID, if gateway has multiple.') parser.add_argument('--resource', '-r', required=False, dest='resource_uid', action='store', help='UID of the resource record. Set to discover specific resource.') + parser.add_argument('--lang', required=False, dest='language', action='store', default="en_US", help='Language') parser.add_argument('--include-machine-dir-users', required=False, dest='include_machine_dir_users', @@ -41,6 +44,7 @@ class PAMGatewayActionDiscoverJobStartCommand(PAMGatewayActionDiscoverCommandBas action='store_true', help='Skip discovering directories.') parser.add_argument('--skip-cloud-users', required=False, dest='skip_cloud_users', action='store_true', help='Skip discovering cloud users.') + # parser.add_argument('--cred', required=False, dest='credentials', # action='append', help='List resource credentials.') # parser.add_argument('--cred-file', required=False, dest='credential_file', @@ -97,10 +101,16 @@ def execute(self, params, **kwargs): # Load the configuration record and get the gateway_uid from the facade. 
gateway = kwargs.get('gateway') - - gateway_context = GatewayContext.from_gateway(params, gateway) - if gateway_context is None: - print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.") + gateway_context = None + try: + gateway_context = GatewayContext.from_gateway(params=params, + gateway=gateway, + configuration_uid=kwargs.get('configuration_uid')) + if gateway_context is None: + print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.{bcolors.ENDC}") + return + except MultiConfigurationException as err: + multi_conf_msg(gateway, err) return jobs = Jobs(record=gateway_context.configuration, params=params) diff --git a/keepercommander/commands/discover/job_status.py b/keepercommander/commands/discover/job_status.py index 8bab8cfec..64dacc78d 100644 --- a/keepercommander/commands/discover/job_status.py +++ b/keepercommander/commands/discover/job_status.py @@ -1,6 +1,5 @@ from __future__ import annotations import argparse -import json import logging from . import PAMGatewayActionDiscoverCommandBase, GatewayContext from ..pam.router_helper import router_get_connected_gateways @@ -11,9 +10,10 @@ from ...discovery_common.constants import DIS_INFRA_GRAPH_ID from ...discovery_common.types import DiscoveryDelta, DiscoveryObject from ...keeper_dag.dag import DAG -from typing import TYPE_CHECKING +from typing import Optional, Dict, List, TYPE_CHECKING if TYPE_CHECKING: + from ...params import KeeperParams from ...discovery_common.jobs import JobItem @@ -34,29 +34,43 @@ def _b(text): class PAMGatewayActionDiscoverJobStatusCommand(PAMGatewayActionDiscoverCommandBase): - parser = argparse.ArgumentParser(prog='pam-action-discover-status') + """ + Get the status of discovery jobs. + + If no parameters are given, it will check all gateways for discovery job status. 
+ + """ + + parser = argparse.ArgumentParser(prog='pam action discover status') parser.add_argument('--gateway', '-g', required=False, dest='gateway', action='store', help='Show only discovery jobs from a specific gateway.') parser.add_argument('--job-id', '-j', required=False, dest='job_id', action='store', help='Detailed information for a specific discovery job.') - # parser.add_argument('--file', required=False, dest='json_file', action='store', - # help='Save status to JSON file.') parser.add_argument('--history', required=False, dest='show_history', action='store_true', help='Show history') + parser.add_argument('--configuration-uid', '-c', required=False, dest='configuration_uid', + action='store', help='PAM configuration UID is using --history') def get_parser(self): return PAMGatewayActionDiscoverJobStatusCommand.parser - def job_detail(self, job): - pass - @staticmethod - def print_job_table(jobs, max_gateway_name, show_history=False): + def print_job_table(jobs: List[Dict], + max_gateway_name: int, + show_history: bool = False): + + """ + Print jobs in a table. + + This method takes a list of dictionary item which contains the cooked job information. 
+ + """ print("") print(f"{bcolors.HEADER}{'Job ID'.ljust(14, ' ')} " f"{'Gateway Name'.ljust(max_gateway_name, ' ')} " f"{'Gateway UID'.ljust(22, ' ')} " + f"{'Configuration UID'.ljust(22, ' ')} " f"{'Status'.ljust(12, ' ')} " f"{'Resource UID'.ljust(22, ' ')} " f"{'Started'.ljust(19, ' ')} " @@ -67,6 +81,7 @@ def print_job_table(jobs, max_gateway_name, show_history=False): print(f"{''.ljust(14, '=')} " f"{''.ljust(max_gateway_name, '=')} " f"{''.ljust(22, '=')} " + f"{''.ljust(22, '=')} " f"{''.ljust(12, '=')} " f"{''.ljust(22, '=')} " f"{''.ljust(19, '=')} " @@ -92,6 +107,7 @@ def print_job_table(jobs, max_gateway_name, show_history=False): print(f"{color}{job_id} " f"{job['gateway'].ljust(max_gateway_name, ' ')} " f"{job['gateway_uid']} " + f"{job['configuration_uid']} " f"{job['status'].ljust(12, ' ')} " f"{(job.get('resource_uid') or 'NA').ljust(22, ' ')} " f"{(job.get('start_ts_str') or 'NA').ljust(19, ' ')} " @@ -141,177 +157,213 @@ def print_job_table(jobs, max_gateway_name, show_history=False): print("") @staticmethod - def print_job_detail(params, gateway_context, jobs, job_id): - - infra = Infrastructure(record=gateway_context.configuration, params=params) - - for job in jobs: - if job_id == job["job_id"]: - gateway_context = job["gateway_context"] - if job['status'] == "COMPLETE": - color = bcolors.OKGREEN - elif job['status'] == "RUNNING": - color = bcolors.OKBLUE + def print_job_detail(params: KeeperParams, + all_gateways: List, + job_id: str): + + def _find_job(configuration_record) -> Optional[Dict]: + jobs_obj = Jobs(record=configuration_record, params=params) + job_item = jobs_obj.get_job(job_id) + if job_item is not None: + return { + "jobs": jobs_obj, + } + return None + + gateway_context, payload = GatewayContext.find_gateway(params=params, + find_func=_find_job, + gateways=all_gateways) + + if gateway_context is not None: + jobs = payload["jobs"] + job = jobs.get_job(job_id) # type: JobItem + infra = 
Infrastructure(record=gateway_context.configuration, params=params) + + color = bcolors.OKBLUE + status = "RUNNING" + if job.end_ts is not None: + if job.success is None: + color = bcolors.WHITE + status = "CANCELLED" else: - color = bcolors.FAIL - status = f"{color}{job['status']}{bcolors.ENDC}" + color = bcolors.OKGREEN + status = "COMPLETE" + elif job.error: + color = bcolors.FAIL + status = "FAILED" + + status = f"{color}{status}{bcolors.ENDC}" + print("") + print(f"{_h('Job ID')}: {job.job_id}") + print(f"{_h('Sync Point')}: {job.sync_point}") + print(f"{_h('Gateway Name')}: {gateway_context.gateway_name}") + print(f"{_h('Gateway UID')}: {gateway_context.gateway_uid}") + print(f"{_h('Configuration UID')}: {gateway_context.configuration_uid}") + print(f"{_h('Status')}: {status}") + print(f"{_h('Resource UID')}: {job.resource_uid or 'NA'}") + print(f"{_h('Started')}: {job.start_ts_str}") + print(f"{_h('Completed')}: {job.end_ts_str}") + print(f"{_h('Duration')}: {job.duration_sec_str}") + + # If it failed, show the error and stacktrace. + if status == "FAILED": print("") - print(f"{_h('Job ID')}: {job['job_id']}") - print(f"{_h('Sync Point')}: {job['sync_point']}") - print(f"{_h('Gateway Name')}: {job['gateway']}") - print(f"{_h('Gateway UID')}: {job['gateway_uid']}") - print(f"{_h('Configuration UID')}: {gateway_context.configuration_uid}") - print(f"{_h('Status')}: {status}") - print(f"{_h('Resource UID')}: {job.get('resource_uid', 'NA')}") - print(f"{_h('Started')}: {job['start_ts_str']}") - print(f"{_h('Completed')}: {job.get('end_ts_str')}") - print(f"{_h('Duration')}: {job.get('duration')}") - - # If it failed, show the error and stacktrace. 
- if job['status'] == "FAILED": - print("") - print(f"{_h('Gateway Error')}:") - print(f"{color}{job['error']}{bcolors.ENDC}") + print(f"{_h('Gateway Error')}:") + print(f"{color}{job['error']}{bcolors.ENDC}") + print("") + print(f"{_h('Gateway Stacktrace')}:") + print(f"{color}{job['stacktrace']}{bcolors.ENDC}") + # If it finished, show information about what was discovered. + elif job.end_ts is not None: + + try: + infra.load(sync_point=0) print("") - print(f"{_h('Gateway Stacktrace')}:") - print(f"{color}{job['stacktrace']}{bcolors.ENDC}") - # If it finished, show information about what was discovered. - elif job.get('end_ts') is not None: - job_item = job.get("job_item") # type: JobItem - - try: - infra.load(sync_point=0) - print("") - delta_json = job.get('delta') - if delta_json is not None: - delta = DiscoveryDelta.model_validate(delta_json) - print(f"{_h('Added')} - {len(delta.added)} count") - for item in delta.added: - vertex = infra.dag.get_vertex(item.uid) - if vertex is None or vertex.active is False or vertex.has_data is False: - logging.debug("added: vertex is none, inactive or has no data") - continue - discovery_object = DiscoveryObject.get_discovery_object(vertex) - print(f" * {discovery_object.description}") - - print("") - print(f"{_h('Changed')} - {len(delta.changed)} count") - for item in delta.changed: - vertex = infra.dag.get_vertex(item.uid) - if vertex is None or vertex.active is False or vertex.has_data is False: - logging.debug("changed: vertex is none, inactive or has no data") - continue - discovery_object = DiscoveryObject.get_discovery_object(vertex) - print(f" * {discovery_object.description}") - if item.changes is None: - print(f" no changed, may be a object not added in prior discoveries.") - else: - for key, value in item.changes.items(): - print(f" - {key} = {value}") - - print("") - print(f"{_h('Deleted')} - {len(delta.deleted)} count") - for item in delta.deleted: - print(f" * discovery vertex {item.uid}") - else: - 
print(f"{_f('There are no available delta changes for this job.')}") - - except Exception as err: - print(f"{_f('Could not load delta from infrastructure: ' + str(err))}") - print("Fall back to raw graph.") + delta_json = job.delta + if delta_json is not None: + delta = DiscoveryDelta.model_validate(delta_json) + print(f"{_h('Added')} - {len(delta.added)} count") + for item in delta.added: + vertex = infra.dag.get_vertex(item.uid) + if vertex is None or vertex.active is False or vertex.has_data is False: + logging.debug("added: vertex is none, inactive or has no data") + continue + discovery_object = DiscoveryObject.get_discovery_object(vertex) + print(f" * {discovery_object.description}") + print("") - dag = DAG(conn=infra.conn, record=infra.record, graph_id=DIS_INFRA_GRAPH_ID) - print(dag.to_dot_raw(sync_point=job_item.sync_point, rank_dir="RL")) + print(f"{_h('Changed')} - {len(delta.changed)} count") + for item in delta.changed: + vertex = infra.dag.get_vertex(item.uid) + if vertex is None or vertex.active is False or vertex.has_data is False: + logging.debug("changed: vertex is none, inactive or has no data") + continue + discovery_object = DiscoveryObject.get_discovery_object(vertex) + print(f" * {discovery_object.description}") + if item.changes is None: + print(f" no changed, may be a object not added in prior discoveries.") + else: + for key, value in item.changes.items(): + print(f" - {key} = {value}") - return + print("") + print(f"{_h('Deleted')} - {len(delta.deleted)} count") + for item in delta.deleted: + print(f" * discovery vertex {item.uid}") + else: + print(f"{_f('There are no available delta changes for this job.')}") + + except Exception as err: + print(f"{_f('Could not load delta from infrastructure: ' + str(err))}") + print("Fall back to raw graph.") + print("") + dag = DAG(conn=infra.conn, record=infra.record, graph_id=DIS_INFRA_GRAPH_ID) + print(dag.to_dot_raw(sync_point=job.sync_point, rank_dir="RL")) - print(f"{bcolors.FAIL}Cannot find the 
job{bcolors.ENDC}") + else: + print(f"{bcolors.FAIL}Could not find the gateway with job {job_id}.") def execute(self, params, **kwargs): if not hasattr(params, 'pam_controllers'): router_get_connected_gateways(params) + # If this is set, only show status for this gateway and history for this gateway. gateway_filter = kwargs.get("gateway") + + # If this is set, only show detailed information about this job. job_id = kwargs.get("job_id") + + # Show the history for the gateway. + # gateway_filter needs to be set for show_history = kwargs.get("show_history") - if job_id is not None: - show_history = True + # Get all the gateways here so we don't have to keep calling this method. + # It gets passed into find_gateway, and find_gateway will pass it around. + all_gateways = GatewayContext.all_gateways(params) - # Get all the PAM configuration records - configuration_records = list(vault_extensions.find_records(params, "pam.*Configuration")) + # If we are showing all gateways, disable show history. + # History is shown for a specific gateway. + if gateway_filter is None: + show_history = False # This is used to format the table. Start with a length of 12 characters for the gateway. max_gateway_name = 12 - all_jobs = [] - all_gateways = GatewayContext.all_gateways(params) + # If we have a job id, only display information about the one job + if job_id: + self.print_job_detail(params=params, + all_gateways=all_gateways, + job_id=job_id) - # For each configuration/ gateway, we are going to get all jobs. - # We are going to query the gateway for any updated status. - gateway_context = None - for configuration_record in configuration_records: + # Else show jobs in a table + else: - gateway_context = GatewayContext.from_configuration_uid(params=params, - configuration_uid=configuration_record.record_uid, - gateways=all_gateways) - if gateway_context is None: - continue + # Based on parameters set by user, select specific jobs to be displayed. 
+ selected_jobs = [] # type: List[Dict] - # If we are using a gateway filter, and this gateway is not the one, then go onto the next conf/gateway. - if gateway_filter is not None and gateway_context.is_gateway(gateway_filter) is False: - continue + # For each configuration/ gateway, we are going to get all jobs. + # We are going to query the gateway for any updated status. - # If the gateway name is longer that the prior, set the max length to this gateway's name. - if len(gateway_context.gateway_name) > max_gateway_name: - max_gateway_name = len(gateway_context.gateway_name) + configuration_records = list(vault_extensions.find_records(params, "pam.*Configuration")) + for configuration_record in configuration_records: - jobs = Jobs(record=configuration_record, params=params) - if show_history is True: - job_list = reversed(jobs.history) - else: - job_list = [] - if jobs.current_job is not None: - job_list = [jobs.current_job] - - for job_item in job_list: - job = job_item.model_dump() - job["status"] = "RUNNING" - if job_item.start_ts is not None: - job["start_ts_str"] = job_item.start_ts_str - if job_item.end_ts is not None: - job["end_ts_str"] = job_item.end_ts_str - job["status"] = "COMPLETE" - - job["duration"] = job_item.duration_sec_str - - job["gateway"] = gateway_context.gateway_name - job["gateway_uid"] = gateway_context.gateway_uid - - # This is needs for details - job["gateway_context"] = gateway_context - job["job_item"] = job_item - - if job_item.success is False: - job["status"] = "FAILED" - - all_jobs.append(job) - - # Instead of printing a table, save a json file. - if kwargs.get("json_file") is not None: - with open(kwargs.get("json_file"), "w") as fh: - fh.write(json.dumps(all_jobs, indent=4)) - fh.close() - return - - if len(all_jobs) == 0: - print(f"{bcolors.FAIL}There are no discovery jobs. 
Use 'pam action discover start' to start a " - f"discovery job.{bcolors.ENDC}") - return - - if job_id is not None and gateway_context is not None: - self.print_job_detail(params, gateway_context, all_jobs, job_id) - else: - self.print_job_table(all_jobs, max_gateway_name, show_history) + gateway_context = GatewayContext.from_configuration_uid( + params=params, + configuration_uid=configuration_record.record_uid, + gateways=all_gateways) + + if gateway_context is None: + continue + + # If we are using a gateway filter, and this gateway is not the one, then go onto the next conf/gateway. + if gateway_filter is not None and gateway_context.is_gateway(gateway_filter) is False: + continue + + # If the gateway name is longer that the prior, set the max length to this gateway's name. + if len(gateway_context.gateway_name) > max_gateway_name: + max_gateway_name = len(gateway_context.gateway_name) + + jobs = Jobs(record=configuration_record, params=params) + if show_history is True: + job_list = reversed(jobs.history) + else: + job_list = [] + if jobs.current_job is not None: + job_list = [jobs.current_job] + + for job_item in job_list: + job = job_item.model_dump() + job["status"] = "RUNNING" + if job_item.start_ts is not None: + job["start_ts_str"] = job_item.start_ts_str + if job_item.end_ts is not None: + job["end_ts_str"] = job_item.end_ts_str + job["status"] = "COMPLETE" + + job["duration"] = job_item.duration_sec_str + + job["gateway"] = gateway_context.gateway_name + job["gateway_uid"] = gateway_context.gateway_uid + job["configuration_uid"] = gateway_context.configuration_uid + + # This is needs for details + job["gateway_context"] = gateway_context + job["job_item"] = job_item + + if job_item.success is None: + job["status"] = "CANCELLED" + elif not job_item.success: + job["status"] = "FAILED" + + selected_jobs.append(job) + + if len(selected_jobs) == 0: + print(f"{bcolors.FAIL}There are no discovery jobs. 
Use 'pam action discover start' to start a " + f"discovery job.{bcolors.ENDC}") + return + + self.print_job_table(jobs=selected_jobs, + max_gateway_name=max_gateway_name, + show_history=show_history) diff --git a/keepercommander/commands/discover/result_process.py b/keepercommander/commands/discover/result_process.py index 1653cbccd..c9ae11118 100644 --- a/keepercommander/commands/discover/result_process.py +++ b/keepercommander/commands/discover/result_process.py @@ -67,7 +67,7 @@ class PAMGatewayActionDiscoverResultProcessCommand(PAMGatewayActionDiscoverComma Process the discovery data """ - parser = argparse.ArgumentParser(prog='pam-action-discover-process') + parser = argparse.ArgumentParser(prog='pam action discover process') parser.add_argument('--job-id', '-j', required=True, dest='job_id', action='store', help='Discovery job to process.') parser.add_argument('--add-all', required=False, dest='add_all', action='store_true', diff --git a/keepercommander/commands/discover/rule_add.py b/keepercommander/commands/discover/rule_add.py index 2dedaa089..2bf1d7bb9 100644 --- a/keepercommander/commands/discover/rule_add.py +++ b/keepercommander/commands/discover/rule_add.py @@ -1,23 +1,26 @@ from __future__ import annotations import argparse import logging -from . import PAMGatewayActionDiscoverCommandBase, GatewayContext +from . 
import PAMGatewayActionDiscoverCommandBase, GatewayContext, MultiConfigurationException, multi_conf_msg from ..pam.pam_dto import GatewayActionDiscoverRuleValidateInputs, GatewayActionDiscoverRuleValidate, GatewayAction from ..pam.router_helper import router_send_action_to_gateway, router_get_connected_gateways from ...display import bcolors from ...proto import pam_pb2 from ...discovery_common.rule import Rules -from ...discovery_common.types import ActionRuleItem -from typing import TYPE_CHECKING +from ...discovery_common.types import ActionRuleItem, Statement +from typing import List, TYPE_CHECKING if TYPE_CHECKING: from ...params import KeeperParams class PAMGatewayActionDiscoverRuleAddCommand(PAMGatewayActionDiscoverCommandBase): - parser = argparse.ArgumentParser(prog='pam-action-discover-rule-add') + parser = argparse.ArgumentParser(prog='pam action discover rule add') parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store', help='Gateway name of UID.') + parser.add_argument('--configuration-uid', '-c', required=False, dest='configuration_uid', + action='store', help='PAM configuration UID, if gateway has multiple.') + parser.add_argument('--action', '-a', required=True, choices=['add', 'ignore', 'prompt'], dest='rule_action', action='store', help='Action to take if rule matches') parser.add_argument('--priority', '-p', required=True, dest='priority', action='store', type=int, @@ -37,7 +40,8 @@ def get_parser(self): return PAMGatewayActionDiscoverRuleAddCommand.parser @staticmethod - def validate_rule_statement(params: KeeperParams, gateway_context: GatewayContext, statement: str): + def validate_rule_statement(params: KeeperParams, gateway_context: GatewayContext, statement: str) \ + -> List[Statement]: # Send rule the gateway to be validated. The rule is encrypted. It might contain sensitive information. 
action_inputs = GatewayActionDiscoverRuleValidateInputs(
@@ -67,6 +71,15 @@ def validate_rule_statement(params: KeeperParams, gateway_context: GatewayContex
         logging.debug(f"Rule Structure = {statement_struct}")
         if not isinstance(statement_struct, list):
             raise Exception(f"The structured rule statement is not a list.")
+        ret = []
+        for item in statement_struct:
+            ret.append(
+                Statement(
+                    field=item.get("field"),
+                    operator=item.get("operator"),
+                    value=item.get("value")
+                )
+            )
 
-        return statement_struct
+        return ret
 
@@ -77,9 +90,16 @@ def execute(self, params, **kwargs):
 
         try:
             gateway = kwargs.get("gateway")
-            gateway_context = GatewayContext.from_gateway(params, gateway)
-            if gateway_context is None:
-                print(f'{bcolors.FAIL}Discovery job gateway [{gateway}] was not found.{bcolors.ENDC}')
+
+            try:
+                gateway_context = GatewayContext.from_gateway(params=params,
+                                                              gateway=gateway,
+                                                              configuration_uid=kwargs.get('configuration_uid'))
+                if gateway_context is None:
+                    print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.{bcolors.ENDC}")
+                    return
+            except MultiConfigurationException as err:
+                multi_conf_msg(gateway, err)
                 return
 
             # If we are setting the shared_folder_uid, make sure it exists.
diff --git a/keepercommander/commands/discover/rule_list.py b/keepercommander/commands/discover/rule_list.py
index ad06d02ee..9819fc361 100644
--- a/keepercommander/commands/discover/rule_list.py
+++ b/keepercommander/commands/discover/rule_list.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 import argparse
-from . import PAMGatewayActionDiscoverCommandBase, GatewayContext
+from . 
import PAMGatewayActionDiscoverCommandBase, GatewayContext, MultiConfigurationException, multi_conf_msg from ...display import bcolors from ..pam.router_helper import router_get_connected_gateways from ...discovery_common.rule import Rules @@ -12,9 +12,12 @@ class PAMGatewayActionDiscoverRuleListCommand(PAMGatewayActionDiscoverCommandBase): - parser = argparse.ArgumentParser(prog='pam-action-discover-rule-list') + parser = argparse.ArgumentParser(prog='pam action discover rule list') parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store', help='Gateway name of UID.') + parser.add_argument('--configuration-uid', '-c', required=False, dest='configuration_uid', + action='store', help='PAM configuration UID, if gateway has multiple.') + parser.add_argument('--search', '-s', required=False, dest='search', action='store', help='Search for rules.') @@ -64,9 +67,15 @@ def print_rule_table(rule_list: List[RuleItem]): if rule.name is not None: name = rule.name + color = bcolors.FAIL + action_value = f"NONE" + if rule.action is not None: + color = "" + action_value = rule.action.value + print(f"{bcolors.OKGREEN}{rule.rule_id.ljust(14, ' ')}{bcolors.ENDC} " f"{name[:20].ljust(20, ' ')} " - f"{rule.action.value.ljust(6, ' ')} " + f"{color}{action_value.ljust(6, ' ')}{bcolors.ENDC} " f"{str(rule.priority).rjust(8, ' ')} " f"{ignore_case_str.ljust(12, ' ')} " f"{rule.added_ts_str.ljust(19, ' ')} " @@ -80,17 +89,29 @@ def execute(self, params, **kwargs): router_get_connected_gateways(params) gateway = kwargs.get("gateway") - gateway_context = GatewayContext.from_gateway(params, gateway) - if gateway_context is None: - print(f'{bcolors.FAIL}Discovery job gateway [{gateway}] was not found.{bcolors.ENDC}') + configuration_uid = kwargs.get('configuration_uid') + try: + gateway_context = GatewayContext.from_gateway(params=params, + gateway=gateway, + configuration_uid=configuration_uid) + if gateway_context is None: + print(f"{bcolors.FAIL}Could not find 
the gateway configuration for {gateway}.{bcolors.ENDC}")
+                return
+        except MultiConfigurationException as err:
+            multi_conf_msg(gateway, err)
             return
 
         rules = Rules(record=gateway_context.configuration, params=params)
         rule_list = rules.rule_list(rule_type=RuleTypeEnum.ACTION,
                                     search=kwargs.get("search"))  # type: List[RuleItem]
         if len(rule_list) == 0:
-            print(f"{bcolors.FAIL}There are no rules. Use 'pam action discovery rule add' "
-                  f"to create rules.{bcolors.ENDC}")
+            print("")
+            text = f"{bcolors.FAIL}There are no rules. " \
+                   f"Use 'pam action discover rule add -g {gateway_context.gateway_uid}"
+            if configuration_uid:
+                text += f" -c {gateway_context.configuration_uid}"
+            text += f"' to create rules.{bcolors.ENDC}"
+            print(text)
             return
 
         self.print_rule_table(rule_list=rule_list)
diff --git a/keepercommander/commands/discover/rule_remove.py b/keepercommander/commands/discover/rule_remove.py
index 27f4fbcc0..b093dfa57 100644
--- a/keepercommander/commands/discover/rule_remove.py
+++ b/keepercommander/commands/discover/rule_remove.py
@@ -1,5 +1,5 @@
 import argparse
-from . import PAMGatewayActionDiscoverCommandBase, GatewayContext
+from . 
import PAMGatewayActionDiscoverCommandBase, GatewayContext, MultiConfigurationException, multi_conf_msg from ..pam.router_helper import router_get_connected_gateways from ...display import bcolors from ...discovery_common.rule import Rules @@ -7,9 +7,12 @@ class PAMGatewayActionDiscoverRuleRemoveCommand(PAMGatewayActionDiscoverCommandBase): - parser = argparse.ArgumentParser(prog='pam-action-discover-rule-remove') + parser = argparse.ArgumentParser(prog='pam action discover rule remove') parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store', help='Gateway name of UID') + parser.add_argument('--configuration-uid', '-c', required=False, dest='configuration_uid', + action='store', help='PAM configuration UID, if gateway has multiple.') + parser.add_argument('--rule-id', '-i', required=False, dest='rule_id', action='store', help='Identifier for the rule') parser.add_argument('--remove-all', required=False, dest='remove_all', action='store_true', @@ -24,9 +27,15 @@ def execute(self, params, **kwargs): router_get_connected_gateways(params) gateway = kwargs.get("gateway") - gateway_context = GatewayContext.from_gateway(params, gateway) - if gateway_context is None: - print(f'{bcolors.FAIL}Discovery job gateway [{gateway}] was not found.{bcolors.ENDC}') + try: + gateway_context = GatewayContext.from_gateway(params=params, + gateway=gateway, + configuration_uid=kwargs.get('configuration_uid')) + if gateway_context is None: + print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.{bcolors.ENDC}") + return + except MultiConfigurationException as err: + multi_conf_msg(gateway, err) return rule_id = kwargs.get("rule_id") diff --git a/keepercommander/commands/discover/rule_update.py b/keepercommander/commands/discover/rule_update.py index f4642d061..b3e063bf5 100644 --- a/keepercommander/commands/discover/rule_update.py +++ b/keepercommander/commands/discover/rule_update.py @@ -1,16 +1,19 @@ from __future__ import annotations 
import argparse -from . import PAMGatewayActionDiscoverCommandBase, GatewayContext +from . import PAMGatewayActionDiscoverCommandBase, GatewayContext, MultiConfigurationException, multi_conf_msg from .rule_add import PAMGatewayActionDiscoverRuleAddCommand from ..pam.router_helper import router_get_connected_gateways from ...display import bcolors -from ...discovery_common.rule import Rules, RuleTypeEnum +from ...discovery_common.rule import Rules, RuleActionEnum, RuleTypeEnum class PAMGatewayActionDiscoverRuleUpdateCommand(PAMGatewayActionDiscoverCommandBase): - parser = argparse.ArgumentParser(prog='pam-action-discover-rule-update') + parser = argparse.ArgumentParser(prog='pam action discover rule update') parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store', help='Gateway name of UID.') + parser.add_argument('--configuration-uid', '-c', required=False, dest='configuration_uid', + action='store', help='PAM configuration UID, if gateway has multiple.') + parser.add_argument('--rule-id', '-i', required=True, dest='rule_id', action='store', help='Identifier for the rule') parser.add_argument('--action', '-a', required=False, choices=['add', 'ignore', 'prompt'], @@ -48,9 +51,15 @@ def execute(self, params, **kwargs): router_get_connected_gateways(params) gateway = kwargs.get("gateway") - gateway_context = GatewayContext.from_gateway(params, gateway) - if gateway_context is None: - print(f'{bcolors.FAIL}Discovery job gateway [{gateway}] was not found.{bcolors.ENDC}') + try: + gateway_context = GatewayContext.from_gateway(params=params, + gateway=gateway, + configuration_uid=kwargs.get('configuration_uid')) + if gateway_context is None: + print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.{bcolors.ENDC}") + return + except MultiConfigurationException as err: + multi_conf_msg(gateway, err) return try: @@ -62,7 +71,10 @@ def execute(self, params, **kwargs): rule_action = kwargs.get("rule_action") if rule_action is 
not None:
-            rule_item.action = RuleTypeEnum.find_enum(rule_action)
+            action = RuleActionEnum.find_enum(rule_action)
+            if action is None:
+                raise ValueError(f"The action does not look correct: {rule_action}")
+            rule_item.action = action
 
         priority = kwargs.get("priority")
         if priority is not None:
@@ -106,11 +118,12 @@ def execute(self, params, **kwargs):
 
         statement = kwargs.get("statement")
         if statement is not None:
             # validate_rule_statement will throw exceptions.
             rule_item.statement = PAMGatewayActionDiscoverRuleAddCommand.validate_rule_statement(
                 params=params,
                 gateway_context=gateway_context,
                 statement=statement
             )
+            print(" * Changing the rule statement.")
 
         name = kwargs.get("name")
diff --git a/keepercommander/commands/discoveryrotation.py b/keepercommander/commands/discoveryrotation.py
index 76c563ac4..40b989875 100644
--- a/keepercommander/commands/discoveryrotation.py
+++ b/keepercommander/commands/discoveryrotation.py
@@ -78,7 +78,7 @@
 from .pam_service.list import PAMActionServiceListCommand
 from .pam_service.add import PAMActionServiceAddCommand
 from .pam_service.remove import PAMActionServiceRemoveCommand
-from .pam_saas.add import PAMActionSaasAddCommand
+from .pam_saas.set import PAMActionSaasSetCommand
 from .pam_saas.user import PAMActionSaasUserCommand
 from .pam_saas.remove import PAMActionSaasRemoveCommand
 from .pam_saas.config import PAMActionSaasConfigCommand
@@ -266,8 +266,8 @@ def __init__(self):
         super(PAMActionSaasCommand, self).__init__()
         self.register_command('config', PAMActionSaasConfigCommand(),
                               'Create a configuration for a SaaS rotation.', 'c')
-        self.register_command('add', PAMActionSaasAddCommand(),
-                              'Add a SaaS rotation to a PAM User record.', 'a')
+        self.register_command('set', PAMActionSaasSetCommand(),
+                              'Set a SaaS rotation on a PAM User record.', 's')
         self.register_command('remove', PAMActionSaasRemoveCommand(),
                               'Remove a SaaS rotation from a PAM 
User record', 'r') self.register_command('user', PAMActionSaasUserCommand(), diff --git a/keepercommander/commands/pam_debug/acl.py b/keepercommander/commands/pam_debug/acl.py index 2389531d3..9a7bca7d2 100644 --- a/keepercommander/commands/pam_debug/acl.py +++ b/keepercommander/commands/pam_debug/acl.py @@ -1,7 +1,8 @@ from __future__ import annotations import argparse import logging -from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext, PAM_USER +from ..discover import (PAMGatewayActionDiscoverCommandBase, GatewayContext, PAM_USER, MultiConfigurationException, + multi_conf_msg) from ...display import bcolors from ... import vault from ...discovery_common.record_link import RecordLink @@ -14,11 +15,13 @@ class PAMDebugACLCommand(PAMGatewayActionDiscoverCommandBase): - parser = argparse.ArgumentParser(prog='pam-action-debug-acl') + parser = argparse.ArgumentParser(prog='pam action debug acl') # The record to base everything on. parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store', help='Gateway name or UID.') + parser.add_argument('--configuration-uid', "-c", required=False, dest='configuration_uid', + action='store', help='PAM configuration UID, if gateway has multiple.') parser.add_argument('--user-uid', '-u', required=True, dest='user_uid', action='store', help='User UID.') @@ -39,9 +42,16 @@ def execute(self, params: KeeperParams, **kwargs): print("") - gateway_context = GatewayContext.from_gateway(params, gateway) - if gateway_context is None: - print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.") + configuration_uid = kwargs.get('configuration_uid') + try: + gateway_context = GatewayContext.from_gateway(params=params, + gateway=gateway, + configuration_uid=configuration_uid) + if gateway_context is None: + print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.{bcolors.ENDC}") + return + except MultiConfigurationException as err: + multi_conf_msg(gateway, 
err) return record_link = RecordLink(record=gateway_context.configuration, @@ -111,7 +121,7 @@ def execute(self, params: KeeperParams, **kwargs): if belongs_to_vertex is None: print("User record does not belong to any resource, or provider.") else: - if belongs_to_vertex.active is False: + if not belongs_to_vertex.active: print("User record belongs to an inactive parent.") else: print("User record belongs to another record.") diff --git a/keepercommander/commands/pam_debug/gateway.py b/keepercommander/commands/pam_debug/gateway.py index b56e574d6..3642668ce 100644 --- a/keepercommander/commands/pam_debug/gateway.py +++ b/keepercommander/commands/pam_debug/gateway.py @@ -1,6 +1,6 @@ from __future__ import annotations import argparse -from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext +from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext, MultiConfigurationException, multi_conf_msg from .graph import PAMDebugGraphCommand from ...display import bcolors from ...discovery_common.infrastructure import Infrastructure @@ -14,7 +14,7 @@ class PAMDebugGatewayCommand(PAMGatewayActionDiscoverCommandBase): - parser = argparse.ArgumentParser(prog='pam-action-debug-gateway') + parser = argparse.ArgumentParser(prog='pam action debug gateway') type_name_map = { PAM_USER: "PAM User", @@ -26,6 +26,8 @@ class PAMDebugGatewayCommand(PAMGatewayActionDiscoverCommandBase): # The record to base everything on. 
parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store', help='Gateway name or UID') + parser.add_argument('--configuration-uid', "-c", required=False, dest='configuration_uid', + action='store', help='PAM configuration UID, if gateway has multiple.') def get_parser(self): return PAMDebugGatewayCommand.parser @@ -35,9 +37,16 @@ def execute(self, params: KeeperParams, **kwargs): gateway = kwargs.get("gateway") debug_level = kwargs.get("debug_level", False) - gateway_context = GatewayContext.from_gateway(params, gateway) - if gateway_context is None: - print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.") + configuration_uid = kwargs.get('configuration_uid') + try: + gateway_context = GatewayContext.from_gateway(params=params, + gateway=gateway, + configuration_uid=configuration_uid) + if gateway_context is None: + print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.{bcolors.ENDC}") + return + except MultiConfigurationException as err: + multi_conf_msg(gateway, err) return infra = Infrastructure(record=gateway_context.configuration, params=params, fail_on_corrupt=False) diff --git a/keepercommander/commands/pam_debug/graph.py b/keepercommander/commands/pam_debug/graph.py index 4e8a65988..7b1fbfc0b 100644 --- a/keepercommander/commands/pam_debug/graph.py +++ b/keepercommander/commands/pam_debug/graph.py @@ -2,7 +2,7 @@ from . import get_connection import argparse import logging -from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext +from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext, MultiConfigurationException, multi_conf_msg from ...display import bcolors from ... 
import vault from ...discovery_common.infrastructure import Infrastructure @@ -33,11 +33,14 @@ class PAMDebugGraphCommand(PAMGatewayActionDiscoverCommandBase): NO_RECORD = "NO RECORD" OTHER = "OTHER" - parser = argparse.ArgumentParser(prog='pam-action-debug-graph') + parser = argparse.ArgumentParser(prog='pam action debug graph') # The record to base everything on. parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store', help='Gateway name or UID.') + parser.add_argument('--configuration-uid', "-c", required=False, dest='configuration_uid', + action='store', help='PAM configuration UID, if gateway has multiple.') + parser.add_argument('--type', '-t', required=True, choices=['infra', 'rl', 'service', 'jobs'], dest='graph_type', action='store', help='Graph type', default='infra') parser.add_argument('--raw', required=False, dest='raw', action='store_true', @@ -102,7 +105,7 @@ def _do_text_list_infra(self, params: KeeperParams, gateway_context: GatewayCont def _handle(current_vertex: DAGVertex, indent: int = 0, last_record_type: Optional[str] = None): - if current_vertex.active is False: + if not current_vertex.active: return pad = "" @@ -113,9 +116,9 @@ def _handle(current_vertex: DAGVertex, indent: int = 0, last_record_type: Option ls = line_start.get(indent, " ") cf = color_func.get(indent, self._p) - if current_vertex.active is False: + if not current_vertex.active: text += f"{pad}{current_vertex.uid} " + self._f("(Inactive)") - elif current_vertex.corrupt is False: + elif not current_vertex.corrupt: current_content = DiscoveryObject.get_discovery_object(current_vertex) if current_content.record_uid is None: text += f"{pad}{ls}{current_vertex.uid}; {current_content.title} does not have a record." 
@@ -172,7 +175,7 @@ def _do_text_list_rl(self, params: KeeperParams, gateway_context: GatewayContext print(self._h(f"{pad}{record.record_type}, {record.title}, {record.record_uid}")) - if configuration.has_data is True: + if configuration.has_data: try: data = configuration.content_as_dict print(f"{pad} . data") @@ -218,7 +221,7 @@ def _group(configuration_vertex: DAGVertex) -> dict: vertex = item.get("v") # type: DAGVertex record = item.get("r") # type: TypedRecord text = self._gr(f"{record.title}; {record.record_uid}") - if vertex.active is False: + if not vertex.active: text += " " + self._f("Inactive") print(f"{pad} * {text}") @@ -228,19 +231,19 @@ def _group(configuration_vertex: DAGVertex) -> dict: if acl is None: print(f"{pad} {self._f('missing ACL')}") else: - if acl.is_iam_user is True: + if acl.is_iam_user: print(f"{pad} . is IAM user") - if acl.is_admin is True: + if acl.is_admin: print(f"{pad} . is the {self._b('Admin')}") - if acl.belongs_to is True: + if acl.belongs_to: print(f"{pad} . belongs to this resource") else: print(f"{pad} . looks like directory user") if acl.rotation_settings: - if acl.rotation_settings.noop is True: + if acl.rotation_settings.noop: print(f"{pad} . is a NOOP") - if acl.rotation_settings.disabled is True: + if acl.rotation_settings.disabled: print(f"{pad} . rotation is disabled") if (acl.rotation_settings.saas_record_uid_list is not None @@ -250,7 +253,7 @@ def _group(configuration_vertex: DAGVertex) -> dict: continue - if vertex.has_data is True: + if vertex.has_data: try: data = vertex.content_as_dict print(f"{pad} . 
data") @@ -265,7 +268,7 @@ def _group(configuration_vertex: DAGVertex) -> dict: for child in children: child_record = vault.KeeperRecord.load(params, child.uid) # type: Optional[TypedRecord] if child_record is None: - if child.active is True: + if child.active: bad.append(self._f(f"- Record UID {child.uid} does not exists.")) continue else: @@ -274,14 +277,14 @@ def _group(configuration_vertex: DAGVertex) -> dict: if acl is None: print(f"{pad} {self._f('missing ACL')}") else: - if acl.is_admin is True: + if acl.is_admin: print(f"{pad} . is the {self._b('Admin')}") - if acl.belongs_to is True: + if acl.belongs_to: print(f"{pad} . belongs to this resource") else: print(f"{pad} . looks like directory user") - if child.has_data is True: + if child.has_data: try: data = child.content_as_dict print(f"{pad} . data") @@ -298,7 +301,7 @@ def _group(configuration_vertex: DAGVertex) -> dict: vertex = item.get("v") # type: DAGVertex record = item.get("r") # type: TypedRecord text = self._gr(f"{record.record_type}; {record.title}; {record.record_uid}") - if vertex.active is False: + if not vertex.active: text += " " + self._f("Inactive") print(f"{pad} * {text}") @@ -325,12 +328,12 @@ def _handle(current_vertex: DAGVertex, parent_vertex: Optional[DAGVertex] = None record = vault.KeeperRecord.load(params, current_vertex.uid) # type: Optional[TypedRecord] if record is None: - if current_vertex.active is False: + if not current_vertex.active: print(f"{pad}Record {current_vertex.uid} does not exists, inactive in the graph.") else: print(f"{pad}Record {current_vertex.uid} does not exists, active in the graph.") return - elif current_vertex.active is False: + elif not current_vertex.active: print(f"{pad}{record.record_type}, {record.title}, {record.record_uid} exists, " "inactive in the graph.") return @@ -340,9 +343,9 @@ def _handle(current_vertex: DAGVertex, parent_vertex: Optional[DAGVertex] = None if acl is not None: acl_text = self._f("None") acl_parts = [] - if acl.is_service is 
True: + if acl.is_service: acl_parts.append(self._bl("Service")) - if acl.is_task is True: + if acl.is_task: acl_parts.append(self._bl("Task")) if len(acl_parts) > 0: acl_text = ", ".join(acl_parts) @@ -377,7 +380,7 @@ def _do_text_list_jobs(self, params: KeeperParams, gateway_context: GatewayConte return vertex = vertices[0] - if vertex.has_data is False: + if not vertex.has_data: print(self._f(f"The job vertex does not contain any data")) return @@ -398,7 +401,7 @@ def _do_text_list_jobs(self, params: KeeperParams, gateway_context: GatewayConte print(f"{pad} Ended: {job.end_ts_str}") print(f"{pad} Duration: {job.duration_sec_str}") print(f"{pad} Infra Sync Point: {job.sync_point}") - if job.success is True: + if job.success: print(f"{pad} Status: {self._gr('Success')}") else: print(f"{pad} Status: {self._f('Fail')}") @@ -417,9 +420,9 @@ def _do_text_list_jobs(self, params: KeeperParams, gateway_context: GatewayConte if vertex is None: print(f"{pad} * Vertex {added.uid} does not exists.") else: - if vertex.active is False: + if not vertex.active: print(f"{pad} * Vertex {added.uid} is inactive.") - elif vertex.corrupt is True: + elif vertex.corrupt: print(f"{pad} * Vertex {added.uid} is corrupt.") else: content = DiscoveryObject.get_discovery_object(vertex) @@ -433,9 +436,9 @@ def _do_text_list_jobs(self, params: KeeperParams, gateway_context: GatewayConte if vertex is None: print(f"{pad} * Vertex {changed.uid} does not exists.") else: - if vertex.active is False: + if not vertex.active: print(f"{pad} * Vertex {changed.uid} is inactive.") - elif vertex.corrupt is True: + elif vertex.corrupt: print(f"{pad} * Vertex {changed.uid} is corrupt.") else: content = DiscoveryObject.get_discovery_object(vertex) @@ -586,21 +589,21 @@ def _handle(current_vertex: DAGVertex, last_vertex: Optional[DAGVertex] = None, edge_types = [] if last_vertex is not None: for edge in current_vertex.edges: # type: DAGEdge - if edge.active is False: + if not edge.active: continue if 
edge.head_uid == last_vertex.uid: edge_types.append(edge.edge_type.value) if len(edge_types) > 0: text += f"; edges: {', '.join(edge_types)}" - if current_vertex.active is False: + if not current_vertex.active: text += " " + self._f("Inactive") - if current_vertex.corrupt is True: + if current_vertex.corrupt: text += " " + self._f("Corrupt") print(text) - if current_vertex.active is False: + if not current_vertex.active: logging.debug(f"vertex {current_vertex.uid} is not active, will not get children.") return @@ -653,18 +656,25 @@ def execute(self, params: KeeperParams, **kwargs): do_render = kwargs.get("do_render") debug_level = int(kwargs.get("debug_level", 0)) - gateway_context = GatewayContext.from_gateway(params, gateway) - if gateway_context is None: - print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.") + configuration_uid = kwargs.get('configuration_uid') + try: + gateway_context = GatewayContext.from_gateway(params=params, + gateway=gateway, + configuration_uid=configuration_uid) + if gateway_context is None: + print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.{bcolors.ENDC}") + return + except MultiConfigurationException as err: + multi_conf_msg(gateway, err) return - if raw is True: - if do_text_list is True: + if raw: + if do_text_list: self._do_raw_text_list(params=params, gateway_context=gateway_context, graph_id=PAMDebugGraphCommand.graph_id_map.get(graph_type), debug_level=debug_level) - if do_render is True: + if do_render: filepath = kwargs.get("filepath") graph_format = kwargs.get("format") self._do_raw_render_graph(params=params, @@ -674,14 +684,14 @@ def execute(self, params: KeeperParams, **kwargs): graph_id=PAMDebugGraphCommand.graph_id_map.get(graph_type), debug_level=debug_level) else: - if do_text_list is True: + if do_text_list: self.do_list( params=params, gateway_context=gateway_context, graph_type=graph_type, debug_level=debug_level ) - if do_render is True: + if do_render: 
filepath = kwargs.get("filepath") graph_format = kwargs.get("format") render_func = getattr(self, f"_do_render_{graph_type}") diff --git a/keepercommander/commands/pam_debug/info.py b/keepercommander/commands/pam_debug/info.py index 75da7e09c..49be65f8d 100644 --- a/keepercommander/commands/pam_debug/info.py +++ b/keepercommander/commands/pam_debug/info.py @@ -19,7 +19,7 @@ class PAMDebugInfoCommand(PAMGatewayActionDiscoverCommandBase): - parser = argparse.ArgumentParser(prog='pam-action-debug-info') + parser = argparse.ArgumentParser(prog='pam action debug info') type_name_map = { PAM_USER: "PAM User", @@ -176,9 +176,9 @@ def _print_field(f): acl_content = acl_edge.content_as_object(UserAcl) # type: UserAcl print(f" * ACL to {self._n(parent_record.record_type)}; {parent_record.title}; " f"{record_parent_vertex.uid}") - if acl_content.is_admin is True: + if acl_content.is_admin: print(f" . Is {self._gr('Admin')}") - if acl_content.belongs_to is True: + if acl_content.belongs_to: print(f" . Belongs") else: print(f" . Is {self._bl('Remote user')}") @@ -233,9 +233,9 @@ def _print_field(f): acl_content = acl_edge.content_as_object(UserAcl) print(f" * ACL from {self._n(child_record.record_type)}; {child_record.title}; " f"{record_child_vertex.uid}") - if acl_content.is_admin is True: + if acl_content.is_admin: print(f" . Is {self._gr('Admin')}") - if acl_content.belongs_to is True: + if acl_content.belongs_to: print(f" . Belongs") else: print(f" . 
Is {self._bl('Remote user')}") diff --git a/keepercommander/commands/pam_debug/link.py b/keepercommander/commands/pam_debug/link.py index c6986eff6..766d27d16 100644 --- a/keepercommander/commands/pam_debug/link.py +++ b/keepercommander/commands/pam_debug/link.py @@ -1,7 +1,8 @@ from __future__ import annotations import argparse import logging -from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext, PAM_MACHINE, PAM_DATABASE, PAM_DIRECTORY +from ..discover import (PAMGatewayActionDiscoverCommandBase, GatewayContext, PAM_MACHINE, PAM_DATABASE, PAM_DIRECTORY, + MultiConfigurationException, multi_conf_msg) from ...display import bcolors from ... import vault from ...discovery_common.record_link import RecordLink @@ -13,11 +14,14 @@ class PAMDebugLinkCommand(PAMGatewayActionDiscoverCommandBase): - parser = argparse.ArgumentParser(prog='pam-action-debug-link') + parser = argparse.ArgumentParser(prog='pam action debug link') # The record to base everything on. parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store', help='Gateway name or UID.') + parser.add_argument('--configuration-uid', "-c", required=False, dest='configuration_uid', + action='store', help='PAM configuration UID, if gateway has multiple.') + parser.add_argument('--resource-uid', '-r', required=True, dest='resource_uid', action='store', help='Resource record UID.') parser.add_argument('--debug-gs-level', required=False, dest='debug_level', action='store', @@ -34,9 +38,16 @@ def execute(self, params: KeeperParams, **kwargs): print("") - gateway_context = GatewayContext.from_gateway(params, gateway) - if gateway_context is None: - print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.") + configuration_uid = kwargs.get('configuration_uid') + try: + gateway_context = GatewayContext.from_gateway(params=params, + gateway=gateway, + configuration_uid=configuration_uid) + if gateway_context is None: + print(f"{bcolors.FAIL}Could not find 
the gateway configuration for {gateway}.{bcolors.ENDC}") + return + except MultiConfigurationException as err: + multi_conf_msg(gateway, err) return record_link = RecordLink(record=gateway_context.configuration, diff --git a/keepercommander/commands/pam_debug/rotation_setting.py b/keepercommander/commands/pam_debug/rotation_setting.py index 8af6ab7f1..41e4a034f 100644 --- a/keepercommander/commands/pam_debug/rotation_setting.py +++ b/keepercommander/commands/pam_debug/rotation_setting.py @@ -19,7 +19,7 @@ class PAMDebugRotationSettingsCommand(PAMGatewayActionDiscoverCommandBase): - parser = argparse.ArgumentParser(prog='pam-action-debug-rotation') + parser = argparse.ArgumentParser(prog='pam action debug rotation') # The record to base everything on. parser.add_argument('--user-record-uid', '-i', required=True, dest='user_record_uid', action='store', @@ -118,7 +118,7 @@ def execute(self, params: KeeperParams, **kwargs): rq.pwdComplexity = b'' rq.disabled = False - if dry_run is False: + if not dry_run: router_set_record_rotation_information(params, rq) params.sync_data = True @@ -215,7 +215,7 @@ def execute(self, params: KeeperParams, **kwargs): f"creating.{bcolors.ENDC}") record_link.belongs_to(configuration_record_uid, parent_uid) - if dry_run is False: + if not dry_run: record_link.save() print(f"{bcolors.OKGREEN}{user_acl.model_dump_json(indent=4)}{bcolors.ENDC}") diff --git a/keepercommander/commands/pam_debug/verify.py b/keepercommander/commands/pam_debug/verify.py index 82c953667..cabac24a3 100644 --- a/keepercommander/commands/pam_debug/verify.py +++ b/keepercommander/commands/pam_debug/verify.py @@ -1,8 +1,7 @@ from __future__ import annotations -from . 
import get_connection import logging import argparse -from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext +from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext, MultiConfigurationException, multi_conf_msg from ...display import bcolors from ...vault import TypedRecord from ...discovery_common.verify import Verify @@ -15,11 +14,14 @@ class PAMDebugVerifyCommand(PAMGatewayActionDiscoverCommandBase): - parser = argparse.ArgumentParser(prog='pam-action-debug-verify') + parser = argparse.ArgumentParser(prog='pam action debug verify') # The record to base everything on. parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store', help='Gateway name or UID.') + parser.add_argument('--configuration-uid', "-c", required=False, dest='configuration_uid', + action='store', help='PAM configuration UID, if gateway has multiple.') + parser.add_argument('--fix', required=False, dest='fix', action='store_true', help='Fix all problems.') parser.add_argument('--debug-gs-level', required=False, dest='debug_level', action='store', @@ -33,6 +35,17 @@ def execute(self, params: KeeperParams, **kwargs): gateway = kwargs.get("gateway") fix = kwargs.get("fix", False) debug_level = kwargs.get("debug_level", False) + configuration_uid = kwargs.get('configuration_uid') + try: + gateway_context = GatewayContext.from_gateway(params=params, + gateway=gateway, + configuration_uid=configuration_uid) + if gateway_context is None: + print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.{bcolors.ENDC}") + return + except MultiConfigurationException as err: + multi_conf_msg(gateway, err) + return gateway_context = GatewayContext.from_gateway(params, gateway) if gateway_context is None: diff --git a/keepercommander/commands/pam_debug/vertex.py b/keepercommander/commands/pam_debug/vertex.py index 7d1a25692..3d37e61dc 100644 --- a/keepercommander/commands/pam_debug/vertex.py +++ 
b/keepercommander/commands/pam_debug/vertex.py @@ -1,26 +1,20 @@ - from __future__ import annotations import argparse -from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext +from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext, MultiConfigurationException, multi_conf_msg from ...display import bcolors -from ... import vault, vault_extensions from ...discovery_common.infrastructure import Infrastructure -from ...discovery_common.record_link import RecordLink -from ...discovery_common.user_service import UserService -from ...discovery_common.types import UserAcl, DiscoveryObject +from ...discovery_common.types import DiscoveryObject from ...discovery_common.constants import PAM_USER, PAM_MACHINE, PAM_DATABASE, PAM_DIRECTORY from ...keeper_dag import EdgeType import time -import re -from typing import Optional, TYPE_CHECKING +from typing import TYPE_CHECKING if TYPE_CHECKING: - from ...vault import TypedRecord from ...params import KeeperParams class PAMDebugVertexCommand(PAMGatewayActionDiscoverCommandBase): - parser = argparse.ArgumentParser(prog='pam-action-debug-info') + parser = argparse.ArgumentParser(prog='pam action debug info') type_name_map = { PAM_USER: "PAM User", @@ -32,6 +26,9 @@ class PAMDebugVertexCommand(PAMGatewayActionDiscoverCommandBase): # The record to base everything on. 
parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store', help='Gateway name or UID') + parser.add_argument('--configuration-uid', '-c', required=False, dest='configuration_uid', + action='store', help='PAM configuration UID, if gateway has multiple.') + parser.add_argument('--vertex', '-i', required=True, dest='vertex_uid', action='store', help='Vertex in infrastructure graph') @@ -43,9 +40,16 @@ def execute(self, params: KeeperParams, **kwargs): gateway = kwargs.get("gateway") debug_level = kwargs.get("debug_level", False) - gateway_context = GatewayContext.from_gateway(params, gateway) - if gateway_context is None: - print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.") + configuration_uid = kwargs.get('configuration_uid') + try: + gateway_context = GatewayContext.from_gateway(params=params, + gateway=gateway, + configuration_uid=configuration_uid) + if gateway_context is None: + print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.{bcolors.ENDC}") + return + except MultiConfigurationException as err: + multi_conf_msg(gateway, err) return infra = Infrastructure(record=gateway_context.configuration, params=params, fail_on_corrupt=False, diff --git a/keepercommander/commands/pam_saas/__init__.py b/keepercommander/commands/pam_saas/__init__.py index 458b35099..af36fc76b 100644 --- a/keepercommander/commands/pam_saas/__init__.py +++ b/keepercommander/commands/pam_saas/__init__.py @@ -92,7 +92,9 @@ class SaasCatalog(BaseModel): @property def file_name(self): - return self.file.split(os.sep)[-1] if self.file else None + # `file` will be either a file name or URL to a file. + # This will just get the file name. 
+ return self.file.split("/")[-1] if self.file else None def get_gateway_saas_schema(params: KeeperParams, gateway_context: GatewayContext) -> Optional[List[dict]]: diff --git a/keepercommander/commands/pam_saas/config.py b/keepercommander/commands/pam_saas/config.py index 3cd2c42e0..21dbd2e9e 100644 --- a/keepercommander/commands/pam_saas/config.py +++ b/keepercommander/commands/pam_saas/config.py @@ -1,7 +1,7 @@ from __future__ import annotations import argparse from ..pam.pam_dto import GatewayAction -from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext +from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext, MultiConfigurationException, multi_conf_msg from ...display import bcolors from . import get_plugins_map, make_script_signature, SaasCatalog, get_field_input from ... import api, subfolder, utils, crypto, vault, vault_extensions, attachment, record_management @@ -49,10 +49,13 @@ def toJSON(self): class PAMActionSaasConfigCommand(PAMGatewayActionDiscoverCommandBase): - parser = argparse.ArgumentParser(prog='pam action saas install') + parser = argparse.ArgumentParser(prog='pam action saas config') parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store', help='Gateway name of UID.') + parser.add_argument('--configuration-uid', '-c', required=False, dest='configuration_uid', + action='store', help='PAM configuration UID, if gateway has multiple.') + parser.add_argument('--list', '-l', required=False, dest='do_list', action='store_true', help='List available SaaS rotations.') @@ -64,7 +67,7 @@ class PAMActionSaasConfigCommand(PAMGatewayActionDiscoverCommandBase): parser.add_argument('--info', required=False, dest='do_info', action='store_true', help='Get information about a plugin or plugins being used.') - parser.add_argument('--create', '-c', required=False, dest='do_create', action='store_true', + parser.add_argument('--create', required=False, dest='do_create', action='store_true', 
help='Create a SaaS Plugin config record.') parser.add_argument('--update-config-uid', '-u', required=False, dest='do_update', action='store', help='Update an existing SaaS configuration.') @@ -281,8 +284,8 @@ def _create_config(params: KeeperParams, print(f"{bcolors.OKGREEN}Created SaaS configuration record with UID of {record.record_uid}{bcolors.ENDC}") print("") print("Assign this configuration to a user using the following command.") - print(f" {bcolors.OKGREEN}pam action saas add -c {record.record_uid} -u {bcolors.ENDC}") - print(f" See {bcolors.OKGREEN}pam action saas add --help{bcolors.ENDC} for more information.") + print(f" {bcolors.OKGREEN}pam action saas set -c {record.record_uid} -u {bcolors.ENDC}") + print(f" See {bcolors.OKGREEN}pam action saas set --help{bcolors.ENDC} for more information.") def execute(self, params: KeeperParams, **kwargs): @@ -294,11 +297,17 @@ def execute(self, params: KeeperParams, **kwargs): use_plugin = kwargs.get("plugin") # type: Optional[str] gateway = kwargs.get("gateway") # type: str - - gateway_context = GatewayContext.from_gateway(params, gateway) - if gateway_context is None: - print("") - print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.") + configuration_uid = kwargs.get('configuration_uid') # type Optional[str] + + try: + gateway_context = GatewayContext.from_gateway(params=params, + gateway=gateway, + configuration_uid=configuration_uid) + if gateway_context is None: + print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.{bcolors.ENDC}") + return + except MultiConfigurationException as err: + multi_conf_msg(gateway, err) return plugins = get_plugins_map( diff --git a/keepercommander/commands/pam_saas/remove.py b/keepercommander/commands/pam_saas/remove.py index d2b1c4457..f4a8fb7d6 100644 --- a/keepercommander/commands/pam_saas/remove.py +++ b/keepercommander/commands/pam_saas/remove.py @@ -18,8 +18,6 @@ class 
PAMActionSaasRemoveCommand(PAMGatewayActionDiscoverCommandBase): parser.add_argument('--user-uid', '-u', required=True, dest='user_uid', action='store', help='The UID of the User record') - parser.add_argument('--config-record-uid', '-c', required=True, dest='config_record_uid', - action='store', help='The UID of the record that has SaaS configuration') parser.add_argument('--resource-uid', '-r', required=False, dest='resource_uid', action='store', help='The UID of the Resource record, if needed.') @@ -30,7 +28,6 @@ def execute(self, params: KeeperParams, **kwargs): user_uid = kwargs.get("user_uid") # type: str resource_uid = kwargs.get("resource_uid") # type: str - config_record_uid = kwargs.get("config_record_uid") # type: str print("") @@ -100,23 +97,10 @@ def execute(self, params: KeeperParams, **kwargs): "This combination is not allowed.")) return - # If there is a resource record, it not NOOP. - # If there is NO resource record, it is NOOP. - acl.rotation_settings.noop = resource_uid is None - - # PyCharm didn't like appending directly, so do this stupid thing. - record_uid_list = acl.rotation_settings.saas_record_uid_list - - # Check if the SaaS config is being used by this user. - if config_record_uid not in record_uid_list: - print(f"{bcolors.WARNING}The SaaS configuration record is not being used by " - f"this user record.{bcolors.ENDC}") - return - - record_uid_list.remove(config_record_uid) - acl.rotation_settings.saas_record_uid_list = record_uid_list + # An empty array removes the SaaS config. 
+ acl.rotation_settings.saas_record_uid_list = [] record_link.belongs_to(user_uid, parent_uid, acl) record_link.save() - print(self._gr("Remove the SaaS service rotation from the user record.")) + print(self._gr("Removing SaaS service rotation from the user record.")) diff --git a/keepercommander/commands/pam_saas/add.py b/keepercommander/commands/pam_saas/set.py similarity index 92% rename from keepercommander/commands/pam_saas/add.py rename to keepercommander/commands/pam_saas/set.py index edfba9ded..2c8f0d18d 100644 --- a/keepercommander/commands/pam_saas/add.py +++ b/keepercommander/commands/pam_saas/set.py @@ -13,8 +13,8 @@ from ...params import KeeperParams -class PAMActionSaasAddCommand(PAMGatewayActionDiscoverCommandBase): - parser = argparse.ArgumentParser(prog='pam action saas add') +class PAMActionSaasSetCommand(PAMGatewayActionDiscoverCommandBase): + parser = argparse.ArgumentParser(prog='pam action saas set') parser.add_argument('--user-uid', '-u', required=True, dest='user_uid', action='store', help='The UID of the User record') @@ -24,7 +24,7 @@ class PAMActionSaasAddCommand(PAMGatewayActionDiscoverCommandBase): help='The UID of the Resource record, if needed.') def get_parser(self): - return PAMActionSaasAddCommand.parser + return PAMActionSaasSetCommand.parser def execute(self, params: KeeperParams, **kwargs): @@ -152,19 +152,15 @@ def execute(self, params: KeeperParams, **kwargs): if acl.is_iam_user is False: acl.rotation_settings.noop = resource_uid is None - # PyCharm didn't like appending directly, so do this stupid thing. - record_uid_list = acl.rotation_settings.saas_record_uid_list - # Make sure we are not re-adding the same SaaS config. 
- if config_record_uid in record_uid_list: + if config_record_uid in acl.rotation_settings.saas_record_uid_list: print(self._f("The SaaS configuration record is already being used for this user.")) return - record_uid_list.append(config_record_uid) - acl.rotation_settings.saas_record_uid_list = record_uid_list + acl.rotation_settings.saas_record_uid_list = [config_record_uid] record_link.belongs_to(user_uid, parent_uid, acl=acl) record_link.save() - print(self._gr(f"Added {plugin_name} rotation to the user record.")) + print(self._gr(f"Setting {plugin_name} rotation for the user record.")) print("") diff --git a/keepercommander/commands/pam_saas/update.py b/keepercommander/commands/pam_saas/update.py index d3275128c..3adfe5675 100644 --- a/keepercommander/commands/pam_saas/update.py +++ b/keepercommander/commands/pam_saas/update.py @@ -2,7 +2,7 @@ import argparse import logging import traceback -from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext +from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext, MultiConfigurationException, multi_conf_msg from ...display import bcolors from ... import api, vault, vault_extensions, attachment, record_management, utils from . 
import (get_plugins_map, make_script_signature, SaasCatalog, get_field_input, get_record_field_value, @@ -25,6 +25,9 @@ class PAMActionSaasUpdateCommand(PAMGatewayActionDiscoverCommandBase): parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store', help='Gateway name of UID.') + parser.add_argument('--configuration-uid', required=False, dest='configuration_uid', + action='store', help='PAM configuration UID, if gateway has multiple.') + parser.add_argument('--all', '-a', required=False, dest='do_all', action='store_true', help='Update all configurations.') parser.add_argument('--config-record-uid', '-c', required=False, dest='config_uid', action='store', @@ -252,10 +255,17 @@ def execute(self, params: KeeperParams, **kwargs): config_record_uid = kwargs.get("config_uid") # type: str do_dry_run = kwargs.get("do_dry_run", False) # type: bool - gateway_context = GatewayContext.from_gateway(params, gateway) - if gateway_context is None: - print("") - print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.") + configuration_uid = kwargs.get('configuration_uid') # type Optional[str] + + try: + gateway_context = GatewayContext.from_gateway(params=params, + gateway=gateway, + configuration_uid=configuration_uid) + if gateway_context is None: + print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.{bcolors.ENDC}") + return + except MultiConfigurationException as err: + multi_conf_msg(gateway, err) return print("") @@ -269,7 +279,6 @@ def execute(self, params: KeeperParams, **kwargs): gateway_context=gateway_context ) - if do_all: logging.debug("search vault for login record types") for record in list(vault_extensions.find_records(params, record_type="login")): diff --git a/keepercommander/discovery_common/record_link.py b/keepercommander/discovery_common/record_link.py index 5afc2b450..d96ab167e 100644 --- a/keepercommander/discovery_common/record_link.py +++ 
b/keepercommander/discovery_common/record_link.py @@ -409,7 +409,7 @@ def delete(vertex: DAGVertex): def save(self): - self.logger.info("DISCOVERY COMMON RECORD LINKING GRAPH SAVE CALLED") + # self.logger.info("DISCOVERY COMMON RECORD LINKING GRAPH SAVE CALLED") if self.dag.has_graph: self.logger.debug("saving the record linking.") self.dag.save(delta_graph=False) diff --git a/keepercommander/discovery_common/types.py b/keepercommander/discovery_common/types.py index cea781fb3..6806adc73 100644 --- a/keepercommander/discovery_common/types.py +++ b/keepercommander/discovery_common/types.py @@ -224,7 +224,7 @@ def __del__(self): class ActionRuleItem(RuleItem): - action: RuleActionEnum = RuleActionEnum.PROMPT + action: Optional[RuleActionEnum] = RuleActionEnum.PROMPT shared_folder_uid: Optional[str] = None admin_uid: Optional[str] = None From 11290ff47adc8051987eec782b90ce8e912eaffe Mon Sep 17 00:00:00 2001 From: John Walstra Date: Wed, 7 Jan 2026 12:03:04 -0600 Subject: [PATCH 02/24] Fix discovery status for cancelled jobs. 
--- keepercommander/commands/discover/job_status.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/keepercommander/commands/discover/job_status.py b/keepercommander/commands/discover/job_status.py index 64dacc78d..01c822f97 100644 --- a/keepercommander/commands/discover/job_status.py +++ b/keepercommander/commands/discover/job_status.py @@ -182,13 +182,13 @@ def _find_job(configuration_record) -> Optional[Dict]: color = bcolors.OKBLUE status = "RUNNING" if job.end_ts is not None: - if job.success is None: + if job.success is None and job.end_ts: color = bcolors.WHITE status = "CANCELLED" else: color = bcolors.OKGREEN status = "COMPLETE" - elif job.error: + elif job.success is False: color = bcolors.FAIL status = "FAILED" @@ -352,9 +352,9 @@ def execute(self, params, **kwargs): job["gateway_context"] = gateway_context job["job_item"] = job_item - if job_item.success is None: + if job_item.success is None and job_item.end_ts: job["status"] = "CANCELLED" - elif not job_item.success: + elif job_item.success is False: job["status"] = "FAILED" selected_jobs.append(job) From 6f53161889f5cd22713082b0b552d94eac5a7423 Mon Sep 17 00:00:00 2001 From: John Walstra Date: Thu, 8 Jan 2026 12:12:48 -0600 Subject: [PATCH 03/24] Fix discovery job detail view; update keeper-dag code from main repo. 
--- keepercommander/commands/discover/__init__.py | 2 +- keepercommander/commands/discover/job_status.py | 14 +++++++------- keepercommander/discovery_common/__version__.py | 2 +- keepercommander/discovery_common/record_link.py | 2 +- keepercommander/discovery_common/types.py | 7 ++++++- keepercommander/keeper_dag/__version__.py | 3 +-- keepercommander/keeper_dag/connection/__init__.py | 12 ++++++++++++ keepercommander/keeper_dag/connection/commander.py | 6 +----- keepercommander/keeper_dag/connection/ksm.py | 3 +-- 9 files changed, 31 insertions(+), 20 deletions(-) diff --git a/keepercommander/commands/discover/__init__.py b/keepercommander/commands/discover/__init__.py index f9c6c653b..247a2e07d 100644 --- a/keepercommander/commands/discover/__init__.py +++ b/keepercommander/commands/discover/__init__.py @@ -317,7 +317,7 @@ class PAMGatewayActionDiscoverCommandBase(Command): }, "operatingSystem": { "type": "choice", - "values": ["linux", "macos", "windows"] + "values": ["linux", "macos", "windows", "cisco_ios_xe"] } } diff --git a/keepercommander/commands/discover/job_status.py b/keepercommander/commands/discover/job_status.py index 01c822f97..f9e3fd0f3 100644 --- a/keepercommander/commands/discover/job_status.py +++ b/keepercommander/commands/discover/job_status.py @@ -181,18 +181,18 @@ def _find_job(configuration_record) -> Optional[Dict]: color = bcolors.OKBLUE status = "RUNNING" - if job.end_ts is not None: - if job.success is None and job.end_ts: + if job.end_ts is not None and not job.error: + if job.success is None: color = bcolors.WHITE status = "CANCELLED" else: color = bcolors.OKGREEN status = "COMPLETE" - elif job.success is False: + elif job.error: color = bcolors.FAIL status = "FAILED" - status = f"{color}{status}{bcolors.ENDC}" + color_status = f"{color}{status}{bcolors.ENDC}" print("") print(f"{_h('Job ID')}: {job.job_id}") @@ -200,7 +200,7 @@ def _find_job(configuration_record) -> Optional[Dict]: print(f"{_h('Gateway Name')}: 
{gateway_context.gateway_name}") print(f"{_h('Gateway UID')}: {gateway_context.gateway_uid}") print(f"{_h('Configuration UID')}: {gateway_context.configuration_uid}") - print(f"{_h('Status')}: {status}") + print(f"{_h('Status')}: {color_status}") print(f"{_h('Resource UID')}: {job.resource_uid or 'NA'}") print(f"{_h('Started')}: {job.start_ts_str}") print(f"{_h('Completed')}: {job.end_ts_str}") @@ -210,10 +210,10 @@ def _find_job(configuration_record) -> Optional[Dict]: if status == "FAILED": print("") print(f"{_h('Gateway Error')}:") - print(f"{color}{job['error']}{bcolors.ENDC}") + print(f"{color}{job.error}{bcolors.ENDC}") print("") print(f"{_h('Gateway Stacktrace')}:") - print(f"{color}{job['stacktrace']}{bcolors.ENDC}") + print(f"{color}{job.stacktrace}{bcolors.ENDC}") # If it finished, show information about what was discovered. elif job.end_ts is not None: diff --git a/keepercommander/discovery_common/__version__.py b/keepercommander/discovery_common/__version__.py index 7b344eca4..bc50bee68 100644 --- a/keepercommander/discovery_common/__version__.py +++ b/keepercommander/discovery_common/__version__.py @@ -1 +1 @@ -__version__ = '1.1.2' +__version__ = '1.1.4' diff --git a/keepercommander/discovery_common/record_link.py b/keepercommander/discovery_common/record_link.py index d96ab167e..705c57c8f 100644 --- a/keepercommander/discovery_common/record_link.py +++ b/keepercommander/discovery_common/record_link.py @@ -409,7 +409,7 @@ def delete(vertex: DAGVertex): def save(self): - # self.logger.info("DISCOVERY COMMON RECORD LINKING GRAPH SAVE CALLED") + self.logger.debug("DISCOVERY COMMON RECORD LINKING GRAPH SAVE CALLED") if self.dag.has_graph: self.logger.debug("saving the record linking.") self.dag.save(delta_graph=False) diff --git a/keepercommander/discovery_common/types.py b/keepercommander/discovery_common/types.py index 6806adc73..710353f44 100644 --- a/keepercommander/discovery_common/types.py +++ b/keepercommander/discovery_common/types.py @@ -212,7 
+212,7 @@ def search(self, search: str) -> bool: def close(self): try: - if self.engine_rule: + if self.engine_rule and hasattr(self.rule_engine, "close"): self.engine_rule.close() self.engine_rule = None del self.engine_rule @@ -423,6 +423,11 @@ class FactsNameUser(BaseModel): class Facts(BaseModel): name: Optional[str] = None + + # For devices + make: Optional[str] = None + model: Optional[str] = None + directories: List[FactsDirectory] = [] id: Optional[FactsId] = None services: List[FactsNameUser] = [] diff --git a/keepercommander/keeper_dag/__version__.py b/keepercommander/keeper_dag/__version__.py index ffd0919d5..394531931 100644 --- a/keepercommander/keeper_dag/__version__.py +++ b/keepercommander/keeper_dag/__version__.py @@ -1,2 +1 @@ -__version__ = '1.1.3' # pragma: no cover - +__version__ = '1.1.6' # pragma: no cover diff --git a/keepercommander/keeper_dag/connection/__init__.py b/keepercommander/keeper_dag/connection/__init__.py index 659579978..ab0c318a1 100644 --- a/keepercommander/keeper_dag/connection/__init__.py +++ b/keepercommander/keeper_dag/connection/__init__.py @@ -97,6 +97,18 @@ def get_encrypted_payload_data(encrypted_payload_data: bytes) -> bytes: except Exception as err: raise Exception(f"Could not parse router response: {err}") + @staticmethod + def get_router_host(server_hostname: str): + + # Only PROD GovCloud strips the subdomain (workaround for prod infrastructure). + # DEV/QA GOV (govcloud.dev.keepersecurity.us, govcloud.qa.keepersecurity.us) keep govcloud. 
+ if server_hostname == 'govcloud.keepersecurity.us': + configured_host = 'connect.keepersecurity.us' + else: + configured_host = f'connect.{server_hostname}' + + return os.environ.get("ROUTER_HOST", configured_host) + def rest_call_to_router(self, http_method: str, endpoint: str, diff --git a/keepercommander/keeper_dag/connection/commander.py b/keepercommander/keeper_dag/connection/commander.py index e69f18fb3..b7460ff69 100644 --- a/keepercommander/keeper_dag/connection/commander.py +++ b/keepercommander/keeper_dag/connection/commander.py @@ -63,11 +63,7 @@ def get_key_bytes(record: KeeperRecord) -> bytes: @property def hostname(self) -> str: - # The host is connect.keepersecurity.com, connect.dev.keepersecurity.com, etc. - from ...constants import get_router_host - server = self.params.config.get("server") - configured_host = get_router_host(server) - return os.environ.get("ROUTER_HOST", configured_host) + return self.get_router_host(self.params.config.get("server")) @property def dag_server_url(self) -> str: diff --git a/keepercommander/keeper_dag/connection/ksm.py b/keepercommander/keeper_dag/connection/ksm.py index f6112ee69..58fa4fac0 100644 --- a/keepercommander/keeper_dag/connection/ksm.py +++ b/keepercommander/keeper_dag/connection/ksm.py @@ -115,8 +115,7 @@ def app_key(self) -> str: return self.get_config_value(ConfigKeys.KEY_APP_KEY) def router_url_from_ksm_config(self) -> str: - from ...constants import get_router_host - return get_router_host(self.hostname) + return self.get_router_host(self.hostname) def ws_router_url_from_ksm_config(self, is_ws: bool = False) -> str: From 3d5ddd87852db4d157557fcc267185f914d32df6 Mon Sep 17 00:00:00 2001 From: Sergey Kolupaev Date: Thu, 8 Jan 2026 11:41:33 -0800 Subject: [PATCH 04/24] Gov Dev server public key --- keepercommander/rest_api.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/keepercommander/rest_api.py b/keepercommander/rest_api.py index ca9a80e93..315cf1d9a 100644 --- 
a/keepercommander/rest_api.py +++ b/keepercommander/rest_api.py @@ -107,7 +107,10 @@ 'BDXyZZnrl0tc2jdC5I61JjwkjK2kr7uet9tZjt8StTiJTAQQmnVOYBgbtP08PWDbecxnHghx3kJ8QXq1XE68y8c')), 17: crypto.load_ec_public_key(utils.base64_url_decode( - 'BFX68cb97m9_sweGdOVavFM3j5ot6gveg6xT4BtGahfGhKib-zdZyO9pwvv1cBda9ahkSzo1BQ4NVXp9qRyqVGU')), + 'BFX68cb97m9_sweGdOVavFM3j5ot6gveg6xT4BtGahfGhKib-zdZyO9pwvv1cBda9ahkSzo1BQ4NVXp9qRyqVGU')), + + 18: crypto.load_ec_public_key(utils.base64_url_decode( + 'BNhngQqTT1bPKxGuB6FhbPTAeNVFl8PKGGSGo5W06xWIReutm6ix6JPivqnbvkydY-1uDQTr-5e6t70G01Bb5JA')), 107: utils.base64_url_decode('rsSl4OfJIffO0Fxp6oBgGqJtniM8eyhi5lwlOiVZnwGrn_qnTch5fXbJwpAbnvoIa6VS00MeNShBvDo6bNIR2RRuq4lkgHUXNfap79l8JDi6xllfVLKRn5psVrGqXkybxjJH9UNQWJKr5TurXRnG9Wll0xXCC6ppHNszZ3e7D_xt5kFO2NS5PVl83uNp-Qp7iRAzcEtizvB55pkrAbyGkAALlCqsOSMPWXI544ult8EVCgU1CRk8vMsNSmNG8mO3FcCg0rmA7ljNdSQPQjQ231NJqDeRdlUAZclK8gks3WKF0CIsbVmbTZICFesd42Cw9CbHusZUjKoyVIDPVowFmHg72kcmPovBwMuesGg3coAj_NoFAlSyiXsvgPNMDciND-Z7OiRpK-eG6RGyUvhcdlw6k5FzjfNab5Zix9hHkoZhkyG-h9exzTxxgdeu6AMLEKl2QHtXpqle4zelOSiOtkgC8BRyPfjD5PBwrJsduwoXf0KljDXP0ceAmPbMaSMabeMj3CJY8OuHZOIxzQF2J8xEnAC9wuOD2ElNcVyOQ6lyFCueXfofHce_gMAIZpABZVZDxsB2jLBW1VF1yksRb1MAjPYBHFV7YJhHyiMI34VRbPo1o3lq9vk-4rwPwYUWDWQePDUkiYuxQTs36WwXVAtY-XOF9HMnkRjK25sqV_uwzbAIHAyf8SomVHUX7AtjyMYu52d33kkHhiA7ofqXjwBK69odR8Ckzam-egKkuUWlkPJeDXyR_AhpYhAqbzqgQHw7QhpEayBIS9xRcCixONLGkKtn8xIs-6HKhPCjTEDEN_IjVxM-HEY_rHpY_wSWibtNPEPAgAEtZSQIWyjJyPMRYTEgJZEBUUk1hiuNi_FFiNM71wkJldxngipdWYd9KFt5shExO4CAzxtd3MOTrHQEBuXNz6GL50diBJuioRR5QhgCddNejAcet8Ul0duAXuCyI2QDZnCQf0RO97Z9alwYllbA67EZMScBq6s81lAqqXWPJNNWQbQEVzidmdW28ZIbaOaPLOUuIqYJv_J60XuLLcYP4hrMBANqtwAb_HNlXGaW06vJT7wasEMzhDKVNSpFs4VYSMk1tIR6wjspGYJRGGE7hpiaDCPKOQRe24Y2HPJEBmNGWmMjREKpZYKg2nsV9nWvDVShKol1JpZM2kI9AEQPIIo9ZiHBleh_OnynWcCVAdl9ZsOkpOtMFPZap8gooqqMt2FO-gofSpGkFkNs41a5gxGeqRETiBsoa-ZNyqq2wJm77qJqSvC54oGOjFE_mfJBH0I4Fahy6Kkm-zKrJEAuehaVTXVHamgJeSkfLemt3WVlgPwZjNJMh5FsM8W8rclE78yI2WYpY9i0RhOJkNViBGcn5EovgERJPEsLO7SBv
iJwetmUQyUVbOAyX4ag_cBJ2fmUkwVQuhV6PdsnTqFfV2dM2ptbQgcopypvHyggh1vFooMYDIUCZ7qfBZYx7ZclV0woecJGjiK9mKgTlumnHtkx1zCETtNubQaDANkpAlZkKuS42WczUDEzpye8OpeYgaF6NIolqNFs_plgZVhg9_ZBEIZcnNGd1sgbg3it_Rk7EZXJDHB_6cWoS8VW1Wcq72FBpFDOWDiiCapw0QknC9pfB_gV7zm6nJpzFRcInIqGuNNJdMcCbmPAV0cKBxGR7PfKS4KCLoTCGctS3fKpE-Q-ZJBNwWRwB-cogesufQdr1oKJ3xXJtzGcoMK2n9OSAAStt3A_V-YvkVVzb0Z32HMG79NgAAeCG9xXkidVLIpzLuhg4AQRVzpXf8gry6qBZUppbPCriSSBr-vDlJIHh6gu7-y1cLi2zHinriOoElMK3fWI1JO7HUItxLCHrkzJt4R2N9nOD8gDqIK16-OG9SssQNlex5e0MVkbAA0BR1mn7MSTBvevw2VcsTJoIxXEIjS87fRvuadwrKqw3nKFOPSCZzgC7XFtzxIpazSCGWi_5RSDWnSXGLJvNcKhcvgfpgk4jSoEdDIJ30FUDFzHkusUnfVAbTnCIWCYDKWyk7G8bsRcVZwNxwKJ7G2Mz4a3yrkYuxaFmGJX5EdCEM4EWceF5rVW3ZQuvzo'), @@ -121,7 +124,7 @@ def encrypt_with_keeper_key(context, data: bytes) -> bytes: key_id = context.server_key_id if 1 <= key_id <= 6: return crypto.encrypt_rsa(data, SERVER_PUBLIC_KEYS[key_id]) - elif 7 <= key_id <= 17: + elif 7 <= key_id <= 18: return crypto.encrypt_ec(data, SERVER_PUBLIC_KEYS[key_id]) else: raise KeeperApiError('invalid_key_id', f'Key ID \"{key_id}\" is not valid.') From f0bf31207d5349b3e406d1d86ca915e30c4e04bc Mon Sep 17 00:00:00 2001 From: pvagare-ks Date: Fri, 9 Jan 2026 21:30:12 +0530 Subject: [PATCH 05/24] Added `msp-update` `--name / -n` parameter for updating node, Fix msp `--file-plan` command for 1TB/10TB storage (#1755) (#1756) * Added msp-update --name parameter * KC-1066 fix file storage --- keepercommander/commands/msp.py | 5 +++++ keepercommander/constants.py | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/keepercommander/commands/msp.py b/keepercommander/commands/msp.py index 320484cf8..fbeb6ebb0 100644 --- a/keepercommander/commands/msp.py +++ b/keepercommander/commands/msp.py @@ -69,6 +69,7 @@ def register_command_info(aliases, command_info): msp_update_parser = argparse.ArgumentParser(prog='msp-update', usage='msp-update', description='Modify a Managed Company license') msp_update_parser.add_argument('--node', 
dest='node', action='store', help='node name or node ID') +msp_update_parser.add_argument('-n', '--name', dest='name', action='store', help='update managed company name') msp_update_parser.add_argument('-p', '--plan', dest='plan', action='store', choices=[x[1] for x in constants.MSP_PLANS], help=f'License plan: {", ".join((x[1] for x in constants.MSP_PLANS))}') @@ -425,6 +426,10 @@ def execute(self, params, **kwargs): raise CommandError('msp-update', f'More than one nodes \"{node_name}\" are found') rq['node_id'] = nodes[0]['node_id'] + new_name = kwargs.get('name') + if new_name: + rq['enterprise_name'] = new_name + permits = next((x['msp_permits'] for x in params.enterprise.get('licenses', []) if 'msp_permits' in x), None) plan_name = kwargs.get('plan') diff --git a/keepercommander/constants.py b/keepercommander/constants.py index 303d77279..121c396e7 100644 --- a/keepercommander/constants.py +++ b/keepercommander/constants.py @@ -19,8 +19,8 @@ MSP_FILE_PLANS = [ (4, 'STORAGE_100GB', '100GB'), - (7, 'STORAGE_1000GB', '1TB'), - (8, 'STORAGE_10000GB', '10TB'), + (7, 'STORAGE_1TB', '1TB'), + (8, 'STORAGE_10TB', '10TB'), ] MSP_PLANS = [ From f2eab01ea79d3c46faeebcb8cd1581d1f5ff99ce Mon Sep 17 00:00:00 2001 From: idimov-keeper <78815270+idimov-keeper@users.noreply.github.com> Date: Fri, 9 Jan 2026 20:51:17 -0600 Subject: [PATCH 06/24] `pam launch` Establish ssh sessions with ssh key creds only (#1744) * Establish ssh sessions with ssh key creds only * Added check for CLI mode support on gateway/guacd (using timeout on STDOUT pipe) * resolved conflicts with upstream --- keepercommander/commands/pam_launch/launch.py | 136 +++++- .../commands/pam_launch/python_handler.py | 122 ++++++ .../pam_launch/terminal_connection.py | 390 ++++++++++++++++-- 3 files changed, 599 insertions(+), 49 deletions(-) diff --git a/keepercommander/commands/pam_launch/launch.py b/keepercommander/commands/pam_launch/launch.py index 7712f8471..767b600df 100644 --- 
a/keepercommander/commands/pam_launch/launch.py +++ b/keepercommander/commands/pam_launch/launch.py @@ -52,6 +52,12 @@ class PAMLaunchCommand(Command): parser.add_argument('--no-trickle-ice', '-nti', required=False, dest='no_trickle_ice', action='store_true', help='Disable trickle ICE for WebRTC connections. By default, trickle ICE is enabled ' 'for real-time candidate exchange.') + # parser.add_argument('--user', '-u', required=False, dest='launch_credential_uid', type=str, + # help='UID of pamUser record to use as launch credentials when allowSupplyUser is enabled. ' + # 'Fails if allowSupplyUser is not enabled or the specified record is not found.') + # parser.add_argument('--host', '-H', required=False, dest='custom_host', type=str, + # help='Hostname or IP address to connect to when allowSupplyHost is enabled. ' + # 'Fails if allowSupplyHost is not enabled.') def get_parser(self): return PAMLaunchCommand.parser @@ -277,6 +283,95 @@ def execute(self, params: KeeperParams, **kwargs): logging.debug(f"Found record: {record_uid}") + # Validate --user and --host parameters against allowSupply flags + # Note: cmdline options override record data when provided + # launch_credential_uid = kwargs.get('launch_credential_uid') + # custom_host = kwargs.get('custom_host') + + # Load record to check allowSupply flags and existing values + # record = vault.KeeperRecord.load(params, record_uid) + # if not isinstance(record, vault.TypedRecord): + # raise CommandError('pam launch', f'Record {record_uid} is not a TypedRecord') + + # pam_settings_field = record.get_typed_field('pamSettings') + # allow_supply_user = False + # allow_supply_host = False + # user_records_on_record = [] + # hostname_on_record = None + + # Get hostname from record + # hostname_field = record.get_typed_field('pamHostname') + # if hostname_field: + # host_value = hostname_field.get_default_value(dict) + # if host_value: + # hostname_on_record = host_value.get('hostName') + + # if pam_settings_field: + # 
pam_settings_value = pam_settings_field.get_default_value(dict) + # if pam_settings_value: + # # allowSupplyHost is at top level of pamSettings value + # allow_supply_host = pam_settings_value.get('allowSupplyHost', False) + # # allowSupplyUser is inside connection + # connection = pam_settings_value.get('connection', {}) + # if isinstance(connection, dict): + # allow_supply_user = connection.get('allowSupplyUser', False) + # user_records_on_record = connection.get('userRecords', []) + + # Validation based on allowSupply flags + # if allow_supply_host and allow_supply_user: + # # Both flags true: --user is required (no fallback to userRecords) + # if not launch_credential_uid: + # raise CommandError('pam launch', + # f'Both allowSupplyUser and allowSupplyHost are enabled. ' + # f'You must provide --user to specify launch credentials.') + # # --host required if no hostname on record + # if not custom_host and not hostname_on_record: + # raise CommandError('pam launch', + # f'Both allowSupplyUser and allowSupplyHost are enabled and no hostname on record. ' + # f'You must provide --host to specify the target host.') + + # elif allow_supply_user and not allow_supply_host: + # # Only allowSupplyUser: use --user if provided, else userRecords, else error + # if not launch_credential_uid and not user_records_on_record: + # raise CommandError('pam launch', + # f'allowSupplyUser is enabled but no credentials available. ' + # f'Use --user to specify a pamUser record or configure userRecords on the record.') + + # elif allow_supply_host and not allow_supply_user: + # # Only allowSupplyHost: --host required if no hostname on record + # if not custom_host and not hostname_on_record: + # raise CommandError('pam launch', + # f'allowSupplyHost is enabled but no hostname available. 
' + # f'Use --host to specify the target host or configure hostname on the record.') + + # Validate --user parameter if provided + # if launch_credential_uid: + # if not allow_supply_user: + # raise CommandError('pam launch', + # f'--user parameter requires allowSupplyUser to be enabled on the record. ' + # f'allowSupplyUser is currently disabled for record {record_uid}.') + + # # Validate the launch credential record exists and is a pamUser + # cred_record = vault.KeeperRecord.load(params, launch_credential_uid) + # if not cred_record: + # raise CommandError('pam launch', + # f'Launch credential record not found: {launch_credential_uid}') + # if not isinstance(cred_record, vault.TypedRecord) or cred_record.record_type != 'pamUser': + # raise CommandError('pam launch', + # f'Launch credential record {launch_credential_uid} must be a pamUser record. ' + # f'Found: {cred_record.record_type if isinstance(cred_record, vault.TypedRecord) else "non-typed"}') + + # logging.debug(f"Using custom launch credential: {launch_credential_uid}") + + # Validate --host parameter if provided + # if custom_host: + # if not allow_supply_host: + # raise CommandError('pam launch', + # f'--host parameter requires allowSupplyHost to be enabled on the record. 
' + # f'allowSupplyHost is currently disabled for record {record_uid}.') + + # logging.debug(f"Using custom host: {custom_host}") + # Find the gateway for this record gateway_info = self.find_gateway(params, record_uid) @@ -407,15 +502,40 @@ def signal_handler_fn(signum, frame): if not connected: raise CommandError('pam launch', "WebRTC connection not established within timeout") + # Wait a brief moment for DataChannel to be ready after connection state becomes "connected" + # The connection state can be "connected" before the DataChannel is actually ready to send data + time.sleep(0.2) + # Send OpenConnection to Gateway to initiate guacd session # This is critical - without it, Gateway doesn't start guacd and no Guacamole traffic flows + # Retry with exponential backoff if DataChannel isn't ready yet logging.debug(f"Sending OpenConnection to Gateway (conn_no=1, conversation_id={conversation_id})") - try: - tube_registry.open_handler_connection(conversation_id, 1) - logging.debug("✓ OpenConnection sent successfully") - except Exception as e: - logging.error(f"Failed to send OpenConnection: {e}") - raise CommandError('pam launch', f"Failed to send OpenConnection: {e}") + max_retries = 5 + retry_delay = 0.1 + last_error = None + + for attempt in range(max_retries): + try: + tube_registry.open_handler_connection(conversation_id, 1) + logging.debug("✓ OpenConnection sent successfully") + break + except Exception as e: + last_error = e + error_str = str(e).lower() + # Check if error is DataChannel-related + if "datachannel" in error_str or "not opened" in error_str: + if attempt < max_retries - 1: + wait_time = retry_delay * (2 ** attempt) # Exponential backoff + logging.debug(f"DataChannel not ready, retrying in {wait_time:.2f}s (attempt {attempt + 1}/{max_retries})") + time.sleep(wait_time) + continue + # For other errors or final attempt, raise immediately + logging.error(f"Failed to send OpenConnection: {e}") + raise CommandError('pam launch', f"Failed to send 
OpenConnection: {e}") + else: + # All retries exhausted + logging.error(f"Failed to send OpenConnection after {max_retries} attempts: {last_error}") + raise CommandError('pam launch', f"Failed to send OpenConnection after {max_retries} attempts: {last_error}") # Wait for Guacamole ready print("Waiting for Guacamole connection...") @@ -439,6 +559,10 @@ def signal_handler_fn(signum, frame): logging.warning(f"Guacamole did not report ready within {guac_ready_timeout}s") logging.warning("Terminal may still work if data is flowing.") + # Check for STDOUT pipe support (feature detection) + # This warns the user if CLI pipe mode is not supported by the gateway + python_handler.check_stdout_pipe_support(timeout=10.0) + # Create stdin handler for pipe/blob/end input pattern # StdinHandler reads raw stdin and sends via send_stdin (base64-encoded) # This matches kcm-cli's implementation for plaintext SSH/TTY streams diff --git a/keepercommander/commands/pam_launch/python_handler.py b/keepercommander/commands/pam_launch/python_handler.py index dbdc13d69..3294c7b76 100644 --- a/keepercommander/commands/pam_launch/python_handler.py +++ b/keepercommander/commands/pam_launch/python_handler.py @@ -133,6 +133,14 @@ def __init__( # Server sends pipe with name "STDOUT", then blobs with base64 terminal output self.stdout_stream_index: int = -1 + # Feature detection for CLI pipe mode + # STDOUT pipe: if the server supports plaintext SSH/TTY mode, it sends a STDOUT pipe + # STDIN pipe: when we try to send input, the server should ack successfully + self.stdout_pipe_opened = threading.Event() # Set when STDOUT pipe is received + self.stdin_pipe_failed = False # Set if STDIN pipe ack fails + self.stdin_stream_index: int = 0 # Stream index we use for STDIN + self.pending_stdin_ack = False # True when waiting for STDIN ack + # Create instruction router with custom handlers for our needs # Pass self as stdout_stream_tracker so router can decode STDOUT blobs self.parser.oninstruction = 
create_instruction_router( @@ -142,6 +150,8 @@ def __init__( 'ready': self._on_ready, 'disconnect': self._on_guac_disconnect, 'error': self._on_error, + 'ack': self._on_ack, # Custom ack handler for STDIN failure detection + 'pipe': self._on_pipe, # Custom pipe handler for STDOUT detection }, send_ack_callback=self._send_ack, stdout_stream_tracker=self, @@ -364,6 +374,12 @@ def _send_handshake_response(self, args_list: List[str]): # Get guacd parameters (hostname, port, username, password, etc.) guacd_params = settings.get('guacd_params', {}) + # Debug: Log what credentials we have + logging.debug(f"DEBUG: guacd_params keys: {list(guacd_params.keys())}") + logging.debug(f"DEBUG: guacd_params['username']: {'(set)' if guacd_params.get('username') else '(empty)'}") + logging.debug(f"DEBUG: guacd_params['password']: {'(set)' if guacd_params.get('password') else '(empty)'}") + logging.debug(f"DEBUG: guacd_params['private-key']: {'(set)' if guacd_params.get('private-key') else '(empty)'}") + # Build connect args: first arg is version (from guacd), rest are param values connect_args = [] @@ -394,6 +410,13 @@ def _send_handshake_response(self, args_list: List[str]): connect_instruction = self._format_instruction('connect', *connect_args) self._send_to_gateway(connect_instruction) logging.debug(f"Sent 'connect' with {len(connect_args)} args") + # Debug: Show which args were sent (without revealing secrets) + if args_list: + for i, param_name in enumerate(args_list[1:], start=1): + value = connect_args[i] if i < len(connect_args) else "(missing)" + is_secret = param_name.lower() in ['password', 'passphrase', 'private-key'] + display_value = '(set)' if (is_secret and value) else ('(empty)' if is_secret else value[:20] if isinstance(value, str) else value) + logging.debug(f"DEBUG: connect arg '{param_name}' = {display_value}") # Send size instruction size_instruction = self._format_instruction('size', width, height, dpi) @@ -507,6 +530,58 @@ def _on_error(self, args: 
List[str]) -> None: logging.error(f"Guacamole error {code}: {message}") + def _on_pipe(self, args: List[str]) -> None: + """ + Handle pipe instruction - track STDOUT pipe opening for feature detection. + + When the server supports plaintext SSH/TTY mode, it sends a pipe with name "STDOUT". + If this pipe never opens, the feature is not supported by the gateway/connection. + + Note: The instruction router handles STDOUT ack and blob decode before calling this. + This handler just sets the event to signal that STDOUT pipe was opened. + + Args: + args: [stream_index, mimetype, name] + """ + if len(args) >= 3: + stream_index, mimetype, name = args[0], args[1], args[2] + logging.debug(f"[PIPE] stream={stream_index}, type={mimetype}, name={name}") + + if name == 'STDOUT': + # Signal that STDOUT pipe was opened - CLI pipe mode is supported + # Note: stream_index and ack are already handled by instruction router + self.stdout_pipe_opened.set() + logging.debug(f"STDOUT pipe opened on stream {stream_index} - CLI pipe mode supported") + else: + logging.debug(f"[PIPE] {args}") + + def _on_ack(self, args: List[str]) -> None: + """ + Handle ack instruction - detect STDIN pipe failures. + + When we try to send input via STDIN pipe, the server should ack successfully. + If the ack has a non-zero code, the STDIN pipe feature is not supported. + + Args: + args: [stream_index, message, code] + """ + if len(args) >= 3: + stream_index, message, code = args[0], args[1], args[2] + logging.debug(f"[ACK] stream={stream_index}, message='{message}', code={code}") + + # Check if this is an ack for our STDIN stream + if self.pending_stdin_ack and int(stream_index) == self.stdin_stream_index: + self.pending_stdin_ack = False + if code != '0': + # Non-zero code means STDIN pipe failed + self.stdin_pipe_failed = True + logging.warning( + f"STDIN pipe failed (stream={stream_index}, code={code}, message='{message}'). " + f"CLI input mode may not be supported by this connection." 
+ ) + else: + logging.debug(f"[ACK] {args}") + def _format_instruction(self, *elements) -> bytes: """Format elements into a Guacamole instruction.""" # Use the new guacamole module's to_instruction function @@ -577,9 +652,18 @@ def send_stdin(self, data: bytes): logging.debug("Ignoring stdin - connection not ready") return + # Check if STDIN pipe previously failed + if self.stdin_pipe_failed: + logging.debug("Ignoring stdin - STDIN pipe not supported") + return + try: # Use stream index 0 for STDIN (matching kcm-cli) stream_index = '0' + self.stdin_stream_index = int(stream_index) + + # Track that we're waiting for ack (for failure detection) + self.pending_stdin_ack = True # Send pipe instruction to open STDIN stream pipe_instruction = self._format_instruction('pipe', stream_index, 'text/plain', 'STDIN') @@ -602,6 +686,44 @@ def send_stdin(self, data: bytes): except Exception as e: logging.error(f"Error sending stdin: {e}") + def check_stdout_pipe_support(self, timeout: float = 10.0) -> bool: + """ + Check if STDOUT pipe is supported with a timeout. + + This should be called after connection is established (after first sync). + If the STDOUT pipe doesn't open within the timeout, warns the user that + CLI pipe mode may not be supported. + + Args: + timeout: Seconds to wait for STDOUT pipe (default 10.0) + + Returns: + True if STDOUT pipe opened, False if timeout expired + """ + if self.stdout_pipe_opened.wait(timeout): + logging.debug("STDOUT pipe support confirmed") + return True + else: + logging.warning( + f"STDOUT pipe did not open within {timeout}s. " + f"CLI pipe mode may not be supported by this gateway/connection." + ) + print( + "\nNo STDOUT stream has been received since the connection was opened. " + "This may indicate the gateway/guacd does not support CLI mode. " + "You can continue waiting, or press Ctrl+C to cancel." + ) + return False + + def is_stdin_supported(self) -> bool: + """ + Check if STDIN pipe is supported. 
+ + Returns: + True if STDIN pipe has not failed, False if it failed + """ + return not self.stdin_pipe_failed + def send_key(self, keysym: int, pressed: bool): """ Send a key event to guacd using X11 keysym. diff --git a/keepercommander/commands/pam_launch/terminal_connection.py b/keepercommander/commands/pam_launch/terminal_connection.py index 64a25b638..57dcf4d85 100644 --- a/keepercommander/commands/pam_launch/terminal_connection.py +++ b/keepercommander/commands/pam_launch/terminal_connection.py @@ -33,6 +33,8 @@ from ...error import CommandError from ... import vault +from ...keeper_dag import EdgeType +from ..ssh_agent import try_extract_private_key from ..tunnel.port_forward.tunnel_helpers import ( get_or_create_tube_registry, start_websocket_listener, @@ -45,9 +47,11 @@ tunnel_encrypt, tunnel_decrypt, get_tunnel_session, + get_keeper_tokens, MAIN_NONCE_LENGTH, SYMMETRIC_KEY_LENGTH, ) +from ..tunnel.port_forward.TunnelGraph import TunnelDAG from ..pam.pam_dto import GatewayAction, GatewayActionWebRTCSession from ..pam.router_helper import ( router_send_action_to_gateway, @@ -295,7 +299,13 @@ def detect_protocol(params: KeeperParams, record_uid: str) -> Optional[str]: f'Supported types: pamMachine, pamDirectory, pamDatabase') -def extract_terminal_settings(params: KeeperParams, record_uid: str, protocol: str) -> Dict[str, Any]: +def extract_terminal_settings( + params: KeeperParams, + record_uid: str, + protocol: str, + launch_credential_uid: Optional[str] = None, + custom_host: Optional[str] = None, +) -> Dict[str, Any]: """ Extract terminal connection settings from a PAM record. 
@@ -303,6 +313,8 @@ def extract_terminal_settings(params: KeeperParams, record_uid: str, protocol: s params: KeeperParams instance record_uid: Record UID protocol: Protocol type (from detect_protocol) + launch_credential_uid: Optional override for userRecordUid (from --user CLI param) + custom_host: Optional override for hostname (from --host CLI param) Returns: Dictionary containing terminal settings: @@ -312,6 +324,9 @@ def extract_terminal_settings(params: KeeperParams, record_uid: str, protocol: s - terminal: {colorScheme: str, fontSize: str} - recording: {includeKeys: bool} - protocol_specific: Protocol-specific settings dict + - allowSupplyUser: bool - User can supply credentials on-the-fly + - allowSupplyHost: bool - User can supply host on-the-fly (forces userSupplied credential type) + - userRecordUid: str or None - UID of linked pamUser record for credentials Raises: CommandError: If required fields are missing @@ -326,7 +341,10 @@ def extract_terminal_settings(params: KeeperParams, record_uid: str, protocol: s 'clipboard': {'disableCopy': False, 'disablePaste': False}, 'terminal': {'colorScheme': 'gray-black', 'fontSize': '12'}, 'recording': {'includeKeys': False}, - 'protocol_specific': {} + 'protocol_specific': {}, + 'allowSupplyUser': False, + 'allowSupplyHost': False, + 'userRecordUid': None, } # Extract hostname and port @@ -339,8 +357,14 @@ def extract_terminal_settings(params: KeeperParams, record_uid: str, protocol: s raise CommandError('pam launch', f'Invalid hostname configuration for record {record_uid}') settings['hostname'] = host_value.get('hostName') - if not settings['hostname']: - raise CommandError('pam launch', f'Hostname not found in record {record_uid}') + + # Override hostname if custom_host provided (requires allowSupplyHost - validated in launch.py) + if custom_host: + settings['hostname'] = custom_host + logging.debug(f"Using custom host override: {custom_host}") + + # Validate hostname is present (either from record or CLI 
override) + # Note: allowSupplyHost check happens later after pamSettings are parsed # Get port (use default if not specified) port_value = host_value.get('port') @@ -372,6 +396,22 @@ def extract_terminal_settings(params: KeeperParams, record_uid: str, protocol: s # Recording settings settings['recording']['includeKeys'] = connection.get('recordingIncludeKeys', False) + # allowSupplyUser is inside connection + settings['allowSupplyUser'] = connection.get('allowSupplyUser', False) + + # Extract linked pamUser record UID from pamSettings (may be overridden by CLI later) + # When both admin and launch credentials exist, we must use launch credential + dag_launch_uid = _get_launch_credential_uid(params, record_uid) + if dag_launch_uid: + settings['userRecordUid'] = dag_launch_uid + logging.debug(f"Using launch credential from DAG: {settings['userRecordUid']}") + else: + # Fallback to userRecords from pamSettings if DAG lookup fails + user_records = connection.get('userRecords', []) + if user_records and len(user_records) > 0: + settings['userRecordUid'] = user_records[0] + logging.debug(f"Using userRecordUid from pamSettings: {settings['userRecordUid']}") + # Protocol-specific settings if protocol == ProtocolType.SSH: settings['protocol_specific'] = _extract_ssh_settings(connection) @@ -382,6 +422,27 @@ def extract_terminal_settings(params: KeeperParams, record_uid: str, protocol: s elif protocol in ProtocolType.DATABASE: settings['protocol_specific'] = _extract_database_settings(connection, protocol) + # allowSupplyHost is at top level of pamSettings value, not inside connection + settings['allowSupplyHost'] = pam_settings_value.get('allowSupplyHost', False) + + # CLI overrides always take precedence (applied after pamSettings extraction) + # These are validated in launch.py before being passed here + logging.debug(f"DEBUG extract_terminal_settings: launch_credential_uid={launch_credential_uid}, current userRecordUid={settings.get('userRecordUid')}") + if 
launch_credential_uid: + settings['userRecordUid'] = launch_credential_uid + logging.debug(f"Using launch credential from CLI override: {settings['userRecordUid']}") + + # Final validation: hostname must be present for connection to succeed + # Note: userRecordUid is optional - if not present, _build_guacamole_connection_settings() + # will fall back to credentials from the pamMachine record directly + if not settings['hostname']: + if settings['allowSupplyHost']: + raise CommandError('pam launch', + f'Hostname not found in record {record_uid}. Use --host to specify one.') + else: + raise CommandError('pam launch', + f'Hostname not found in record {record_uid} and allowSupplyHost is not enabled.') + return settings @@ -434,10 +495,10 @@ def _extract_database_settings(connection: Dict[str, Any], protocol: str) -> Dic return settings -def create_connection_context(params: KeeperParams, - record_uid: str, - gateway_uid: str, - protocol: str, +def create_connection_context(params: KeeperParams, + record_uid: str, + gateway_uid: str, + protocol: str, settings: Dict[str, Any], connect_as: Optional[str] = None) -> Dict[str, Any]: """ @@ -467,8 +528,15 @@ def create_connection_context(params: KeeperParams, 'recording': settings['recording'], 'connectAs': connect_as, 'conversationType': _get_conversation_type(protocol), + # Credential supply flags + 'allowSupplyUser': settings.get('allowSupplyUser', False), + 'allowSupplyHost': settings.get('allowSupplyHost', False), + # Linked pamUser record UID for credential extraction + 'userRecordUid': settings.get('userRecordUid'), } + logging.debug(f"DEBUG create_connection_context: userRecordUid={context.get('userRecordUid')}") + # Add protocol-specific settings if protocol == ProtocolType.SSH: context['ssh'] = settings['protocol_specific'] @@ -483,6 +551,84 @@ def create_connection_context(params: KeeperParams, return context +def _get_launch_credential_uid(params: 'KeeperParams', record_uid: str) -> Optional[str]: + """ + Find the 
launch credential UID for a PAM record using the DAG. + + When a pamMachine record has both administrative credentials and launch credentials, + we need to use the launch credential (marked with is_launch_credential=True in DAG). + This function queries the DAG to find the correct credential. + + Args: + params: KeeperParams instance + record_uid: UID of the pamMachine record + + Returns: + UID of the launch credential pamUser record, or None if not found + """ + try: + encrypted_session_token, encrypted_transmission_key, transmission_key = get_keeper_tokens(params) + tdag = TunnelDAG(params, encrypted_session_token, encrypted_transmission_key, record_uid, + transmission_key=transmission_key) + + if not tdag.linking_dag.has_graph: + logging.debug(f"No DAG graph loaded for record {record_uid}") + return None + + record_vertex = tdag.linking_dag.get_vertex(record_uid) + if record_vertex is None: + logging.debug(f"Record vertex not found in DAG for {record_uid}") + return None + + # Find all ACL links where Head is recordUID + # Look for the credential marked as is_launch_credential=True + launch_credential = None + admin_credential = None + all_linked = [] + + for user_vertex in record_vertex.has_vertices(EdgeType.ACL): + acl_edge = user_vertex.get_edge(record_vertex, EdgeType.ACL) + if acl_edge: + try: + content = acl_edge.content_as_dict or {} + is_admin = content.get('is_admin', False) + is_launch = content.get('is_launch_credential', None) + + all_linked.append(user_vertex.uid) + + if is_launch and launch_credential is None: + launch_credential = user_vertex.uid + logging.debug(f"Found launch credential via DAG: {launch_credential}") + + if is_admin and admin_credential is None: + admin_credential = user_vertex.uid + logging.debug(f"Found admin credential via DAG: {admin_credential}") + + except Exception as e: + logging.debug(f"Error parsing ACL edge content: {e}") + + # Prefer launch credential, fall back to first linked if no specific launch credential + if 
launch_credential: + logging.debug(f"Using launch credential from DAG: {launch_credential}") + return launch_credential + elif all_linked: + # If no explicit launch credential but we have linked users, + # prefer non-admin credential + for uid in all_linked: + if uid != admin_credential: + logging.debug(f"Using non-admin linked credential: {uid}") + return uid + # Fall back to first linked + logging.debug(f"Using first linked credential: {all_linked[0]}") + return all_linked[0] + + return None + + except Exception as e: + logging.debug(f"Error accessing DAG for launch credential: {e}") + return None + + def _get_conversation_type(protocol: str) -> str: """Map protocol to Guacamole conversation type""" # Map our protocol names to Guacamole conversation types @@ -497,6 +643,74 @@ def _get_conversation_type(protocol: str) -> str: return mapping.get(protocol, protocol) +def _extract_user_record_credentials( + params: 'KeeperParams', + user_record_uid: str +) -> Dict[str, Any]: + """ + Extract credentials from a linked pamUser record. + + This function extracts username, password, private key, and passphrase from + a pamUser record. For SSH connections, the private key is extracted using + try_extract_private_key() which checks keyPair fields, notes, custom fields, + and attachments. The password field serves as the passphrase for encrypted + private keys. 
+ + Args: + params: KeeperParams instance + user_record_uid: UID of the linked pamUser record + + Returns: + Dictionary containing: + - username: Login username (str) + - password: Password (str) + - private_key: PEM-encoded private key if found (str or None) + - passphrase: Passphrase for encrypted private key (str or None) + """ + result = { + 'username': '', + 'password': '', + 'private_key': None, + 'passphrase': None, + } + + # Load the pamUser record + user_record = vault.KeeperRecord.load(params, user_record_uid) + if not isinstance(user_record, vault.TypedRecord): + logging.warning(f"User record {user_record_uid} is not a TypedRecord") + return result + + # Extract username from login field + login_field = user_record.get_typed_field('login') + if login_field: + result['username'] = login_field.get_default_value(str) or '' + + # Extract password + password_field = user_record.get_typed_field('password') + if password_field: + result['password'] = password_field.get_default_value(str) or '' + + # Extract private key using try_extract_private_key() + # This function checks: keyPair field, notes, custom fields (text, multiline, secret, note), and attachments + key_result = try_extract_private_key(params, user_record) + if key_result: + private_key, passphrase = key_result + result['private_key'] = private_key + # The password field serves as the passphrase for encrypted private keys + # If try_extract_private_key returned a passphrase (from password field), use it + # Otherwise, use the password we already extracted + result['passphrase'] = passphrase if passphrase else (result['password'] if result['password'] else None) + + logging.debug( + f"Extracted credentials from pamUser {user_record_uid}: " + f"username={'(set)' if result['username'] else '(empty)'}, " + f"password={'(set)' if result['password'] else '(empty)'}, " + f"private_key={'(set)' if result['private_key'] else '(empty)'}" + ) + + return result + + def _build_guacamole_connection_settings( 
params: 'KeeperParams', record_uid: str, @@ -504,6 +718,8 @@ def _build_guacamole_connection_settings( settings: Dict[str, Any], context: Dict[str, Any], screen_info: Dict[str, int], + user_record_uid: Optional[str] = None, + credential_type: str = 'linked', ) -> Dict[str, Any]: """ Build connection settings for Guacamole handshake in PythonHandler mode. @@ -511,32 +727,58 @@ def _build_guacamole_connection_settings( When guacd sends 'args' instruction requesting connection parameters, we respond with 'connect' containing these values. + Credential handling follows gateway behavior: + - If credential_type='linked' and user_record_uid is provided, extract credentials + from the linked pamUser record (username, password, private key) + - If credential_type='userSupplied', leave credentials empty (user provides on-the-fly) + - SSH authentication precedence: private key is tried first, then password + (standard SSH behavior handled by guacd) + Args: params: KeeperParams instance - record_uid: Record UID + record_uid: Record UID (pamMachine record) protocol: Protocol type (ssh, telnet, mysql, etc.) 
settings: Terminal settings from extract_terminal_settings() context: Connection context from create_connection_context() screen_info: Screen dimensions dict + user_record_uid: Optional UID of linked pamUser record for credentials + credential_type: Credential type ('linked', 'userSupplied', 'ephemeral') Returns: Dictionary with connection settings for GuacamoleHandler """ - # Get credentials from the record - record = vault.KeeperRecord.load(params, record_uid) - if not isinstance(record, vault.TypedRecord): - raise CommandError('pam launch', f'Record {record_uid} is not a TypedRecord') - - # Extract login credentials - login_field = record.get_typed_field('login') username = '' - if login_field: - username = login_field.get_default_value(str) or '' - - password_field = record.get_typed_field('password') password = '' - if password_field: - password = password_field.get_default_value(str) or '' + private_key = None + passphrase = None + + logging.debug(f"DEBUG _build_guacamole_connection_settings: credential_type={credential_type}, user_record_uid={user_record_uid}") + + # Determine how to get credentials based on credential_type + if credential_type == 'userSupplied': + # User-supplied credentials: leave empty, user will provide via guacamole prompt + logging.debug("Using userSupplied credential type - leaving credentials empty") + elif user_record_uid: + # Extract credentials from linked pamUser record + user_creds = _extract_user_record_credentials(params, user_record_uid) + username = user_creds['username'] + password = user_creds['password'] + private_key = user_creds['private_key'] + passphrase = user_creds['passphrase'] + logging.debug(f"Using credentials from linked pamUser record: {user_record_uid}") + else: + # Fallback: Get credentials from the pamMachine record directly + # (backward compatibility for records without linked pamUser) + record = vault.KeeperRecord.load(params, record_uid) + if isinstance(record, vault.TypedRecord): + login_field = 
record.get_typed_field('login') + if login_field: + username = login_field.get_default_value(str) or '' + + password_field = record.get_typed_field('password') + if password_field: + password = password_field.get_default_value(str) or '' + logging.debug("Using credentials from pamMachine record (no linked pamUser)") # Build guacd parameters dictionary # These map to guacd's expected parameter names @@ -550,6 +792,17 @@ def _build_guacamole_connection_settings( 'password': password, } + logging.debug(f"DEBUG guacd_params built: username={'(set)' if username else '(empty)'}, password={'(set)' if password else '(empty)'}") + + # Add private key for SSH protocol if available + # SSH authentication precedence: guacd/SSH tries private key first, then password + # Both can be present simultaneously - this matches gateway behavior + if protocol == ProtocolType.SSH and private_key: + guacd_params['private-key'] = private_key + if passphrase: + guacd_params['passphrase'] = passphrase + logging.debug("Added private-key to guacd_params for SSH authentication") + # Add protocol-specific parameters protocol_specific = settings.get('protocol_specific', {}) @@ -806,8 +1059,38 @@ def _open_terminal_webrtc_tunnel(params: KeeperParams, # Set conversationType to "python_handler" to enable PythonHandler protocol mode in Rust # The actual protocol (ssh, telnet, etc.) is passed via guacd_params["protocol"] + # IMPORTANT: Only update webrtc_settings - gateway needs the actual protocol type (ssh, telnet, etc.) + # The gateway validates conversationType against valid protocol types, not "python_handler" webrtc_settings["conversationType"] = "python_handler" - logging.debug(f"Set conversationType to 'python_handler' (actual protocol: {protocol})") + # Keep context['conversationType'] as the actual protocol (ssh, telnet, etc.) 
for gateway + # Do NOT change context["conversationType"] - gateway needs the real protocol type + logging.debug(f"Set webrtc_settings conversationType to 'python_handler' (gateway will receive: {context['conversationType']})") + + # Determine credential type based on allowSupplyHost, allowSupplyUser flags + # This matches gateway validation logic: + # - If allowSupplyHost=True: must be 'userSupplied' + # - If allowSupplyUser=True and no linked user: use 'userSupplied' + # - If linked user present: use 'linked' + allow_supply_host = context.get('allowSupplyHost', False) + allow_supply_user = context.get('allowSupplyUser', False) + user_record_uid = context.get('userRecordUid') + + logging.debug(f"DEBUG credential determination: allow_supply_host={allow_supply_host}, allow_supply_user={allow_supply_user}, user_record_uid={user_record_uid}") + + # credential_type is None when using pamMachine credentials directly (backward compatible) + # Priority: if user_record_uid is provided (from CLI or record), use 'linked' to send those credentials + credential_type = None + if user_record_uid: + # Linked user present (from CLI --user or record) - use linked credentials + credential_type = 'linked' + logging.debug(f"Using 'linked' credential type with userRecordUid: {user_record_uid}") + elif allow_supply_host or allow_supply_user: + # No credentials provided but supply flags enabled - user must provide interactively + credential_type = 'userSupplied' + logging.debug("No credentials provided, allowSupply enabled - using 'userSupplied' credential type") + else: + # No linked user, no supply flags - use pamMachine credentials directly + logging.debug("No linked user or supply flags - using pamMachine credentials directly") # Build connection settings for Guacamole handshake # These are used when guacd sends 'args' instruction @@ -818,6 +1101,8 @@ def _open_terminal_webrtc_tunnel(params: KeeperParams, settings=settings, context=context, screen_info=screen_info, + 
user_record_uid=user_record_uid, + credential_type=credential_type, ) # Create the handler and callback @@ -964,21 +1249,27 @@ def _open_terminal_webrtc_tunnel(params: KeeperParams, bytes_data = string_to_bytes(string_data) encrypted_data = tunnel_encrypt(symmetric_key, bytes_data) - # Extract userRecordUid from pamSettings - user_record_uid = None - pam_settings_field = record.get_typed_field('pamSettings') - if pam_settings_field: - pam_settings_value = pam_settings_field.get_default_value(dict) - if pam_settings_value: - connection = pam_settings_value.get('connection', {}) - if isinstance(connection, dict): - user_records = connection.get('userRecords', []) - if user_records and len(user_records) > 0: - user_record_uid = user_records[0] - logging.debug(f"Found userRecordUid: {user_record_uid}") - - if not user_record_uid: - logging.warning(f"No userRecordUid found in pamSettings for record {record_uid}") + # Get userRecordUid and credential flags from context (extracted in extract_terminal_settings) + user_record_uid = context.get('userRecordUid') + allow_supply_host = context.get('allowSupplyHost', False) + allow_supply_user = context.get('allowSupplyUser', False) + + # Determine credential type for gateway inputs + # IMPORTANT: Priority must match the guacd credentials logic above: + # 1. If user_record_uid is set (from CLI or record), use 'linked' - credentials come from that record + # 2. If allowSupply* but no user_record_uid, use 'userSupplied' - user will type at prompt + # 3. 
Otherwise, use pamMachine credentials directly (no credentialType) + credential_type_for_gateway = None + if user_record_uid: + # Credentials will come from linked pamUser record (via python_handler) + credential_type_for_gateway = 'linked' + logging.debug(f"Using 'linked' credential type for gateway with userRecordUid: {user_record_uid}") + elif allow_supply_host or allow_supply_user: + # No credentials provided, user must type at prompt + credential_type_for_gateway = 'userSupplied' + logging.debug("No credentials provided, allowSupply enabled - using 'userSupplied' for gateway") + else: + logging.debug(f"No linked pamUser for record {record_uid} - using pamMachine credentials directly") time.sleep(1) # Allow time for WebSocket listener to start @@ -994,10 +1285,17 @@ def _open_terminal_webrtc_tunnel(params: KeeperParams, "trickleICE": trickle_ice, # Set trickle ICE flag } - # Add userRecordUid and credentialType if we have a linked user - if user_record_uid: - inputs['userRecordUid'] = user_record_uid + # Add credential type and userRecordUid based on mode + if credential_type_for_gateway == 'linked' and user_record_uid: inputs['credentialType'] = 'linked' + inputs['userRecordUid'] = user_record_uid + elif credential_type_for_gateway == 'userSupplied': + inputs['credentialType'] = 'userSupplied' + # For userSupplied, set allow_supply_user flag in connect_as_settings + # This matches gateway behavior (line 1203 in tunnel_vault_record.py) + inputs['allowSupplyUser'] = True + logging.debug("Using userSupplied credential type - user will provide credentials") + # else: no credentialType - gateway uses pamMachine credentials directly (backward compatible) # Router token is no longer extracted from cookies (removed in commit 338a9fda) # Router affinity is now handled server-side @@ -1225,8 +1523,14 @@ def launch_terminal_connection(params: KeeperParams, logging.debug(f"Detected protocol: {protocol}") - # Step 2: Extract settings - settings = 
extract_terminal_settings(params, record_uid, protocol) + # Step 2: Extract settings (with optional CLI overrides) + settings = extract_terminal_settings( + params, + record_uid, + protocol, + launch_credential_uid=kwargs.get('launch_credential_uid'), + custom_host=kwargs.get('custom_host'), + ) logging.debug(f"Extracted settings: hostname={settings['hostname']}, port={settings['port']}") # Step 3: Build connection context From 2b7ef9f3802bff6c2728463830ffb795c00173a3 Mon Sep 17 00:00:00 2001 From: Ayrris Aunario Date: Mon, 12 Jan 2026 18:32:14 -0600 Subject: [PATCH 07/24] Fix compliance spinners and whoami expiration; add pytest (#1759) --- .gitignore | 1 + keepercommander/commands/aram.py | 63 +++++++-- keepercommander/commands/compliance.py | 87 ++++++++++--- keepercommander/commands/security_audit.py | 143 ++++++++++++++++++++- keepercommander/commands/utils.py | 35 +++-- keepercommander/display.py | 11 +- keepercommander/sox/__init__.py | 68 +++++++++- requirements-dev.txt | 1 + unit-tests/service/test_service_manager.py | 8 +- 9 files changed, 360 insertions(+), 57 deletions(-) diff --git a/.gitignore b/.gitignore index 0e3650881..7bc716bf0 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,4 @@ dr-logs CLAUDE.md AGENTS.md keeper_db.sqlite +.keeper-memory-mcp/ \ No newline at end of file diff --git a/keepercommander/commands/aram.py b/keepercommander/commands/aram.py index 2eaf77f77..ad7eeef9f 100644 --- a/keepercommander/commands/aram.py +++ b/keepercommander/commands/aram.py @@ -150,7 +150,7 @@ action_report_parser.add_argument('--days-since', '-d', dest='days_since', action='store', type=int, help='number of days since event of interest (e.g., login, record add/update, lock)') action_report_columns = {'name', 'status', 'transfer_status', 'node', 'team_count', 'teams', 'role_count', 'roles', - 'alias', '2fa_enabled'} + 'alias', '2fa_enabled', 'lock_time'} columns_help = f'comma-separated list of columns to show on report. 
Supported columns: {action_report_columns}' columns_help = re.sub('\'', '', columns_help) action_report_parser.add_argument('--columns', dest='columns', action='store', type=str, @@ -2100,6 +2100,33 @@ def get_no_action_users(candidate_users, days_since, event_types, name_key='user excluded = get_excluded(included, query_filter, name_key) return [user for user in candidate_users if user.get('username') not in excluded] + def chunk_list(items, chunk_size): + for i in range(0, len(items), chunk_size): + yield items[i:i + chunk_size] + + def get_latest_lock_times(usernames): + # type: (Set[str]) -> Dict[str, int] + if not usernames: + return {} + lock_times = {} + username_list = sorted({u.lower() for u in usernames if u}) + now_ts = int(datetime.datetime.now().timestamp()) + for chunk in chunk_list(username_list, API_EVENT_SUMMARY_ROW_LIMIT): + query_filter = { + 'audit_event_type': ['lock_user'], + 'to_username': chunk, + 'created': {'min': 0, 'max': now_ts} + } + rq = report_rq(query_filter, API_EVENT_SUMMARY_ROW_LIMIT, cols=['to_username'], report_type='span') + rs = api.communicate(params, rq) + events = rs.get('audit_event_overview_report_rows', []) + for event in events: + username = (event.get('to_username') or '').lower() + ts = int(event.get('last_created') or 0) + if username and ts: + lock_times[username] = max(lock_times.get(username, 0), ts) + return lock_times + def get_action_results_text(cmd, cmd_status, server_msg, affected): return f'\tCOMMAND: {cmd}\n\tSTATUS: {cmd_status}\n\tSERVER MESSAGE: {server_msg}\n\tAFFECTED: {affected}' @@ -2183,10 +2210,10 @@ def apply_admin_action(targets, status='no-update', action='none', dryrun=False) 'none': partial(run_cmd, targets, None, None, dryrun), 'lock': partial(run_cmd, targets, lambda: exec_fn(params, email=emails, lock=True, force=True, return_results=True), - 'lock', dry_run), + 'lock', dryrun), 'delete': partial(run_cmd, targets, lambda: exec_fn(params, email=emails, delete=True, force=True, 
return_results=True), - 'delete', dry_run), + 'delete', dryrun), 'transfer': partial(transfer_accounts, targets, kwargs.get('target_user'), dryrun) } @@ -2202,13 +2229,21 @@ def apply_admin_action(targets, status='no-update', action='none', dryrun=False) return action_handlers.get(action, lambda: invalid_action_msg)() if is_valid_action else invalid_action_msg - def get_report_data_and_headers(targets, output_fmt): - # type: (Set[str], str) -> Tuple[List[List[Any]], List[str]] + def get_report_data_and_headers(targets, output_fmt, columns=None, lock_times=None): + # type: (Set[str], str, Optional[str], Optional[Dict[str, int]]) -> Tuple[List[List[Any]], List[str]] cmd = EnterpriseInfoCommand() - output = cmd.execute(params, users=True, quiet=True, format='json', columns=kwargs.get('columns')) + output = cmd.execute(params, users=True, quiet=True, format='json', columns=columns) data = json.loads(output) - data = [u for u in data if u.get('email') in targets] - fields = next(iter(data)).keys() if data else [] + targets_lower = {t.lower() for t in targets if t} + data = [u for u in data if (u.get('email') or '').lower() in targets_lower] + if lock_times is not None: + for user in data: + email = (user.get('email') or '').lower() + lock_ts = lock_times.get(email) + user['lock_time'] = datetime.datetime.fromtimestamp(lock_ts) if lock_ts else None + fields = list(next(iter(data)).keys()) if data else [] + if lock_times is not None and 'lock_time' not in fields: + fields.append('lock_time') headers = [field_to_title(f) for f in fields] if output_fmt != 'json' else list(fields) data = [[user.get(f) for f in fields] for user in data] return data, headers @@ -2305,10 +2340,20 @@ def get_descendant_nodes(node_id): target_users = get_no_action_users(*args) usernames = {user['username'] for user in target_users} + columns_arg = kwargs.get('columns') + columns = {c.strip().lower() for c in columns_arg.split(',') if c.strip()} if columns_arg else set() + include_lock_time = 
('lock_time' in columns) if columns_arg else target_status == 'locked' + columns_param = None + if columns_arg: + columns_without_lock = [c for c in columns if c != 'lock_time'] + if columns_without_lock: + columns_param = ','.join(columns_without_lock) + admin_action = kwargs.get('apply_action', 'none') dry_run = kwargs.get('dry_run') fmt = kwargs.get('format', 'table') - report_data, report_headers = get_report_data_and_headers(usernames, fmt) + lock_times = get_latest_lock_times(usernames) if include_lock_time else None + report_data, report_headers = get_report_data_and_headers(usernames, fmt, columns=columns_param, lock_times=lock_times) action_msg = apply_admin_action(target_users, target_status, admin_action, dry_run) # Sync local enterprise data if changes were made diff --git a/keepercommander/commands/compliance.py b/keepercommander/commands/compliance.py index 49d59e8e9..c813850bb 100644 --- a/keepercommander/commands/compliance.py +++ b/keepercommander/commands/compliance.py @@ -10,6 +10,7 @@ from .base import GroupCommand, dump_report_data, field_to_title, report_output_parser from .enterprise_common import EnterpriseCommand from ..sox.sox_types import RecordPermissions +from ..display import Spinner from .helpers.reporting import filter_rows from .. 
import sox, api from ..error import CommandError @@ -375,6 +376,8 @@ def execute(self, params, **kwargs): # type: (KeeperParams, any) -> any def generate_report_data(self, params, kwargs, sox_data, report_fmt, node, root_node): # type: (KeeperParams, Dict[str, Any], sox.sox_data.SoxData, str, int, int) -> List[List[str]] + use_spinner = not params.batch_mode + def get_records_accessed_rq(email, filter_recs=None, created_max=None): # type: (str, Optional[List[str]], Optional[int]) -> Union[None, Dict[str, Any]] # Empty record filter list -> no records to search for @@ -456,14 +459,23 @@ def compile_user_report(user, access_events): def get_aging_data(rec_ids): if not rec_ids: return {} - aging_data = {r: {'created': None, 'last_modified': None, 'last_rotation': None} for r in rec_ids if r} + aging_data = {r: {'created': None, 'last_modified': None, 'last_rotation': None, 'last_pw_change': None} + for r in rec_ids if r} now = datetime.datetime.now() max_stored_age_dt = now - datetime.timedelta(days=1) max_stored_age_ts = int(max_stored_age_dt.timestamp()) stored_aging_data = {} if not kwargs.get('no_cache'): stored_entities = sox_data.storage.get_record_aging().get_all() - stored_aging_data = {e.record_uid: {'created': from_ts(e.created), 'last_modified': from_ts(e.last_modified), 'last_rotation': from_ts(e.last_rotation)} for e in stored_entities if e.record_uid} + stored_aging_data = { + e.record_uid: { + 'created': from_ts(e.created), + 'last_modified': from_ts(e.last_modified), + 'last_rotation': from_ts(e.last_rotation), + 'last_pw_change': from_ts(e.last_pw_change), + } + for e in stored_entities if e.record_uid + } aging_data.update(stored_aging_data) def get_requests(filter_recs, filter_type, order='descending', aggregate='last_created'): @@ -494,7 +506,8 @@ def get_request_params(record_aging_event): types_by_aging_event = dict( created = [], last_modified = ['record_update'], - last_rotation = ['record_rotation_scheduled_ok', 'record_rotation_on_demand_ok'] 
+ last_rotation = ['record_rotation_scheduled_ok', 'record_rotation_on_demand_ok'], + last_pw_change = ['record_password_change'] ) filter_types = types_by_aging_event.get(record_aging_event) order, aggregate = ('ascending', 'first_created') if record_aging_event == 'created' \ @@ -522,13 +535,31 @@ def get_aging_event_dts(event_type): record_timestamps = {event.get('record_uid'): event.get(aggregate) for event in events if event.get('record_uid')} return {rec: from_ts(ts) for rec, ts in record_timestamps.items()} - aging_stats = ['created', 'last_modified', 'last_rotation'] - record_events_by_stat = {stat: get_aging_event_dts(stat) for stat in aging_stats} + aging_stats = ['created', 'last_modified', 'last_rotation', 'last_pw_change'] + spinner = None + try: + if use_spinner: + should_fetch_events = any(get_request_params(stat)[0] for stat in aging_stats) + if should_fetch_events: + spinner = Spinner('Loading record aging events...') + spinner.start() + record_events_by_stat = {} + for stat in aging_stats: + if spinner: + spinner.message = f'Loading record aging events - {stat}' + record_events_by_stat[stat] = get_aging_event_dts(stat) + finally: + if spinner: + spinner.stop() for stat, record_event_dts in record_events_by_stat.items(): for record, dt in record_event_dts.items(): aging_data.get(record, {}).update({stat: dt}) stat == 'created' and aging_data.get(record, {}).setdefault('last_modified', dt) + for record, events in aging_data.items(): + if events.get('last_pw_change') is None: + events['last_pw_change'] = events.get('created') + if not kwargs.get('no_cache'): save_aging_data(aging_data) return aging_data @@ -542,12 +573,15 @@ def save_aging_data(aging_data): entity = existing_entities.get_entity(r) or StorageRecordAging(r) created_dt = events.get('created') created_ts = int(created_dt.timestamp()) if created_dt else 0 + pw_change_dt = events.get('last_pw_change') + pw_change_ts = int(pw_change_dt.timestamp()) if pw_change_dt else 0 modified_dt = 
events.get('last_modified') modified_ts = int(modified_dt.timestamp()) if modified_dt else 0 rotation_dt = events.get('last_rotation') rotation_ts = int(rotation_dt.timestamp()) if rotation_dt else 0 entity.created = created_ts + entity.last_pw_change = pw_change_ts entity.last_modified = modified_ts entity.last_rotation = rotation_ts updated_entities.append(entity) @@ -571,22 +605,33 @@ def get_records_accessed(emails, limit_to_vault=False): records_accessed_by_user = {e: dict() for e in emails} filters_by_user = {e: dict(filter_recs=get_rec_filter(e)) for e in emails} should_query = lambda rq_filter: rq_filter and (rq_filter.get('filter_recs') or not limit_to_vault) + spinner = None + total_users = len(emails) # Make requests in batches, walking backwards in time (w/ query filters) for all users in parallel (1 user per sub-request) - while True: - users_to_query = [user for user, user_filter in filters_by_user.items() if should_query(user_filter)] - if not users_to_query: - break - requests = [get_records_accessed_rq(email, **filters_by_user.get(email)) for email in users_to_query] - responses = api.execute_batch(params, requests) - responses_by_user = zip(users_to_query, responses) - for user, response in responses_by_user: - access_events = response.get('audit_event_overview_report_rows', []) - records_accessed = records_accessed_by_user.get(user, {}) - records_accessed_new, filters = process_access_events(access_events, filter_recs=filters_by_user.get(user, {}).get('filter_recs')) - for rec_uid, event in records_accessed_new.items(): - records_accessed.setdefault(rec_uid, event) - records_accessed_by_user.update({user: records_accessed}) - filters_by_user.update({user: filters}) + try: + while True: + users_to_query = [user for user, user_filter in filters_by_user.items() if should_query(user_filter)] + if not users_to_query: + break + if use_spinner and not spinner: + spinner = Spinner('Loading record access events...') + spinner.start() + if spinner: + 
spinner.message = f'Loading record access events - Users remaining: {len(users_to_query)}/{total_users}' + requests = [get_records_accessed_rq(email, **filters_by_user.get(email)) for email in users_to_query] + responses = api.execute_batch(params, requests) + responses_by_user = zip(users_to_query, responses) + for user, response in responses_by_user: + access_events = response.get('audit_event_overview_report_rows', []) + records_accessed = records_accessed_by_user.get(user, {}) + records_accessed_new, filters = process_access_events(access_events, filter_recs=filters_by_user.get(user, {}).get('filter_recs')) + for rec_uid, event in records_accessed_new.items(): + records_accessed.setdefault(rec_uid, event) + records_accessed_by_user.update({user: records_accessed}) + filters_by_user.update({user: filters}) + finally: + if spinner: + spinner.stop() return records_accessed_by_user from ..sox.storage_types import StorageRecordAging @@ -612,7 +657,7 @@ def get_records_accessed(emails, limit_to_vault=False): default_columns = ['vault_owner', 'record_uid', 'record_title', 'record_type', 'record_url', 'has_attachments', 'in_trash', 'record_owner', 'ip_address', 'device', 'last_access'] - aging_columns = ['created', 'last_modified', 'last_rotation'] if aging else [] + aging_columns = ['created', 'last_pw_change', 'last_modified', 'last_rotation'] if aging else [] self.report_headers = default_columns + aging_columns record_access_events = get_records_accessed(usernames, report_type != report_type_default) diff --git a/keepercommander/commands/security_audit.py b/keepercommander/commands/security_audit.py index f8fe874c0..7b0547ab6 100644 --- a/keepercommander/commands/security_audit.py +++ b/keepercommander/commands/security_audit.py @@ -40,6 +40,8 @@ def register_command_info(aliases, command_info): report_parser.add_argument('-su', '--show-updated', action='store_true', help='show updated data') report_parser.add_argument('-st', '--score-type', action='store', 
choices=['strong_passwords', 'default'], default='default', help='define how score is calculated') +record_detail_help = 'output per-record password strength details (requires incremental security data)' +report_parser.add_argument('--record-details', dest='record_details', action='store_true', help=record_detail_help) attempt_fix_help = ('do a "hard" sync for vaults with invalid security-data. Associated security scores are reset and ' 'will be inaccurate until affected vaults can re-calculate and update their security-data') report_parser.add_argument('--attempt-fix', action='store_true', help=attempt_fix_help) @@ -87,10 +89,21 @@ def __init__(self): securityScore security score twoFactorChannel 2FA - ON/OFF +Record Detail Report (--record-details) + email vault owner email + name vault owner name + record_uid record UID + strength password strength score + strength_category weak|fair|medium|strong + node node path + --report-type: csv CSV format json JSON format table Table format (default) + +Examples: + security-audit report --record-details --format csv --output security_audit_records.csv ''' @@ -194,6 +207,7 @@ def execute(self, params, **kwargs): return self.enterprise_private_rsa_key = None + self._node_map = None # Reset node cache for correct MC context self.params = params show_breachwatch = kwargs.get('breachwatch') @@ -218,14 +232,78 @@ def get_node_id(name_or_id): score_type = kwargs.get('score_type', 'default') save_report = kwargs.get('save') or attempt_fix show_updated = save_report or kwargs.get('show_updated') + record_details = kwargs.get('record_details') updated_security_reports = [] tree_key = params.enterprise['unencrypted_tree_key'] from_page = 0 complete = False rows = [] + record_detail_rows = [] + record_detail_fields = ('email', 'name', 'record_uid', 'strength', 'strength_category', 'node') + has_incremental_data = False + user_lookup = {} # {enterprise_user_id: {email, name, node_path}} for record_details + + # Build fresh user lookup 
from params.enterprise (reflects current MC context after switch-to-mc) + enterprise_user_lookup = {} + for u in params.enterprise.get('users', []): + enterprise_user_lookup[u['enterprise_user_id']] = u + rsa_key = get_enterprise_key(params, is_rsa=True) # type: Optional[rsa.RSAPrivateKey] ec_key = get_enterprise_key(params, is_rsa=False) # type: Optional[ec.EllipticCurvePrivateKey] + def get_strength_category(score): + if score is None: + return None + if utils.is_pw_strong(score): + return 'strong' + if utils.is_pw_fair(score): + return 'fair' + if utils.is_pw_weak(score): + return 'weak' + return 'medium' + + def decrypt_security_data(sec_data, key_type): + if not sec_data: + return None + try: + if key_type == enterprise_pb2.KT_ENCRYPTED_BY_PUBLIC_KEY_ECC: + if ec_key is None: + return None + decrypted_bytes = crypto.decrypt_ec(sec_data, ec_key) + else: + if rsa_key is None: + return None + decrypted_bytes = crypto.decrypt_rsa(sec_data, rsa_key) + except Exception as e: + decrypted_bytes = try_enterprise_decrypt(self.params, sec_data) + if not decrypted_bytes: + error = f'Decrypt fail (incremental data): {e}' + self.get_error_report_builder().update_report_data(error) + return None + + try: + decoded = decrypted_bytes.decode() + except UnicodeDecodeError: + error = f'Decode fail, incremental data (base 64):' + self.get_error_report_builder().update_report_data(error) + decoded_b64 = base64.b64encode(decrypted_bytes).decode('ascii') + self.get_error_report_builder().update_report_data(decoded_b64) + return None + except Exception as e: + error = f'Decode fail: {e}' + self.get_error_report_builder().update_report_data(error) + return None + + try: + return json.loads(decoded) + except JSONDecodeError: + error = f'Invalid JSON: {decoded}' + self.get_error_report_builder().update_report_data(error) + except Exception as e: + error = f'Load fail (incremental data). 
{e}' + self.get_error_report_builder().update_report_data(error) + return None + while not complete: rq = APIRequest_pb2.SecurityReportRequest() rq.fromPage = from_page @@ -247,13 +325,17 @@ def get_node_id(name_or_id): continue for sr in security_report_data_rs.securityReport: - user_info = self.resolve_user_info(params, sr.enterpriseUserId) - node_id = user_info.get('node_id', 0) + # Use fresh enterprise_user_lookup (not cached resolve_user_info) for correct MC context + eu = enterprise_user_lookup.get(sr.enterpriseUserId, {}) + node_id = eu.get('node_id', 0) if node_ids and node_id not in node_ids: continue - user = user_info['username'] if 'username' in user_info else str(sr.enterpriseUserId) - email = user_info['email'] if 'email' in user_info else str(sr.enterpriseUserId) - node_path = self.get_node_path(params, node_id) if node_id > 0 else '' + email = eu.get('username', str(sr.enterpriseUserId)) + user_data = eu.get('data') if isinstance(eu.get('data'), dict) else {} + user = user_data.get('displayname') or email + node_path = self.get_node_path(params, node_id, omit_root=True) + if record_details: + user_lookup[sr.enterpriseUserId] = {'email': email, 'name': user, 'node_path': node_path} twofa_on = False if sr.twoFactor == 'two_factor_disabled' else True row = { 'name': user, @@ -334,6 +416,48 @@ def get_node_id(name_or_id): rows.append(row) + # Fetch per-record security data via get_incremental_security_data (has recordUid) + if record_details and user_lookup: + continuation_token = b'' + while True: + inc_rq = APIRequest_pb2.IncrementalSecurityDataRequest() + if continuation_token: + inc_rq.continuationToken = continuation_token + try: + inc_rs = api.communicate_rest( + params, inc_rq, 'enterprise/get_incremental_security_data', + rs_type=APIRequest_pb2.IncrementalSecurityDataResponse) + except Exception as e: + logging.debug(f'get_incremental_security_data failed: {e}') + break + + for inc_data in inc_rs.securityReportIncrementalData: + record_uid = 
utils.base64_url_encode(inc_data.recordUid) if inc_data.recordUid else '' + if not record_uid: + continue + has_incremental_data = True + user_info = user_lookup.get(inc_data.enterpriseUserId) + if not user_info: + continue + curr_data = decrypt_security_data(inc_data.currentSecurityData, inc_data.currentDataEncryptionType) + if not curr_data: + continue + strength = curr_data.get('strength') + if strength is None: + continue + record_detail_rows.append([ + user_info['email'], + user_info['name'], + record_uid, + strength, + get_strength_category(strength), + user_info['node_path'] + ]) + + if not inc_rs.continuationToken: + break + continuation_token = inc_rs.continuationToken + fmt = kwargs.get('format', 'table') out = kwargs.get('output') @@ -356,6 +480,15 @@ def get_node_id(name_or_id): if save_report: self.save_updated_security_reports(params, updated_security_reports) + if record_details: + if not has_incremental_data: + logging.warning('No incremental security data available for record detail output.') + field_descriptions = record_detail_fields + if fmt == 'table': + field_descriptions = [field_to_title(x) for x in record_detail_fields] + report_title = 'Security Audit Report (Record Details)' + return dump_report_data(record_detail_rows, field_descriptions, fmt=fmt, filename=out, title=report_title) + fields = ('email', 'name', 'sync_pending', 'at_risk', 'passed', 'ignored') if show_breachwatch else \ ('email', 'name', 'sync_pending', 'weak', 'fair', 'medium', 'strong', 'reused', 'unique', 'securityScore', 'twoFactorChannel', 'node') diff --git a/keepercommander/commands/utils.py b/keepercommander/commands/utils.py index a46560670..d01819895 100644 --- a/keepercommander/commands/utils.py +++ b/keepercommander/commands/utils.py @@ -1499,16 +1499,29 @@ def yes_no(val): paid = x.get('paid') is True if paid: exp = x.get('expiration') - if exp > 0: - dt = datetime.datetime.fromtimestamp(exp // 1000) + datetime.timedelta(days=1) - n = datetime.datetime.now() - 
td = (dt - n).days - expires = str(dt.date()) - if td > 0: - expires += f' (in {td} days)' - else: - expires += f' ({Fore.RED}expired{Fore.RESET})' - print(label_value('Expires', expires)) + try: + exp_seconds = int(exp) // 1000 + except (TypeError, ValueError): + exp_seconds = 0 + if exp_seconds > 0: + try: + dt = datetime.datetime.fromtimestamp(exp_seconds) + except (OSError, OverflowError, ValueError): + # Avoid platform timestamp limits for far-future licenses. + try: + dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=exp_seconds) + except (OverflowError, ValueError): + dt = None + if dt: + dt = dt + datetime.timedelta(days=1) + n = datetime.datetime.now() + td = (dt - n).days + expires = str(dt.date()) + if td > 0: + expires += f' (in {td} days)' + else: + expires += f' ({Fore.RED}expired{Fore.RESET})' + print(label_value('Expires', expires)) seats_plan = x.get("number_of_seats", "") seats_active = x.get("seats_allocated", "") seats_invited = x.get("seats_pending", "") @@ -2494,4 +2507,4 @@ def on_sso_data_key(self, step): logging.debug(f"Non-interactive login failed: {e}") print("Not logged in") else: - print("Not logged in") \ No newline at end of file + print("Not logged in") diff --git a/keepercommander/display.py b/keepercommander/display.py index a05ca5911..7ebc9e46d 100644 --- a/keepercommander/display.py +++ b/keepercommander/display.py @@ -262,18 +262,25 @@ def __init__(self, message=""): self.message = message self.running = False self.thread = None + self._last_visible_len = 0 def _animate(self): idx = 0 while self.running: frame = self.FRAMES[idx % len(self.FRAMES)] - sys.stdout.write(f'\r{Fore.CYAN}{frame}{Fore.RESET} {self.message}') + message = self.message or '' + visible_len = len(message) + 2 # frame + space + message + pad = max(0, self._last_visible_len - visible_len) + sys.stdout.write(f'\r{Fore.CYAN}{frame}{Fore.RESET} {message}' + (' ' * pad)) sys.stdout.flush() + self._last_visible_len = visible_len + pad idx += 1 
time.sleep(0.08) # Clear the line when done - sys.stdout.write('\r' + ' ' * (len(self.message) + 4) + '\r') + clear_len = max(self._last_visible_len, len(self.message or '') + 2) + sys.stdout.write('\r' + ' ' * clear_len + '\r') sys.stdout.flush() + self._last_visible_len = 0 def start(self): self.running = True diff --git a/keepercommander/sox/__init__.py b/keepercommander/sox/__init__.py index 0fd70834d..cd101e888 100644 --- a/keepercommander/sox/__init__.py +++ b/keepercommander/sox/__init__.py @@ -6,6 +6,7 @@ from typing import Dict, Tuple from .. import api, crypto, utils +from ..display import Spinner # Module-level connection cache to ensure single connection per database _connection_cache = {} # type: Dict[str, sqlite3.Connection] @@ -75,6 +76,19 @@ def get_sox_database_name(params, enterprise_id): # type: (KeeperParams, int) - def get_prelim_data(params, enterprise_id=0, rebuild=False, min_updated=0, cache_only=False, no_cache=False, shared_only=False): # type: (KeeperParams, int, bool, int, bool, bool, bool) -> sox_data.SoxData def sync_down(name_by_id, store): # type: (Dict[int, str], sqlite_storage.SqliteSoxStorage) -> None + spinner = None + use_spinner = not params.batch_mode + + def start_spinner(): + nonlocal spinner + if use_spinner and not spinner: + spinner = Spinner('Loading record information...') + spinner.start() + + def stop_spinner(): + if spinner: + spinner.stop() + def to_storage_types(user_data, username_lookup): def to_record_entity(record): record_uid_bytes = record.recordUid @@ -108,10 +122,16 @@ def to_user_record_link(uuid, ruid): return user_ent, record_ents, user_rec_links def print_status(users_loaded, users_total, records_loaded, records_total): + message = (f'Loading record information - Users: {users_loaded}/{users_total}, ' + f'Current Batch: {records_loaded}/{records_total}') + if spinner: + spinner.message = message + return print('\r' + (100 * ' '), file=sys.stderr, end='', flush=True) - print(f'\rLoading record 
information - Users: {users_loaded}/{users_total}, Current Batch: {records_loaded}/{records_total}', file=sys.stderr, end='', flush=True) + print(f'\r{message}', file=sys.stderr, end='', flush=True) def sync_all(): + start_spinner() user_ids = list(user_lookup.keys()) users_total = len(user_ids) records_total = 0 @@ -170,8 +190,16 @@ def sync_all(): logging.error(f'Data could not fetched for the following users: \n{problem_emails}') store.rebuild_prelim_data(users, records, links) - sync_all() - print('', file=sys.stderr, flush=True) + success = False + try: + sync_all() + success = True + finally: + stop_spinner() + if spinner and success: + print('Preliminary compliance data loaded.', flush=True) + elif not spinner: + print('', file=sys.stderr, flush=True) validate_data_access(params) enterprise_id = enterprise_id or next(((x['node_id'] >> 32) for x in params.enterprise['nodes']), 0) @@ -200,12 +228,30 @@ def sync_all(): def get_compliance_data(params, node_id, enterprise_id=0, rebuild=False, min_updated=0, no_cache=False, shared_only=False): def sync_down(sdata, node_uid, user_node_id_lookup): recs_processed = 0 + spinner = None + use_spinner = not params.batch_mode + def print_status(pct_done): + message = f'Loading compliance data - {pct_done * 100:.2f}%' + if spinner: + spinner.message = message + return print('\r' + (100 * ' '), file=sys.stderr, end='', flush=True) - print(f'\rLoading compliance data - {pct_done * 100:.2f}%', file=sys.stderr, end='', flush=True) + print(f'\r{message}', file=sys.stderr, end='', flush=True) + + def start_spinner(): + nonlocal spinner + if use_spinner and not spinner: + spinner = Spinner('Loading compliance data...') + spinner.start() + + def stop_spinner(): + if spinner: + spinner.stop() def run_sync_tasks(): def do_tasks(): + start_spinner() print_status(0) users_uids = [int(uid) for uid in sdata.get_users()] record_uids_raw = [rec.record_uid_bytes for rec in sdata.get_records().values()] @@ -215,9 +261,17 @@ def do_tasks(): 
for chunk in ruid_chunks: sync_chunk(chunk, users_uids) sdata.storage.set_compliance_data_updated() - print('', file=sys.stderr, flush=True) - - do_tasks() + if not spinner: + print('', file=sys.stderr, flush=True) + + success = False + try: + do_tasks() + success = True + finally: + stop_spinner() + if spinner and success: + print('Compliance data loaded.', flush=True) def sync_chunk(chunk, uuids): rs = fetch_response(raw_ruids=chunk, user_uids=uuids) diff --git a/requirements-dev.txt b/requirements-dev.txt index a69ec7f4e..4e725ed30 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,2 +1,3 @@ radon pylint +pytest diff --git a/unit-tests/service/test_service_manager.py b/unit-tests/service/test_service_manager.py index 884ff204a..86d2a4827 100644 --- a/unit-tests/service/test_service_manager.py +++ b/unit-tests/service/test_service_manager.py @@ -68,11 +68,15 @@ def test_stop_service_when_running(self): with mock.patch('sys.platform', 'linux'), \ mock.patch('os.getpid', return_value=9999), \ - mock.patch('psutil.Process') as mock_process: + mock.patch('psutil.Process') as mock_process, \ + mock.patch('keepercommander.service.core.service_manager.ServiceManager.kill_process_by_pid', return_value=True) as mock_kill, \ + mock.patch('keepercommander.service.core.service_manager.ServiceManager.kill_ngrok_processes', return_value=False), \ + mock.patch('keepercommander.service.core.service_manager.ServiceManager.kill_cloudflare_processes', return_value=False): stop_cmd = StopService() stop_cmd.execute(self.params) + mock_kill.assert_called_once_with(12345) mock_process.return_value.terminate.assert_called_once() self.assertFalse(ProcessInfo._env_file.exists()) @@ -178,4 +182,4 @@ def test_start_service_with_missing_port(self): "Error: Service configuration is incomplete. 
Please configure the service port in service_config" ) - mock_app.run.assert_not_called() \ No newline at end of file + mock_app.run.assert_not_called() From 1998d651d9d1ef98649ed6eed8a280fb566d1646 Mon Sep 17 00:00:00 2001 From: pvagare-ks Date: Tue, 13 Jan 2026 20:02:29 +0530 Subject: [PATCH 08/24] Fix MSP addon validations, `msp-update`, and seat handling consistency (#1757) (#1761) --- keepercommander/commands/msp.py | 85 ++++++++++++++++++++++++++++----- keepercommander/constants.py | 2 + 2 files changed, 75 insertions(+), 12 deletions(-) diff --git a/keepercommander/commands/msp.py b/keepercommander/commands/msp.py index fbeb6ebb0..f42db59a0 100644 --- a/keepercommander/commands/msp.py +++ b/keepercommander/commands/msp.py @@ -27,6 +27,11 @@ from ..params import KeeperParams from ..proto import enterprise_pb2, BI_pb2, APIRequest_pb2 +# Addon name constants +KEPM_ADDON = 'keeper_endpoint_privilege_manager' +REMOTE_BROWSER_ISOLATION_ADDON = 'remote_browser_isolation' +CONNECTION_MANAGER_ADDON = 'connection_manager' + def register_commands(commands): commands['msp-down'] = GetMSPDataCommand() @@ -468,6 +473,17 @@ def execute(self, params, **kwargs): product_plan = next((x for x in constants.MSP_PLANS if product_id == x[1].lower()), None) if product_plan and product_plan[3] < file_plan[0]: rq['file_plan_type'] = file_plan[1] + else: + existing_file_plan = current_mc.get('file_plan_type') + if existing_file_plan: + product_id = rq['product_id'].lower() + product_plan = next((x for x in constants.MSP_PLANS if product_id == x[1].lower()), None) + if product_plan: + file_plan = next((x for x in constants.MSP_FILE_PLANS if x[1] == existing_file_plan), None) + if file_plan: + base_file_plan_id = product_plan[3] + if file_plan[0] != base_file_plan_id: + rq['file_plan_type'] = existing_file_plan addons = {} for ao in current_mc.get('add_ons', []): @@ -475,9 +491,9 @@ def execute(self, params, **kwargs): continue if ao.get('included_in_product') is True: continue - 
addon_name = ao['name'] + addon_name = ao['name'].lower() # Normalize to lowercase for consistency keep_addon = { - 'add_on': addon_name + 'add_on': ao['name'] } seats = ao.get('seats') if seats > 0: @@ -495,11 +511,21 @@ def execute(self, params, **kwargs): raise CommandError('msp-update',f'Addon \"{addon_name}\" is not found') addon_seats = 0 if sep == ':' and addon[2] and action == 'add_addon': - try: - addon_seats = int(seats) - except: - raise CommandError('msp-update', - f'Addon \"{addon_name}\". Number of seats \"{seats}\" is not integer') + if addon_name == KEPM_ADDON and seats.strip() == '-1': + addon_seats = 2147483647 + seats = '2147483647' + else: + try: + addon_seats = int(seats) + except: + raise CommandError('msp-update', + f'Addon \"{addon_name}\". Number of seats \"{seats}\" is not integer') + if addon_name == KEPM_ADDON: + valid_int_seats = {x for x in constants.KEPM_VALID_SEATS if isinstance(x, int)} + if addon_seats not in valid_int_seats and addon_seats != 2147483647: + valid_values = ', '.join(str(x) for x in sorted(valid_int_seats) + ['-1 (for unlimited)']) + raise CommandError('msp-update', + f'Addon \"{addon_name}\". Invalid seat value \"{seats}\". 
Valid values are: {valid_values}') if action == 'add_addon': if permits: if addon_name not in (x.lower() for x in permits['allowed_add_ons']): @@ -514,6 +540,19 @@ def execute(self, params, **kwargs): else: if addon_name in addons: del addons[addon_name] + + addon_names = {name.lower() for name in addons.keys()} + if REMOTE_BROWSER_ISOLATION_ADDON in addon_names: + if CONNECTION_MANAGER_ADDON not in addon_names: + raise CommandError('msp-update', + f'Addon \"{REMOTE_BROWSER_ISOLATION_ADDON}\" requires \"{CONNECTION_MANAGER_ADDON}\" to be selected') + cm_addon = addons.get(CONNECTION_MANAGER_ADDON) + if cm_addon: + cm_seats = cm_addon.get('seats', 0) + if not cm_seats or cm_seats == 0: + raise CommandError('msp-update', + f'Addon \"{REMOTE_BROWSER_ISOLATION_ADDON}\" requires \"{CONNECTION_MANAGER_ADDON}\" to have seats specified (e.g., {CONNECTION_MANAGER_ADDON}:N)') + rq['add_ons'] = list(addons.values()) rs = api.communicate(params, rq) if rs['result'] == 'success': @@ -932,6 +971,7 @@ def execute(self, params, **kwargs): addons = kwargs.get('addon') if isinstance(addons, list): rq['add_ons'] = [] + addon_data = {} # Track addon name -> seat count for validation for v in addons: addon_name, sep, seats = v.partition(':') addon_name = addon_name.lower() @@ -945,18 +985,39 @@ def execute(self, params, **kwargs): return addon_seats = 0 if sep == ':' and addon[2]: - try: - addon_seats = int(seats) - except: - logging.warning('Addon \"%s\". Number of seats \"%s\" is not integer', addon_name, seats) - return + if addon_name == KEPM_ADDON and seats.strip() == '-1': + addon_seats = 2147483647 # Use max int for unlimited, similar to seats handling + else: + try: + addon_seats = int(seats) + except: + logging.warning('Addon \"%s\". 
Number of seats \"%s\" is not integer', addon_name, seats) + return + if addon_name == KEPM_ADDON: + valid_int_seats = {x for x in constants.KEPM_VALID_SEATS if isinstance(x, int)} + if addon_seats not in valid_int_seats and addon_seats != 2147483647: + valid_values = ', '.join(str(x) for x in sorted(valid_int_seats) + ['-1 (for unlimited)']) + logging.warning('Addon \"%s\". Invalid seat value \"%s\". Valid values are: %s', addon_name, seats, valid_values) + return rqa = { 'add_on': addon[0] } if addon_seats > 0: rqa['seats'] = addon_seats + addon_data[addon_name] = addon_seats + else: + addon_data[addon_name] = 0 rq['add_ons'].append(rqa) + # Validate that Remote Browser Isolation requires Keeper Connection Manager with seats + if REMOTE_BROWSER_ISOLATION_ADDON in addon_data: + if CONNECTION_MANAGER_ADDON not in addon_data: + logging.warning('Addon \"%s\" requires \"%s\" to be selected', REMOTE_BROWSER_ISOLATION_ADDON, CONNECTION_MANAGER_ADDON) + return + if addon_data[CONNECTION_MANAGER_ADDON] == 0: + logging.warning('Addon \"%s\" requires \"%s\" to have seats specified (e.g., %s:N)', REMOTE_BROWSER_ISOLATION_ADDON, CONNECTION_MANAGER_ADDON, CONNECTION_MANAGER_ADDON) + return + company_id = -1 rs = api.communicate(params, rq) if rs: diff --git a/keepercommander/constants.py b/keepercommander/constants.py index 121c396e7..2d2842b5e 100644 --- a/keepercommander/constants.py +++ b/keepercommander/constants.py @@ -49,6 +49,8 @@ ('keeper_endpoint_privilege_manager', 'Keeper Endpoint Privilege Manager (KEPM)', True, 'KEPM'), ] +KEPM_VALID_SEATS = {1, 25, 50, 100, 500, 1000, 5000, 10000} + class PrivilegeScope(enum.IntEnum): All = 1, From 6b5169f21b4c49a33a367960817d54f22a9ddfbd Mon Sep 17 00:00:00 2001 From: pvagare-ks Date: Wed, 14 Jan 2026 10:27:00 +0530 Subject: [PATCH 09/24] Fix --force flag (#1762) --- keepercommander/commands/ksm.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/keepercommander/commands/ksm.py 
b/keepercommander/commands/ksm.py index 4ffa0055c..59a13e442 100644 --- a/keepercommander/commands/ksm.py +++ b/keepercommander/commands/ksm.py @@ -331,7 +331,7 @@ def execute(self, params, **kwargs): if len(client_names_or_ids) == 1 and client_names_or_ids[0] in ['*', 'all']: KSMCommand.remove_all_clients(params, app_name_or_uid, force) else: - KSMCommand.remove_client(params, app_name_or_uid, client_names_or_ids) + KSMCommand.remove_client(params, app_name_or_uid, client_names_or_ids, force) return @@ -954,10 +954,10 @@ def remove_all_clients(params, app_name_or_uid, force): client_ids_to_rem = [utils.base64_url_encode(c.clientId) for ai in app_info for c in ai.clients if c.appClientType == enterprise_pb2.GENERAL] if len(client_ids_to_rem) > 0: - KSMCommand.remove_client(params, app_name_or_uid, client_ids_to_rem) + KSMCommand.remove_client(params, app_name_or_uid, client_ids_to_rem, force=True) @staticmethod - def remove_client(params, app_name_or_uid, client_names_and_hashes): + def remove_client(params, app_name_or_uid, client_names_and_hashes, force=False): def convert_ids_and_hashes_to_hashes(cnahs, app_uid): @@ -996,7 +996,8 @@ def convert_ids_and_hashes_to_hashes(cnahs, app_uid): if found_clients_count == 0: print(bcolors.WARNING + "No Client Devices found with given name or ID\n" + bcolors.ENDC) return - else: + + if not force: uc = user_choice(f'\tAre you sure you want to delete {found_clients_count} matching clients from this application?', 'yn', default='n') if uc.lower() != 'y': From 8b29863b15f9e043edc7a4787f484a925a05de3b Mon Sep 17 00:00:00 2001 From: Micah Roberts Date: Thu, 8 Jan 2026 09:33:58 -0700 Subject: [PATCH 10/24] Get the portForward port to use in tunneling --- keepercommander/commands/tunnel_and_connections.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keepercommander/commands/tunnel_and_connections.py b/keepercommander/commands/tunnel_and_connections.py index 8ff224d4f..0f07017fd 100644 --- 
a/keepercommander/commands/tunnel_and_connections.py +++ b/keepercommander/commands/tunnel_and_connections.py @@ -567,7 +567,7 @@ def execute(self, params, **kwargs): print(f"{bcolors.FAIL}Hostname not found for record {record_uid}.{bcolors.ENDC}") return target_host = target.get_default_value().get('hostName', None) - target_port = target.get_default_value().get('port', None) + target_port = pam_settings_value.get("portForward", {}).get("port", target.get_default_value().get('port', None)) if not target_host: print(f"{bcolors.FAIL}Host not found for record {record_uid}.{bcolors.ENDC}") return From 01adbd99b7039d6c52d92ef11db47933113ac00e Mon Sep 17 00:00:00 2001 From: pvagare-ks Date: Wed, 14 Jan 2026 15:33:15 +0530 Subject: [PATCH 11/24] `msp-info` command enhancement (#1763) --- keepercommander/commands/msp.py | 63 +++++++++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 7 deletions(-) diff --git a/keepercommander/commands/msp.py b/keepercommander/commands/msp.py index f42db59a0..447f10f74 100644 --- a/keepercommander/commands/msp.py +++ b/keepercommander/commands/msp.py @@ -69,6 +69,8 @@ def register_command_info(aliases, command_info): msp_info_parser.add_argument('-r', '--restriction', dest='restriction', action='store_true', help='Display MSP restriction information') msp_info_parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='Print details') +msp_info_parser.add_argument('-mc', '--managed-company', dest='managed_company', action='store', + help='Filter by specific managed company (name or id)') # msp_info_parser.add_argument('-u', '--users', dest='users', action='store_true', help='print user list') msp_update_parser = argparse.ArgumentParser(prog='msp-update', usage='msp-update', @@ -371,30 +373,77 @@ def execute(self, params, **kwargs): if 'managed_companies' in params.enterprise: sort_dict = {x[0]: i for i, x in enumerate(constants.MSP_ADDONS)} verbose = kwargs.get('verbose') + company_filter = 
kwargs.get('managed_company') + + # Filter by company if specified + managed_companies = params.enterprise['managed_companies'] + if company_filter: + filtered_mc = get_mc_by_name_or_id(managed_companies, company_filter) + if not filtered_mc: + raise CommandError('msp-info', f'Managed Company "{company_filter}" not found') + managed_companies = [filtered_mc] + header = ['company_id', 'company_name', 'node', 'plan', 'storage', 'addons', 'allocated', 'active'] + if verbose: + # Add node_name field for verbose mode + header.insert(3, 'node_name') + table = [] plan_map = {x[1]: x[2] for x in constants.MSP_PLANS} file_plan_map = {x[1]: x[2] for x in constants.MSP_FILE_PLANS} - for mc in params.enterprise['managed_companies']: + + for mc in managed_companies: node_id = mc['msp_node_id'] if verbose: node_path = str(node_id) + node_name = self.get_node_path(params, node_id, False) else: node_path = self.get_node_path(params, node_id, False) + node_name = None + file_plan = mc['file_plan_type'] file_plan = file_plan_map.get(file_plan, file_plan) - addons = [x['name'] for x in mc.get('add_ons', [])] - addons.sort(key=lambda x: sort_dict.get(x, -1)) + + # Process addons + addon_list = [] + for addon_obj in mc.get('add_ons', []): + addon_name = addon_obj['name'] + if verbose: + seats = addon_obj.get('seats', 0) + if seats > 0: + addon_def = next((x for x in constants.MSP_ADDONS if x[0] == addon_name), None) + if addon_def and addon_def[2]: # addon_def[2] indicates if seats are supported + display_seats = -1 if seats == 2147483647 else seats + addon_list.append(f"{addon_name}:{display_seats}") + else: + addon_list.append(addon_name) + else: + addon_list.append(addon_name) + else: + addon_list.append(addon_name) + + addon_list.sort(key=lambda x: sort_dict.get(x.split(':')[0], -1)) + if not verbose: - addons = len(addons) + addons = len(addon_list) + else: + addons = addon_list + plan = mc['product_id'] if not verbose: plan = plan_map.get(plan, plan) + seats = 
mc['number_of_seats'] if seats > 2000000: seats = None - table.append([mc['mc_enterprise_id'], mc['mc_enterprise_name'], node_path, - plan, file_plan, addons, seats, mc['number_of_users']]) + + if verbose: + table.append([mc['mc_enterprise_id'], mc['mc_enterprise_name'], node_path, + node_name, plan, file_plan, addons, seats, mc['number_of_users']]) + else: + table.append([mc['mc_enterprise_id'], mc['mc_enterprise_name'], node_path, + plan, file_plan, addons, seats, mc['number_of_users']]) + table.sort(key=lambda x: x[1].lower()) if report_format != 'json': header = [field_to_title(x) for x in header] @@ -1531,4 +1580,4 @@ def find_roles(params, name): # type: (KeeperParams, str) -> Iterable[Dict] for role in params.enterprise.get('roles') or []: role_name = role['data'].get('displayname') or '' if role_name.casefold() == name.casefold(): - yield role + yield role \ No newline at end of file From 21aa1204a1108d94069499812e3ee37abe3320b4 Mon Sep 17 00:00:00 2001 From: pvagare-ks Date: Wed, 14 Jan 2026 18:39:47 +0530 Subject: [PATCH 12/24] 2fa fix --- keepercommander/commands/two_fa.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keepercommander/commands/two_fa.py b/keepercommander/commands/two_fa.py index e80e5ca13..39e33cc7f 100644 --- a/keepercommander/commands/two_fa.py +++ b/keepercommander/commands/two_fa.py @@ -202,7 +202,7 @@ def execute(self, params, **kwargs): }, 'type': 'public-key', # 'transports': ['usb'], - 'clientExtensionResults': response.client_extension_results or {} + 'clientExtensionResults': dict(response.client_extension_results) if response.client_extension_results else {} } rq_yubikey = APIRequest_pb2.TwoFactorValidateRequest() rq_yubikey.valueType = APIRequest_pb2.TWO_FA_RESP_WEBAUTHN From 53062ed42a88ae79d3a57bcacf8903301aa4cf0c Mon Sep 17 00:00:00 2001 From: amangalampalli-ks Date: Thu, 15 Jan 2026 20:26:44 +0530 Subject: [PATCH 13/24] Automate Docker-Service Mode & Slack app configuration (#1758) (#1768) * 
service-docker-setup and slack-app-setup initial commit * Update unit-test cases * Update and refactor code based on PR comments * Add validation checks for each prompt in service configuration via docker * Update slack-app-setup model, yaml label and service config prompts * Update record type to login instead of server --------- Co-authored-by: pvagare-ks Co-authored-by: Micah Roberts --- keepercommander/command_categories.py | 2 +- keepercommander/commands/start_service.py | 8 +- keepercommander/rest_api.py | 16 +- .../service/commands/create_service.py | 68 ++- .../service/commands/service_docker_setup.py | 318 ++++++++++++++ .../service/commands/slack_app_setup.py | 393 ++++++++++++++++++ .../service/config/config_validation.py | 19 +- .../service/config/service_config.py | 3 + keepercommander/service/docker/__init__.py | 37 ++ .../service/docker/compose_builder.py | 204 +++++++++ keepercommander/service/docker/models.py | 101 +++++ keepercommander/service/docker/printer.py | 84 ++++ keepercommander/service/docker/setup_base.py | 318 ++++++++++++++ unit-tests/service/test_config_validation.py | 4 +- unit-tests/service/test_create_service.py | 32 +- 15 files changed, 1580 insertions(+), 27 deletions(-) create mode 100644 keepercommander/service/commands/service_docker_setup.py create mode 100644 keepercommander/service/commands/slack_app_setup.py create mode 100644 keepercommander/service/docker/__init__.py create mode 100644 keepercommander/service/docker/compose_builder.py create mode 100644 keepercommander/service/docker/models.py create mode 100644 keepercommander/service/docker/printer.py create mode 100644 keepercommander/service/docker/setup_base.py diff --git a/keepercommander/command_categories.py b/keepercommander/command_categories.py index 4789cc6c1..c28d7b1ed 100644 --- a/keepercommander/command_categories.py +++ b/keepercommander/command_categories.py @@ -83,7 +83,7 @@ # Service Mode REST API 'Service Mode REST API': { 'service-create', 
'service-add-config', 'service-start', 'service-stop', 'service-status', - 'service-config-add' + 'service-config-add', 'service-docker-setup', 'slack-app-setup' }, # Email Configuration Commands diff --git a/keepercommander/commands/start_service.py b/keepercommander/commands/start_service.py index 4717edc83..62024e5e7 100644 --- a/keepercommander/commands/start_service.py +++ b/keepercommander/commands/start_service.py @@ -12,6 +12,8 @@ from ..service.commands.create_service import CreateService from ..service.commands.config_operation import AddConfigService from ..service.commands.handle_service import StartService, StopService, ServiceStatus +from ..service.commands.service_docker_setup import ServiceDockerSetupCommand +from ..service.commands.slack_app_setup import SlackAppSetupCommand def register_commands(commands): commands['service-create'] = CreateService() @@ -19,6 +21,8 @@ def register_commands(commands): commands['service-start'] = StartService() commands['service-stop'] = StopService() commands['service-status'] = ServiceStatus() + commands['service-docker-setup'] = ServiceDockerSetupCommand() + commands['slack-app-setup'] = SlackAppSetupCommand() def register_command_info(aliases, command_info): service_classes = [ @@ -26,7 +30,9 @@ def register_command_info(aliases, command_info): AddConfigService, StartService, StopService, - ServiceStatus + ServiceStatus, + ServiceDockerSetupCommand, + SlackAppSetupCommand ] for service_class in service_classes: diff --git a/keepercommander/rest_api.py b/keepercommander/rest_api.py index 315cf1d9a..e0b170edd 100644 --- a/keepercommander/rest_api.py +++ b/keepercommander/rest_api.py @@ -243,7 +243,21 @@ def execute_rest(context, endpoint, payload): run_request = True continue elif rs.status_code in (400, 500) and context.qrc_key_id is not None: - logging.warning(f"QRC request failed with {rs.status_code} error, falling back to EC encryption") + # Only fall back to EC if the error is QRC-specific + # Don't fall 
back for business logic errors (duplicate, access denied, etc.) + error_type = failure.get('error', '') + error_msg = failure.get('message', '').lower() + + # QRC-specific error indicators + qrc_errors = ['qrc', 'quantum', 'ml-kem', 'encryption', 'decryption', 'key'] + is_qrc_error = any(keyword in error_msg for keyword in qrc_errors) or error_type == 'crypto_error' + + # Business logic errors that shouldn't trigger fallback + business_errors = ['duplicate', 'already', 'permission', 'access', 'not found', 'invalid uid'] + is_business_error = any(keyword in error_msg for keyword in business_errors) + + if is_qrc_error and not is_business_error: + logging.warning(f"QRC encryption error ({error_msg}), falling back to EC encryption") context.disable_qrc() run_request = True continue diff --git a/keepercommander/service/commands/create_service.py b/keepercommander/service/commands/create_service.py index 612ccdb94..c77c062c0 100644 --- a/keepercommander/service/commands/create_service.py +++ b/keepercommander/service/commands/create_service.py @@ -33,6 +33,7 @@ class StreamlineArgs: fileformat : Optional[str] run_mode: Optional[str] queue_enabled: Optional[str] + update_vault_record: Optional[str] class CreateService(Command): """Command to create a new service configuration.""" @@ -72,6 +73,7 @@ def get_parser(self): parser.add_argument('-f', '--fileformat', type=str, help='file format') parser.add_argument('-rm', '--run_mode', type=str, help='run mode') parser.add_argument('-q', '--queue_enabled', type=str, help='enable request queue (y/n)') + parser.add_argument('-ur', '--update-vault-record', dest='update_vault_record', type=str, help='CSMD Config record UID to update with service metadata (Docker mode)') return parser def execute(self, params: KeeperParams, **kwargs) -> None: @@ -86,10 +88,15 @@ def execute(self, params: KeeperParams, **kwargs) -> None: config_data = self.service_config.create_default_config() - filtered_kwargs = {k: v for k, v in kwargs.items() if 
k in ['port', 'allowedip', 'deniedip', 'commands', 'ngrok', 'ngrok_custom_domain', 'cloudflare', 'cloudflare_custom_domain', 'certfile', 'certpassword', 'fileformat', 'run_mode', 'queue_enabled']} + filtered_kwargs = {k: v for k, v in kwargs.items() if k in ['port', 'allowedip', 'deniedip', 'commands', 'ngrok', 'ngrok_custom_domain', 'cloudflare', 'cloudflare_custom_domain', 'certfile', 'certpassword', 'fileformat', 'run_mode', 'queue_enabled', 'update_vault_record']} args = StreamlineArgs(**filtered_kwargs) self._handle_configuration(config_data, params, args) - self._create_and_save_record(config_data, params, args) + api_key = self._create_and_save_record(config_data, params, args) + + if args.update_vault_record and api_key: + actual_service_url = self._get_service_url(config_data) + self._update_vault_record_with_metadata(params, args.update_vault_record, actual_service_url, api_key) + self._upload_and_start_service(params) except ValidationError as e: @@ -107,7 +114,7 @@ def _handle_configuration(self, config_data: Dict[str, Any], params: KeeperParam self.config_handler.handle_interactive_config(config_data, params) self.security_handler.configure_security(config_data) - def _create_and_save_record(self, config_data: Dict[str, Any], params: KeeperParams, args: StreamlineArgs) -> None: + def _create_and_save_record(self, config_data: Dict[str, Any], params: KeeperParams, args: StreamlineArgs) -> Optional[str]: if args.port is None: self.config_handler._configure_run_mode(config_data) @@ -122,7 +129,60 @@ def _create_and_save_record(self, config_data: Dict[str, Any], params: KeeperPar if config_data.get("tls_certificate") == "y": self.service_config.save_cert_data(config_data, 'create') + # Return the API key for Docker mode + return record.get('api-key') + def _upload_and_start_service(self, params: KeeperParams) -> None: self.service_config.update_or_add_record(params) from ..core.service_manager import ServiceManager - ServiceManager.start_service() \ No 
newline at end of file + ServiceManager.start_service() + + def _get_service_url(self, config_data: Dict[str, Any]) -> str: + """Determine the actual service URL (ngrok, cloudflare, or localhost)""" + # Priority: ngrok > cloudflare > localhost + if config_data.get("ngrok_public_url"): + return config_data["ngrok_public_url"] + elif config_data.get("cloudflare_public_url"): + return config_data["cloudflare_public_url"] + else: + # Fallback to localhost with correct protocol + port = config_data.get("port", 8080) + protocol = "https" if config_data.get("tls_certificate") == "y" else "http" + return f"{protocol}://localhost:{port}" + + def _update_vault_record_with_metadata(self, params: KeeperParams, record_uid: str, service_url: str, api_key: str) -> None: + """Update CSMD Config vault record with service URL and API key as custom fields (Docker mode only)""" + try: + from ... import vault, record_management, api + + logger.debug(f"Updating vault record {record_uid} with service metadata...") + + # Load the CSMD Config record + record = vault.KeeperRecord.load(params, record_uid) + + # Add custom fields for service URL and API key + # service_url as URL field, api_key as secret field (hidden) + custom_fields = [ + vault.TypedField.new_field('url', service_url, 'service_url'), + vault.TypedField.new_field('secret', api_key, 'api_key'), + ] + + # Preserve existing custom fields if any + if hasattr(record, 'custom') and record.custom: + # Remove old service_url and api_key fields if they exist + existing_fields = [f for f in record.custom if f.label not in ['service_url', 'api_key']] + record.custom = existing_fields + custom_fields + else: + record.custom = custom_fields + + # Update the record + record_management.update_record(params, record) + params.sync_data = True + api.sync_down(params) + + logger.debug(f"Successfully updated vault record with service metadata") + + except Exception as e: + logger.error(f"Failed to update vault record with service metadata: 
{e}") + # Don't fail the whole service-create if vault update fails + logger.warning(f"Could not update vault record with service metadata: {e}") \ No newline at end of file diff --git a/keepercommander/service/commands/service_docker_setup.py b/keepercommander/service/commands/service_docker_setup.py new file mode 100644 index 000000000..aec0dd2a7 --- /dev/null +++ b/keepercommander/service/commands/service_docker_setup.py @@ -0,0 +1,318 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' ServiceConfig: + """Interactively get service configuration from user""" + DockerSetupPrinter.print_header("Service Mode Configuration") + + # Port + port = self._get_port_config() + + # Commands + commands = self._get_commands_config(params) + + # Queue mode + queue_enabled = self._get_queue_config() + + # Tunneling options (ngrok/cloudflare are mutually exclusive) + ngrok_config = self._get_ngrok_config() + + if not ngrok_config['ngrok_enabled']: + cloudflare_config = self._get_cloudflare_config() + + # TLS only if no tunneling + if not cloudflare_config['cloudflare_enabled']: + tls_config = self._get_tls_config() + else: + tls_config = {'tls_enabled': False, 'cert_file': '', 'cert_password': ''} + else: + cloudflare_config = { + 'cloudflare_enabled': False, 'cloudflare_tunnel_token': '', 'cloudflare_custom_domain': '' + } + tls_config = {'tls_enabled': False, 'cert_file': '', 'cert_password': ''} + + return ServiceConfig( + port=port, + commands=commands, + queue_enabled=queue_enabled, + ngrok_enabled=ngrok_config['ngrok_enabled'], + ngrok_auth_token=ngrok_config['ngrok_auth_token'], + ngrok_custom_domain=ngrok_config['ngrok_custom_domain'], + cloudflare_enabled=cloudflare_config['cloudflare_enabled'], + cloudflare_tunnel_token=cloudflare_config['cloudflare_tunnel_token'], + cloudflare_custom_domain=cloudflare_config['cloudflare_custom_domain'], + tls_enabled=tls_config['tls_enabled'], + cert_file=tls_config['cert_file'], + cert_password=tls_config['cert_password'] + ) + + def 
generate_docker_compose_yaml(self, setup_result: SetupResult, config: ServiceConfig) -> str: + """Generate docker-compose.yml content for Commander service""" + builder = DockerComposeBuilder(setup_result, asdict(config)) + return builder.build() + + def generate_and_save_docker_compose(self, setup_result: SetupResult, config: ServiceConfig) -> str: + """Generate and save docker-compose.yml file""" + print(f"\n{bcolors.BOLD}Generating docker-compose.yml...{bcolors.ENDC}") + yaml_content = self.generate_docker_compose_yaml(setup_result, config) + compose_file = os.path.join(os.getcwd(), 'docker-compose.yml') + + with open(compose_file, 'w') as f: + f.write(yaml_content) + + DockerSetupPrinter.print_success(f"docker-compose.yml created at {compose_file}", indent=True) + + return compose_file + + def print_standalone_success_message(self, setup_result: SetupResult, config: ServiceConfig, config_path: str) -> None: + """Print success message for standalone service-docker-setup command""" + print(f"\n{bcolors.BOLD}Resources Created:{bcolors.ENDC}") + DockerSetupPrinter.print_phase1_resources(setup_result) + + self._print_next_steps(config, config_path) + + def _print_next_steps(self, config: ServiceConfig, config_path: str) -> None: + """Print next steps for deployment""" + DockerSetupPrinter.print_common_deployment_steps(str(config.port), config_path) + print() # Add trailing newline + + # ======================== + # Configuration Input Methods + # ======================== + + def _get_port_config(self) -> int: + """Get and validate port configuration""" + print(f"{bcolors.BOLD}\nPort:{bcolors.ENDC}") + print(f" The port on which Commander Service will listen") + while True: + port_input = input(f"{bcolors.OKBLUE}Port [Press Enter for {DockerSetupConstants.DEFAULT_PORT}]:{bcolors.ENDC} ").strip() or str(DockerSetupConstants.DEFAULT_PORT) + try: + return ConfigValidator.validate_port(port_input) + except ValidationError as e: + print(f"{bcolors.FAIL}Error: 
{str(e)}{bcolors.ENDC}") + + def _get_commands_config(self, params) -> str: + """Get and validate commands configuration""" + from ..config.service_config import ServiceConfig + + service_config = ServiceConfig() + + print(f"\n{bcolors.BOLD}Allowed Commands:{bcolors.ENDC}") + print(f" Enter comma-separated commands (e.g., search,share-record,record-add)") + + while True: + commands = input(f"{bcolors.OKBLUE}Commands [Press Enter for '{DockerSetupConstants.DEFAULT_COMMANDS}']:{bcolors.ENDC} ").strip() or DockerSetupConstants.DEFAULT_COMMANDS + + try: + return service_config.validate_command_list(commands, params) + except ValidationError as e: + print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") + print(f"{bcolors.WARNING}Please try again with valid commands.{bcolors.ENDC}") + + def _get_queue_config(self) -> bool: + """Get queue mode configuration""" + print(f"\n{bcolors.BOLD}Queue Mode:{bcolors.ENDC}") + print(f" Queue mode enables async API (v2) for better performance") + queue_input = input(f"{bcolors.OKBLUE}Enable queue mode? [Press Enter for Yes] (y/n):{bcolors.ENDC} ").strip().lower() + return queue_input != 'n' + + def _get_ngrok_config(self) -> Dict[str, Any]: + """Get ngrok configuration""" + print(f"\n{bcolors.BOLD}Ngrok Tunneling (optional):{bcolors.ENDC}") + print(f" Generate a public URL for your service using ngrok") + use_ngrok = input(f"{bcolors.OKBLUE}Enable ngrok? 
[Press Enter for No] (y/n):{bcolors.ENDC} ").strip().lower() == 'y' + + config = {'ngrok_enabled': use_ngrok, 'ngrok_auth_token': '', 'ngrok_custom_domain': ''} + + if use_ngrok: + while True: + token = input(f"{bcolors.OKBLUE}Ngrok auth token:{bcolors.ENDC} ").strip() + try: + config['ngrok_auth_token'] = ConfigValidator.validate_ngrok_token(token) + break + except ValidationError as e: + print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") + + # Validate custom domain if provided (ngrok allows subdomain prefixes) + domain = input(f"{bcolors.OKBLUE}Ngrok custom domain [Press Enter to skip]:{bcolors.ENDC} ").strip() + if domain: + while True: + try: + config['ngrok_custom_domain'] = ConfigValidator.validate_domain(domain, require_tld=False) + break + except ValidationError as e: + print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") + domain = input(f"{bcolors.OKBLUE}Ngrok custom domain [Press Enter to skip]:{bcolors.ENDC} ").strip() + if not domain: + break + + return config + + def _get_cloudflare_config(self) -> Dict[str, Any]: + """Get Cloudflare configuration""" + print(f"\n{bcolors.BOLD}Cloudflare Tunneling (optional):{bcolors.ENDC}") + print(f" Generate a public URL for your service using Cloudflare") + use_cloudflare = input(f"{bcolors.OKBLUE}Enable Cloudflare? 
[Press Enter for No] (y/n):{bcolors.ENDC} ").strip().lower() == 'y' + + config = {'cloudflare_enabled': use_cloudflare, 'cloudflare_tunnel_token': '', 'cloudflare_custom_domain': ''} + + if use_cloudflare: + while True: + token = input(f"{bcolors.OKBLUE}Cloudflare tunnel token:{bcolors.ENDC} ").strip() + try: + config['cloudflare_tunnel_token'] = ConfigValidator.validate_cloudflare_token(token) + break + except ValidationError as e: + print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") + + while True: + domain = input(f"{bcolors.OKBLUE}Cloudflare custom domain:{bcolors.ENDC} ").strip() + try: + config['cloudflare_custom_domain'] = ConfigValidator.validate_domain(domain) + break + except ValidationError as e: + print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") + + return config + + def _get_tls_config(self) -> Dict[str, Any]: + """Get TLS configuration""" + print(f"\n{bcolors.BOLD}TLS Certificate (optional):{bcolors.ENDC}") + print(f" Use custom TLS certificate for HTTPS") + use_tls = input(f"{bcolors.OKBLUE}Enable TLS? 
[Press Enter for No] (y/n):{bcolors.ENDC} ").strip().lower() == 'y' + + config = {'tls_enabled': use_tls, 'cert_file': '', 'cert_password': ''} + + if use_tls: + while True: + cert_file = input(f"{bcolors.OKBLUE}Certificate file path:{bcolors.ENDC} ").strip() + try: + if cert_file and os.path.exists(cert_file): + config['cert_file'] = ConfigValidator.validate_cert_file(cert_file) + break + print(f"{bcolors.FAIL}Error: Certificate file not found{bcolors.ENDC}") + except ValidationError as e: + print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") + + # Certificate password validation (optional) + cert_password = input(f"{bcolors.OKBLUE}Certificate password:{bcolors.ENDC} ").strip() + if cert_password: + while True: + try: + config['cert_password'] = ConfigValidator.validate_certpassword(cert_password) + break + except ValidationError as e: + print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") + cert_password = input(f"{bcolors.OKBLUE}Certificate password:{bcolors.ENDC} ").strip() + if not cert_password: + break + + return config + + def _get_config_path(self, config_path: str = None) -> str: + """Get and validate config file path""" + if not config_path: + config_path = os.path.expanduser('~/.keeper/config.json') + + if not os.path.isfile(config_path): + raise CommandError('service-docker-setup', f'Config file not found: {config_path}') + + return config_path diff --git a/keepercommander/service/commands/slack_app_setup.py b/keepercommander/service/commands/slack_app_setup.py new file mode 100644 index 000000000..e8f4a7dcc --- /dev/null +++ b/keepercommander/service/commands/slack_app_setup.py @@ -0,0 +1,393 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' Tuple[SetupResult, Dict[str, Any], str]: + """ + Run the base Docker setup using ServiceDockerSetupCommand. 
+ Returns (SetupResult, service_config, config_path) + """ + docker_cmd = ServiceDockerSetupCommand() + + # Determine config path + config_path = kwargs.get('config_path') or os.path.expanduser('~/.keeper/config.json') + if not os.path.isfile(config_path): + raise CommandError('slack-app-setup', f'Config file not found: {config_path}') + + # Print header + DockerSetupPrinter.print_header("Docker Setup") + + # Run core setup steps (Steps 1-7) + setup_result = docker_cmd.run_setup_steps( + params=params, + folder_name=kwargs.get('folder_name', DockerSetupConstants.DEFAULT_FOLDER_NAME), + app_name=kwargs.get('app_name', DockerSetupConstants.DEFAULT_APP_NAME), + record_name=kwargs.get('config_record_name', DockerSetupConstants.DEFAULT_RECORD_NAME), + config_path=config_path, + timeout=kwargs.get('timeout', DockerSetupConstants.DEFAULT_TIMEOUT), + skip_device_setup=kwargs.get('skip_device_setup', False) + ) + + DockerSetupPrinter.print_completion("Docker Setup Complete!") + + # Get simplified service configuration for Slack App + service_config = self._get_slack_service_configuration() + + # Generate initial docker-compose.yml + docker_cmd.generate_and_save_docker_compose(setup_result, service_config) + + return setup_result, service_config, config_path + + def _get_slack_service_configuration(self) -> ServiceConfig: + """Get simplified service configuration for Slack App (only port needed)""" + DockerSetupPrinter.print_header("Service Mode Configuration") + + # Only ask for port with validation + print(f"{bcolors.BOLD}Port:{bcolors.ENDC}") + print(f" The port on which Commander Service will listen") + while True: + port_input = input(f"{bcolors.OKBLUE}Port [Press Enter for {DockerSetupConstants.DEFAULT_PORT}]:{bcolors.ENDC} ").strip() or str(DockerSetupConstants.DEFAULT_PORT) + try: + port = ConfigValidator.validate_port(port_input) + break + except ValidationError as e: + print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") + + # Fixed configuration for Slack App + 
return ServiceConfig( + port=port, + commands='search,share-record,share-folder,record-add,one-time-share,pedm,device-approve,get', + queue_enabled=True, # Always enable queue mode (v2 API) + ngrok_enabled=False, + ngrok_auth_token='', + ngrok_custom_domain='', + cloudflare_enabled=False, + cloudflare_tunnel_token='', + cloudflare_custom_domain='', + tls_enabled=False, + cert_file='', + cert_password='' + ) + + def _run_slack_setup(self, params, setup_result: SetupResult, service_config: ServiceConfig, + slack_record_name: str) -> Tuple[str, SlackConfig]: + """ + Run Slack-specific setup steps. + Returns (slack_record_uid, slack_config) + """ + # Get Slack configuration + DockerSetupPrinter.print_header("Slack App Configuration") + slack_config = self._get_slack_configuration() + + # Create Slack record + DockerSetupPrinter.print_step(1, 2, f"Creating Slack config record '{slack_record_name}'...") + slack_record_uid = self._create_slack_record( + params, + slack_record_name, + setup_result.folder_uid, + slack_config + ) + + # Update docker-compose.yml + DockerSetupPrinter.print_step(2, 2, "Updating docker-compose.yml with Slack App service...") + self._update_docker_compose_yaml(setup_result, service_config, slack_record_uid) + + return slack_record_uid, slack_config + + def _get_slack_configuration(self) -> SlackConfig: + """Interactively get Slack configuration from user""" + # Slack App Token + print(f"\n{bcolors.BOLD}SLACK_APP_TOKEN:{bcolors.ENDC}") + print(f" App-level token for Slack App") + slack_app_token = self._prompt_with_validation( + "Token (starts with xapp-):", + lambda t: t and t.startswith('xapp-') and len(t) >= 90, + "Invalid Slack App Token (must start with 'xapp-' and be at least 90 chars)" + ) + + # Slack Bot Token + print(f"\n{bcolors.BOLD}SLACK_BOT_TOKEN:{bcolors.ENDC}") + print(f" Bot token for Slack workspace") + slack_bot_token = self._prompt_with_validation( + "Token (starts with xoxb-):", + lambda t: t and t.startswith('xoxb-') and 
len(t) >= 50, + "Invalid Slack Bot Token (must start with 'xoxb-' and be at least 50 chars)" + ) + + # Slack Signing Secret + print(f"\n{bcolors.BOLD}SLACK_SIGNING_SECRET:{bcolors.ENDC}") + print(f" Signing secret for verifying Slack requests") + slack_signing_secret = self._prompt_with_validation( + "Secret:", + lambda s: s and len(s) == 32, + "Invalid Slack Signing Secret (must be exactly 32 characters)" + ) + + # Approvals Channel ID + print(f"\n{bcolors.BOLD}APPROVALS_CHANNEL_ID:{bcolors.ENDC}") + print(f" Slack channel ID for approval notifications") + approvals_channel_id = self._prompt_with_validation( + "Channel ID (starts with C):", + lambda c: c and c.startswith('C'), + "Invalid Approvals Channel ID (must start with 'C')" + ) + + # PEDM Integration (optional) + print(f"\n{bcolors.BOLD}PEDM (Endpoint Privilege Manager) Integration (optional):{bcolors.ENDC}") + print(f" Integrate with Keeper PEDM for privilege elevation") + pedm_enabled = input(f"{bcolors.OKBLUE}Enable PEDM? [Press Enter for No] (y/n):{bcolors.ENDC} ").strip().lower() == 'y' + pedm_polling_interval = 120 + if pedm_enabled: + interval_input = input(f"{bcolors.OKBLUE}PEDM polling interval in seconds [Press Enter for 120]:{bcolors.ENDC} ").strip() + pedm_polling_interval = int(interval_input) if interval_input else 120 + + # Device Approval Integration (optional) + print(f"\n{bcolors.BOLD}SSO Cloud Device Approval Integration (optional):{bcolors.ENDC}") + print(f" Approve SSO Cloud device registrations via Slack") + device_approval_enabled = input(f"{bcolors.OKBLUE}Enable Device Approval? 
[Press Enter for No] (y/n):{bcolors.ENDC} ").strip().lower() == 'y' + device_approval_polling_interval = 120 + if device_approval_enabled: + interval_input = input(f"{bcolors.OKBLUE}Device approval polling interval in seconds [Press Enter for 120]:{bcolors.ENDC} ").strip() + device_approval_polling_interval = int(interval_input) if interval_input else 120 + + print(f"\n{bcolors.OKGREEN}{bcolors.BOLD}✓ Slack Configuration Complete!{bcolors.ENDC}") + + return SlackConfig( + slack_app_token=slack_app_token, + slack_bot_token=slack_bot_token, + slack_signing_secret=slack_signing_secret, + approvals_channel_id=approvals_channel_id, + pedm_enabled=pedm_enabled, + pedm_polling_interval=pedm_polling_interval, + device_approval_enabled=device_approval_enabled, + device_approval_polling_interval=device_approval_polling_interval + ) + + def _prompt_with_validation(self, prompt: str, validator, error_msg: str) -> str: + """Helper method to prompt user input with validation""" + while True: + value = input(f"{bcolors.OKBLUE}{prompt}{bcolors.ENDC} ").strip() + if validator(value): + return value + print(f"{bcolors.FAIL}Error: {error_msg}{bcolors.ENDC}") + + def _create_slack_record(self, params, record_name: str, folder_uid: str, + slack_config: SlackConfig) -> str: + """Create or update Slack configuration record""" + # Check if record exists + record_uid = self._find_existing_record(params, folder_uid, record_name) + + if record_uid: + DockerSetupPrinter.print_success("Using existing record (will update with custom fields)") + else: + # Create new record + record_uid = self._create_basic_slack_record(params, folder_uid, record_name) + + # Update record with custom fields + self._update_slack_record_fields(params, record_uid, slack_config) + + DockerSetupPrinter.print_success(f"Slack config record ready (UID: {record_uid})") + return record_uid + + def _find_existing_record(self, params, folder_uid: str, record_name: str) -> Optional[str]: + """Find existing record by name in 
folder""" + if folder_uid in params.subfolder_record_cache: + for rec_uid in params.subfolder_record_cache[folder_uid]: + rec = api.get_record(params, rec_uid) + if rec.title == record_name: + return rec_uid + return None + + def _create_basic_slack_record(self, params, folder_uid: str, record_name: str) -> str: + """Create a basic login record for Slack configuration""" + try: + from ..config.cli_handler import CommandHandler + + cli_handler = CommandHandler() + cmd_add = f"record-add --folder='{folder_uid}' --title='{record_name}' --record-type=login" + cli_handler.execute_cli_command(params, cmd_add) + + api.sync_down(params) + + # Find the created record + record_uid = self._find_existing_record(params, folder_uid, record_name) + if not record_uid: + raise CommandError('slack-app-setup', 'Failed to find created Slack record') + + return record_uid + except Exception as e: + raise CommandError('slack-app-setup', f'Failed to create Slack record: {str(e)}') + + def _update_slack_record_fields(self, params, record_uid: str, slack_config: SlackConfig) -> None: + """Update record with Slack configuration custom fields""" + try: + record = vault.KeeperRecord.load(params, record_uid) + + # Add custom fields (secret fields are masked, text fields are visible) + record.custom = [ + vault.TypedField.new_field('secret', slack_config.slack_app_token, 'slack_app_token'), + vault.TypedField.new_field('secret', slack_config.slack_bot_token, 'slack_bot_token'), + vault.TypedField.new_field('secret', slack_config.slack_signing_secret, 'slack_signing_secret'), + vault.TypedField.new_field('text', slack_config.approvals_channel_id, 'approvals_channel_id'), + vault.TypedField.new_field('text', 'true' if slack_config.pedm_enabled else 'false', 'pedm_enabled'), + vault.TypedField.new_field('text', str(slack_config.pedm_polling_interval), 'pedm_polling_interval'), + vault.TypedField.new_field('text', 'true' if slack_config.device_approval_enabled else 'false', 
'device_approval_enabled'), + vault.TypedField.new_field('text', str(slack_config.device_approval_polling_interval), 'device_approval_polling_interval'), + ] + + record_management.update_record(params, record) + params.sync_data = True + api.sync_down(params) + + except Exception as e: + raise CommandError('slack-app-setup', f'Failed to update Slack record fields: {str(e)}') + + def _update_docker_compose_yaml(self, setup_result: SetupResult, service_config: ServiceConfig, + slack_record_uid: str) -> None: + """Regenerate docker-compose.yml with Slack app service""" + compose_file = os.path.join(os.getcwd(), 'docker-compose.yml') + + if not os.path.exists(compose_file): + raise CommandError('slack-app-setup', f'docker-compose.yml not found at {compose_file}') + + try: + # Check if slack-app already exists + with open(compose_file, 'r') as f: + content = f.read() + + if 'slack-app:' in content: + DockerSetupPrinter.print_warning("slack-app service already exists in docker-compose.yml") + return + + # Regenerate docker-compose.yml with both Commander and Slack App + builder = DockerComposeBuilder(setup_result, asdict(service_config)) + yaml_content = builder.add_slack_service(slack_record_uid).build() + + with open(compose_file, 'w') as f: + f.write(yaml_content) + + DockerSetupPrinter.print_success("docker-compose.yml updated successfully") + + except Exception as e: + raise CommandError('slack-app-setup', f'Failed to update docker-compose.yml: {str(e)}') + + def _print_success_message(self, setup_result: SetupResult, service_config: ServiceConfig, + slack_record_uid: str, slack_config: SlackConfig, config_path: str) -> None: + """Print consolidated success message for both phases""" + print(f"\n{bcolors.OKGREEN}{bcolors.BOLD}✓ Slack App Integration Setup Complete!{bcolors.ENDC}\n") + + # Resources created + print(f"{bcolors.BOLD}Resources Created:{bcolors.ENDC}") + print(f" {bcolors.BOLD}Phase 1 - Commander Service:{bcolors.ENDC}") + 
DockerSetupPrinter.print_phase1_resources(setup_result, indent=" ") + print(f" {bcolors.BOLD}Phase 2 - Slack App:{bcolors.ENDC}") + print(f" • Slack Config Record: {bcolors.OKBLUE}{slack_record_uid}{bcolors.ENDC}") + print(f" • Approvals Channel: {bcolors.OKBLUE}{slack_config.approvals_channel_id}{bcolors.ENDC}") + print(f" • PEDM Integration: {bcolors.OKBLUE}{'true' if slack_config.pedm_enabled else 'false'}{bcolors.ENDC}") + print(f" • Device Approval: {bcolors.OKBLUE}{'true' if slack_config.device_approval_enabled else 'false'}{bcolors.ENDC}") + + # Next steps + self._print_next_steps(service_config, config_path) + + def _print_next_steps(self, service_config: ServiceConfig, config_path: str) -> None: + """Print deployment next steps for Slack integration""" + DockerSetupPrinter.print_common_deployment_steps(str(service_config.port), config_path) + + # Slack-specific logs + print(f" {bcolors.OKGREEN}docker logs keeper-slack-app{bcolors.ENDC} - View Slack App logs") + + # Slack-specific commands + print(f"\n{bcolors.BOLD}Slack Commands Available:{bcolors.ENDC}") + print(f" {bcolors.OKGREEN}• /keeper-request-record{bcolors.ENDC} - Request access to a record") + print(f" {bcolors.OKGREEN}• /keeper-request-folder{bcolors.ENDC} - Request access to a folder") + print(f" {bcolors.OKGREEN}• /keeper-one-time-share{bcolors.ENDC} - Request a one-time share link\n") diff --git a/keepercommander/service/config/config_validation.py b/keepercommander/service/config/config_validation.py index 3e627fcad..e82b3ec04 100644 --- a/keepercommander/service/config/config_validation.py +++ b/keepercommander/service/config/config_validation.py @@ -20,7 +20,7 @@ class ConfigValidator: """Validator class for service configuration""" - MIN_PORT = 0 + MIN_PORT = 1024 # Minimum non-privileged port MAX_PORT = 65535 @staticmethod @@ -123,9 +123,16 @@ def validate_cloudflare_token(token: str) -> str: return token @staticmethod - def validate_domain(domain: str) -> str: - """Validate domain name 
format""" - logger.debug(f"Validating domain: {domain}") + def validate_domain(domain: str, require_tld: bool = True) -> str: + """ + Validate domain name format. + + Args: + domain: Domain name to validate + require_tld: If True, requires a full domain with TLD (e.g., example.com) + If False, allows subdomain prefixes (e.g., myapp for ngrok) + """ + logger.debug(f"Validating domain: {domain} (require_tld={require_tld})") if not domain or not domain.strip(): raise ValidationError("Domain cannot be empty") @@ -147,8 +154,8 @@ def validate_domain(domain: str) -> str: if label.startswith('-') or label.endswith('-'): raise ValidationError(f"Domain label '{label}' cannot start or end with hyphen") - if '.' not in domain: - raise ValidationError("Please provide a valid domain name") + if require_tld and '.' not in domain: + raise ValidationError("Please provide a valid domain name (e.g., example.com)") logger.debug("Domain validation successful") return domain diff --git a/keepercommander/service/config/service_config.py b/keepercommander/service/config/service_config.py index 1032b1b82..b72f16b6c 100644 --- a/keepercommander/service/config/service_config.py +++ b/keepercommander/service/config/service_config.py @@ -250,6 +250,9 @@ def _validate_config_structure(self, config: Dict[str, Any]) -> None: logger.debug("Validating ngrok configuration") self.validator.validate_ngrok_token(config_data.ngrok_auth_token) + if config_data.ngrok_custom_domain: + self.validator.validate_domain(config_data.ngrok_custom_domain, require_tld=False) + if config_data.cloudflare == 'y': logger.debug("Validating cloudflare configuration") self.validator.validate_cloudflare_token(config_data.cloudflare_tunnel_token) diff --git a/keepercommander/service/docker/__init__.py b/keepercommander/service/docker/__init__.py new file mode 100644 index 000000000..d10db96ce --- /dev/null +++ b/keepercommander/service/docker/__init__.py @@ -0,0 +1,37 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' str: 
+ """ + Build the complete docker-compose.yml content + + Returns: + YAML content as a string + """ + if 'commander' not in self._services: + self._services['commander'] = self._build_commander_service() + return self.to_yaml() + + def build_dict(self) -> Dict[str, Any]: + """ + Build the docker-compose structure as a dictionary + + Returns: + Dictionary structure ready for YAML serialization + """ + if 'commander' not in self._services: + self._services['commander'] = self._build_commander_service() + return {'services': self._services} + + def add_slack_service(self, slack_record_uid: str) -> 'DockerComposeBuilder': + """ + Add Slack App service to the compose configuration + + Args: + slack_record_uid: UID of the Slack config record + + Returns: + Self for method chaining + """ + # Ensure commander service exists first + if 'commander' not in self._services: + self._services['commander'] = self._build_commander_service() + # Add slack service + self._services['slack-app'] = self._build_slack_service(slack_record_uid) + return self + + def _build_commander_service(self) -> Dict[str, Any]: + """Build the Commander service configuration""" + self._build_service_command() + + service = { + 'container_name': 'keeper-service', + 'ports': [f"{self.config['port']}:{self.config['port']}"], + 'image': 'keeper/commander:latest', + 'command': ' '.join(self._service_cmd_parts), + 'healthcheck': self._build_healthcheck(), + 'restart': 'unless-stopped' + } + + if self._volumes: + service['volumes'] = self._volumes + + return service + + def _build_slack_service(self, slack_record_uid: str) -> Dict[str, Any]: + """Build the Slack App service configuration""" + return { + 'container_name': 'keeper-slack-app', + 'image': 'keeper/slack-app:latest', + 'environment': { + 'KSM_CONFIG': self.setup_result.b64_config, + 'COMMANDER_RECORD': self.setup_result.record_uid, + 'SLACK_RECORD': slack_record_uid + }, + 'depends_on': { + 'commander': { + 'condition': 'service_healthy' + } + }, + 
'restart': 'unless-stopped' + } + + def _build_service_command(self) -> None: + """Build the service-create command parts""" + port = self.config['port'] + commands = self.config['commands'] + queue_enabled = self.config.get('queue_enabled', True) + + self._service_cmd_parts = [ + f"service-create -p {port}", + f"-c '{commands}'", + "-f json", + f"-q {'y' if queue_enabled else 'n'}" + ] + + self._add_tunneling_options() + self._add_tls_options() + self._add_docker_options() + + def _add_tunneling_options(self) -> None: + """Add ngrok and Cloudflare tunneling options""" + # Ngrok configuration + if self.config.get('ngrok_enabled') and self.config.get('ngrok_token'): + self._service_cmd_parts.append(f"-ng {self.config['ngrok_token']}") + if self.config.get('ngrok_domain'): + self._service_cmd_parts.append(f"-cd {self.config['ngrok_domain']}") + + # Cloudflare configuration + if self.config.get('cloudflare_enabled') and self.config.get('cloudflare_token'): + self._service_cmd_parts.append(f"-cf {self.config['cloudflare_token']}") + if self.config.get('cloudflare_domain'): + self._service_cmd_parts.append(f"-cfd {self.config['cloudflare_domain']}") + + def _add_tls_options(self) -> None: + """Add TLS certificate options and volumes""" + if self.config.get('tls_enabled') and self.config.get('cert_file'): + cert_file = self.config['cert_file'] + cert_basename = os.path.basename(cert_file) + + self._service_cmd_parts.append(f"-crtf /certs/{cert_basename}") + if self.config.get('cert_password'): + self._service_cmd_parts.append(f"-crtp {self.config['cert_password']}") + + # Add volume mount for certificate + self._volumes.append(f"{cert_file}:/certs/{cert_basename}:ro") + + def _add_docker_options(self) -> None: + """Add Docker-specific parameters (KSM config, record UIDs)""" + self._service_cmd_parts.extend([ + f"-ur {self.setup_result.record_uid}", + f"--ksm-config {self.setup_result.b64_config}", + f"--record {self.setup_result.record_uid}" + ]) + + def 
_build_healthcheck(self) -> Dict[str, Any]: + """Build the healthcheck configuration""" + port = self.config['port'] + + # Build the Python script as a single-line command + health_script = ( + f"python -c \"import sys, urllib.request; " + f"sys.exit(0 if urllib.request.urlopen('http://localhost:{port}/health', timeout=2).status == 200 else 1)\"" + ) + + return { + 'test': ['CMD-SHELL', health_script], + 'interval': '60s', + 'timeout': '3s', + 'start_period': '10s', + 'retries': 30 + } + + def to_yaml(self) -> str: + """ + Convert the docker-compose structure to YAML string + + Returns: + YAML formatted string + """ + try: + import yaml + except ImportError: + # Fallback if PyYAML is not installed + raise ImportError("PyYAML is required for YAML generation. Install it with: pip install PyYAML") + + compose_dict = self.build_dict() + + # Use yaml.dump with proper settings + return yaml.dump( + compose_dict, + default_flow_style=False, + sort_keys=False, + indent=2, + width=float("inf") # Prevent line wrapping + ) + diff --git a/keepercommander/service/docker/models.py b/keepercommander/service/docker/models.py new file mode 100644 index 000000000..44824f9a2 --- /dev/null +++ b/keepercommander/service/docker/models.py @@ -0,0 +1,101 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' None: + """Print a formatted header""" + separator = "═" * 59 + print(f"\n{bcolors.BOLD}{separator}{bcolors.ENDC}") + print(f"{bcolors.BOLD} {title}{bcolors.ENDC}") + print(f"{bcolors.BOLD}{separator}{bcolors.ENDC}") + + @staticmethod + def print_step(step_num: int, total_steps: int, message: str) -> None: + """Print a step indicator""" + print(f"\n{bcolors.OKBLUE}[{step_num}/{total_steps}]{bcolors.ENDC} {message}") + + @staticmethod + def print_success(message: str, indent: bool = True) -> None: + """Print a success message""" + prefix = " " if indent else "" + print(f"{prefix}{bcolors.OKGREEN}✓{bcolors.ENDC} {message}") + + @staticmethod + def print_warning(message: str, indent: bool = 
True) -> None: + """Print a warning message""" + prefix = " " if indent else "" + print(f"{prefix}{bcolors.WARNING}⚠{bcolors.ENDC} {message}") + + @staticmethod + def print_completion(message: str) -> None: + """Print a completion message""" + print(f"\n{bcolors.OKGREEN}{bcolors.BOLD}✓ {message}{bcolors.ENDC}") + + @staticmethod + def print_phase1_resources(setup_result: SetupResult, indent: str = " ") -> None: + """Print Phase 1 resources created (folder, app, record, config)""" + print(f"{indent}• Shared Folder: {bcolors.OKBLUE}{setup_result.folder_name}{bcolors.ENDC}") + print(f"{indent}• KSM App: {bcolors.OKBLUE}{setup_result.app_name}{bcolors.ENDC} (with edit permissions)") + print(f"{indent}• Config Record: {bcolors.OKBLUE}{setup_result.record_uid}{bcolors.ENDC}") + print(f"{indent}• KSM Base64 Config: {bcolors.OKGREEN}✓ Generated{bcolors.ENDC}") + + @staticmethod + def print_common_deployment_steps(port: str, config_path: str = None) -> None: + """Print common deployment steps (header + steps 1-5)""" + DockerSetupPrinter.print_header("Next Steps to Deploy") + + print(f"\n{bcolors.BOLD}Step 1: Quit from this session{bcolors.ENDC}") + print(f" {bcolors.OKGREEN}quit{bcolors.ENDC}") + + config_file = config_path if config_path else '~/.keeper/config.json' + print(f"\n{bcolors.BOLD}Step 2: Delete the local config.json file{bcolors.ENDC}") + print(f" {bcolors.OKGREEN}rm {config_file}{bcolors.ENDC}") + print(f" Why? 
Prevents device token conflicts - Docker will download its own config.") + + print(f"\n{bcolors.BOLD}Step 3: Review docker-compose.yml{bcolors.ENDC}") + print(f" {bcolors.OKGREEN}cat docker-compose.yml{bcolors.ENDC}") + + print(f"\n{bcolors.BOLD}Step 4: Start the services{bcolors.ENDC}") + print(f" {bcolors.OKGREEN}docker compose up -d{bcolors.ENDC}") + + print(f"\n{bcolors.BOLD}Step 5: Check services health{bcolors.ENDC}") + print(f" {bcolors.OKGREEN}docker ps{bcolors.ENDC} - View container status") + print(f" {bcolors.OKGREEN}docker logs keeper-service{bcolors.ENDC} - View Commander logs") + print(f" {bcolors.OKGREEN}curl http://localhost:{port}/health{bcolors.ENDC} - Test health endpoint") + diff --git a/keepercommander/service/docker/setup_base.py b/keepercommander/service/docker/setup_base.py new file mode 100644 index 000000000..15b9a4c3f --- /dev/null +++ b/keepercommander/service/docker/setup_base.py @@ -0,0 +1,318 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' SetupResult: + """ + Core setup steps that can be reused by integration commands. + Returns a SetupResult object containing all the created resources. 
+ """ + # Total number of steps + total_steps = len(SetupStep) + + # Step 1: Device setup + if not skip_device_setup: + DockerSetupPrinter.print_step(SetupStep.DEVICE_SETUP.value, total_steps, "Checking device settings...") + self._setup_device(params, timeout) + else: + DockerSetupPrinter.print_step(SetupStep.DEVICE_SETUP.value, total_steps, "Skipping device setup (--skip-device-setup)") + + # Step 2: Create shared folder + DockerSetupPrinter.print_step(SetupStep.CREATE_FOLDER.value, total_steps, f"Creating shared folder '{folder_name}'...") + folder_uid = self._create_shared_folder(params, folder_name) + + # Step 3: Create config record + DockerSetupPrinter.print_step(SetupStep.CREATE_RECORD.value, total_steps, f"Creating record '{record_name}'...") + record_uid = self._create_config_record(params, record_name, folder_uid) + + # Step 4: Upload config file + DockerSetupPrinter.print_step(SetupStep.UPLOAD_CONFIG.value, total_steps, "Uploading config.json attachment...") + self._upload_config_file(params, record_uid, config_path) + + # Step 5: Create KSM app + DockerSetupPrinter.print_step(SetupStep.CREATE_KSM_APP.value, total_steps, f"Creating Secrets Manager app '{app_name}'...") + app_uid = self._create_ksm_app(params, app_name) + + # Step 6: Share folder with app + DockerSetupPrinter.print_step(SetupStep.SHARE_FOLDER.value, total_steps, "Sharing folder with app...") + self._share_folder_with_app(params, app_uid, folder_uid) + + # Step 7: Create client device + DockerSetupPrinter.print_step(SetupStep.CREATE_CLIENT.value, total_steps, "Creating client device and generating config...") + b64_config = self._create_client_device(params, app_uid, app_name) + + return SetupResult( + folder_uid=folder_uid, + folder_name=folder_name, + app_uid=app_uid, + app_name=app_name, + record_uid=record_uid, + b64_config=b64_config + ) + + # ======================== + # Core Setup Methods + # ======================== + + def _setup_device(self, params, timeout: str) -> None: + 
"""Check and setup device registration, persistent login, and timeout""" + from ...commands.utils import ThisDeviceCommand + + try: + device_info = ThisDeviceCommand.get_device_info(params) + + # Device registration + if not device_info.get('data_key_present', False): + DockerSetupPrinter.print_warning("Device not registered") + loginv3.LoginV3API.register_encrypted_data_key_for_device(params) + DockerSetupPrinter.print_success("Device registered successfully") + else: + DockerSetupPrinter.print_success("Device already registered") + + # Persistent login + if not device_info.get('persistent_login', False): + DockerSetupPrinter.print_warning("Persistent login disabled") + loginv3.LoginV3API.set_user_setting(params, 'persistent_login', '1') + DockerSetupPrinter.print_success("Persistent login enabled") + else: + DockerSetupPrinter.print_success("Persistent login already enabled") + + # Timeout + DockerSetupPrinter.print_success(f"Setting logout timeout to {timeout}...") + ThisDeviceCommand().execute(params, ops=['timeout', timeout]) + + except Exception as e: + raise CommandError('docker-setup', f'Device setup failed: {str(e)}') + + def _create_shared_folder(self, params, folder_name: str) -> str: + """Create shared folder or return existing one""" + # Check if folder exists + for folder_uid, folder in params.folder_cache.items(): + if folder.name == folder_name and folder_uid in params.shared_folder_cache: + DockerSetupPrinter.print_success("Using existing shared folder") + return folder_uid + + # Create new folder + try: + folder_cmd = FolderMakeCommand() + folder_uid = folder_cmd.execute( + params, + folder=folder_name, + shared_folder=True, + manage_users=True, + manage_records=True, + can_edit=True, + can_share=True + ) + api.sync_down(params) + DockerSetupPrinter.print_success(f"Shared folder created successfully (UID: {folder_uid})") + return folder_uid + except Exception as e: + raise CommandError('docker-setup', f'Failed to create shared folder: {str(e)}') + 
+ def _create_config_record(self, params, record_name: str, folder_uid: str) -> str: + """Create a config record or return existing one""" + # Check if record exists + if folder_uid in params.subfolder_record_cache: + for rec_uid in params.subfolder_record_cache[folder_uid]: + rec = api.get_record(params, rec_uid) + if rec.title == record_name: + DockerSetupPrinter.print_success("Using existing record") + return rec_uid + + # Create new record + try: + record = vault.KeeperRecord.create(params, 'login') + record.record_uid = utils.generate_uid() + record.record_key = utils.generate_aes_key() + record.title = record_name + record.type_name = 'login' + + record_management.add_record_to_folder(params, record, folder_uid) + api.sync_down(params) + + DockerSetupPrinter.print_success(f"Record created successfully (UID: {record.record_uid})") + return record.record_uid + except Exception as e: + raise CommandError('docker-setup', f'Failed to create record: {str(e)}') + + def _upload_config_file(self, params, record_uid: str, config_path: str) -> None: + """Upload config.json as attachment to the record""" + temp_config_path = None + try: + # Clean the config first + cleaned_config_path = self._clean_config_json(config_path) + if cleaned_config_path != config_path: + temp_config_path = cleaned_config_path + + record = vault.KeeperRecord.load(params, record_uid) + if not isinstance(record, (vault.PasswordRecord, vault.TypedRecord)): + raise CommandError('docker-setup', 'Invalid record type for attachments') + + # Upload attachment + upload_task = attachment.FileUploadTask(cleaned_config_path) + upload_task.title = 'config.json' + + attachment.upload_attachments(params, record, [upload_task]) + record_management.update_record(params, record) + params.sync_data = True + api.sync_down(params) + + DockerSetupPrinter.print_success("Config file uploaded successfully") + except Exception as e: + raise CommandError('docker-setup', f'Failed to upload config file: {str(e)}') + 
finally: + if temp_config_path and os.path.exists(temp_config_path): + try: + os.unlink(temp_config_path) + except OSError as e: + # Log or handle specifically + print(f"Warning: Could not delete temporary config file: {e}") + pass + + def _clean_config_json(self, config_path: str) -> str: + """Clean config.json by keeping only essential authentication keys""" + try: + with open(config_path, 'r') as f: + config_data = json.load(f) + + # Essential keys for authentication + essential_keys = { + 'server', 'user', 'device_token', 'private_key', + 'device_id', 'clone_code', 'session_token', 'data_key' + } + + cleaned_config = {k: v for k, v in config_data.items() if k in essential_keys} + removed_count = len(config_data) - len(cleaned_config) + + if removed_count > 0: + with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as tmp_file: + json.dump(cleaned_config, tmp_file, indent=2) + temp_path = tmp_file.name + + DockerSetupPrinter.print_success( + f"Config cleaned (kept {len(cleaned_config)} essential keys, removed {removed_count} non-essential)" + ) + return temp_path + else: + DockerSetupPrinter.print_success("Config is already minimal") + return config_path + + except Exception as e: + DockerSetupPrinter.print_warning(f"Could not clean config: {str(e)}") + return config_path + + def _create_ksm_app(self, params, app_name: str) -> str: + """Create KSM app or return existing one""" + # Check if app exists + existing_app = KSMCommand.get_app_record(params, app_name) + if existing_app: + DockerSetupPrinter.print_success("Using existing app") + return existing_app.get('record_uid') + + # Create new app + try: + # Suppress KSM command output + old_stdout = sys.stdout + sys.stdout = io.StringIO() + try: + KSMCommand.add_new_v5_app(params, app_name, force_to_add=False, format_type='table') + finally: + sys.stdout = old_stdout + + api.sync_down(params) + + app_rec = KSMCommand.get_app_record(params, app_name) + if not app_rec: + raise 
CommandError('docker-setup', 'Failed to retrieve created app') + + app_uid = app_rec.get('record_uid') + DockerSetupPrinter.print_success(f"App created successfully (UID: {app_uid})") + return app_uid + except Exception as e: + raise CommandError('docker-setup', f'Failed to create KSM app: {str(e)}') + + def _share_folder_with_app(self, params, app_uid: str, folder_uid: str) -> None: + """Share the folder with the KSM app""" + try: + app_rec = KSMCommand.get_app_record(params, app_uid) + if not app_rec: + raise CommandError('docker-setup', 'App not found') + + # Suppress output + old_stdout = sys.stdout + sys.stdout = io.StringIO() + try: + KSMCommand.add_app_share( + params, + secret_uids=[folder_uid], + app_name_or_uid=app_uid, + is_editable=True + ) + finally: + sys.stdout = old_stdout + + DockerSetupPrinter.print_success("Folder shared with app successfully") + except Exception as e: + raise CommandError('docker-setup', f'Failed to share folder with app: {str(e)}') + + def _create_client_device(self, params, app_uid: str, app_name: str) -> str: + """Create client device and return b64 config""" + try: + client_name = f"{app_name} Docker Client" + + tokens_and_devices = KSMCommand.add_client( + params=params, + app_name_or_uid=app_uid, + count=1, + unlock_ip=True, + first_access_expire_on=60, + access_expire_in_min=None, + client_name=client_name, + config_init='b64', + silent=True + ) + + if not tokens_and_devices or len(tokens_and_devices) == 0: + raise CommandError('docker-setup', 'Failed to generate client device') + + b64_config = tokens_and_devices[0]['config'] + DockerSetupPrinter.print_success("Client device created successfully") + + return b64_config + except Exception as e: + raise CommandError('docker-setup', f'Failed to create client device: {str(e)}') + diff --git a/unit-tests/service/test_config_validation.py b/unit-tests/service/test_config_validation.py index e8a3d97d1..f1482d090 100644 --- a/unit-tests/service/test_config_validation.py +++ 
b/unit-tests/service/test_config_validation.py @@ -13,7 +13,7 @@ def setUp(self): def test_validate_port_valid(self): """Test port validation with valid port numbers""" - test_ports = [80, 443, 8080, 1024, 65535] + test_ports = [1024, 8080, 8900, 9000, 65535] for port in test_ports: with self.subTest(port=port): with patch('socket.socket') as mock_socket: @@ -23,7 +23,7 @@ def test_validate_port_valid(self): def test_validate_port_invalid_number(self): """Test port validation with invalid port numbers""" - invalid_ports = [-1, 65536, 'abc', ''] + invalid_ports = [-1, 0, 80, 443, 1023, 65536, 'abc', ''] for port in invalid_ports: with self.subTest(port=port): with self.assertRaises(ValidationError): diff --git a/unit-tests/service/test_create_service.py b/unit-tests/service/test_create_service.py index d2acb10c6..4c61c839a 100644 --- a/unit-tests/service/test_create_service.py +++ b/unit-tests/service/test_create_service.py @@ -41,7 +41,7 @@ def test_execute_service_already_running(self, mock_service_manager): def test_handle_configuration_streamlined(self): """Test streamlined configuration handling.""" config_data = self.command.service_config.create_default_config() - args = StreamlineArgs(port=8080, commands='record-list', ngrok=None, allowedip='0.0.0.0' ,deniedip='', ngrok_custom_domain=None, cloudflare=None, cloudflare_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled='y') + args = StreamlineArgs(port=8080, commands='record-list', ngrok=None, allowedip='0.0.0.0' ,deniedip='', ngrok_custom_domain=None, cloudflare=None, cloudflare_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled='y', update_vault_record=None) with patch.object(self.command.config_handler, 'handle_streamlined_config') as mock_streamlined: self.command._handle_configuration(config_data, self.params, args) @@ -50,7 +50,7 @@ def test_handle_configuration_streamlined(self): def 
test_handle_configuration_interactive(self): """Test interactive configuration handling.""" config_data = self.command.service_config.create_default_config() - args = StreamlineArgs(port=None, commands=None, ngrok=None, allowedip='' ,deniedip='', ngrok_custom_domain=None, cloudflare=None, cloudflare_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled=None) + args = StreamlineArgs(port=None, commands=None, ngrok=None, allowedip='' ,deniedip='', ngrok_custom_domain=None, cloudflare=None, cloudflare_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled=None, update_vault_record=None) with patch.object(self.command.config_handler, 'handle_interactive_config') as mock_interactive, \ patch.object(self.command.security_handler, 'configure_security') as mock_security: @@ -61,7 +61,7 @@ def test_handle_configuration_interactive(self): def test_create_and_save_record(self): """Test record creation and saving.""" config_data = self.command.service_config.create_default_config() - args = StreamlineArgs(port=8080, commands='record-list', ngrok=None, allowedip='0.0.0.0' ,deniedip='', ngrok_custom_domain=None, cloudflare=None, cloudflare_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled='y') + args = StreamlineArgs(port=8080, commands='record-list', ngrok=None, allowedip='0.0.0.0' ,deniedip='', ngrok_custom_domain=None, cloudflare=None, cloudflare_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled='y', update_vault_record=None) with patch.object(self.command.service_config, 'create_record') as mock_create_record, \ patch.object(self.command.service_config, 'save_config') as mock_save_config: @@ -81,7 +81,7 @@ def test_create_and_save_record(self): def test_validation_error_handling(self): """Test handling of validation errors during execution.""" - args = 
StreamlineArgs(port=-1, commands='record-list', ngrok=None, allowedip='0.0.0.0' ,deniedip='', ngrok_custom_domain=None, cloudflare=None, cloudflare_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled='y') + args = StreamlineArgs(port=-1, commands='record-list', ngrok=None, allowedip='0.0.0.0' ,deniedip='', ngrok_custom_domain=None, cloudflare=None, cloudflare_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled='y', update_vault_record=None) with patch('builtins.print') as mock_print: with patch.object(self.command.service_config, 'create_default_config') as mock_create_config: @@ -106,7 +106,8 @@ def test_cloudflare_streamlined_configuration(self): certpassword='', fileformat='json', run_mode='foreground', - queue_enabled='y' + queue_enabled='y', + update_vault_record=None ) with patch.object(self.command.config_handler, 'handle_streamlined_config') as mock_streamlined: @@ -128,7 +129,8 @@ def test_cloudflare_validation_missing_token(self): certpassword='', fileformat='json', run_mode='foreground', - queue_enabled='y' + queue_enabled='y', + update_vault_record=None ) with patch('builtins.print') as mock_print: @@ -152,7 +154,8 @@ def test_cloudflare_validation_missing_domain(self): certpassword='', fileformat='json', run_mode='foreground', - queue_enabled='y' + queue_enabled='y', + update_vault_record=None ) with patch('builtins.print') as mock_print: @@ -176,7 +179,8 @@ def test_cloudflare_and_ngrok_mutual_exclusion(self): certpassword='', fileformat='json', run_mode='foreground', - queue_enabled='y' + queue_enabled='y', + update_vault_record=None ) with patch('builtins.print') as mock_print: @@ -211,7 +215,8 @@ def test_cloudflare_tunnel_startup_success(self, mock_cloudflare_configure): certpassword='', fileformat='json', run_mode='foreground', - queue_enabled='y' + queue_enabled='y', + update_vault_record=None ) with 
patch.object(self.command.config_handler, 'handle_streamlined_config') as mock_streamlined: @@ -257,7 +262,8 @@ def test_cloudflare_tunnel_startup_failure(self, mock_get_status, mock_start_ser certpassword='', fileformat='json', run_mode='foreground', - queue_enabled='y' + queue_enabled='y', + update_vault_record=None ) # Verify that the error was printed @@ -279,7 +285,8 @@ def test_cloudflare_token_validation(self): certpassword='', fileformat='json', run_mode='foreground', - queue_enabled='y' + queue_enabled='y', + update_vault_record=None ) with patch.object(self.command.config_handler, 'handle_streamlined_config') as mock_streamlined: @@ -303,7 +310,8 @@ def test_cloudflare_domain_validation(self): certpassword='', fileformat='json', run_mode='foreground', - queue_enabled='y' + queue_enabled='y', + update_vault_record=None ) with patch.object(self.command.config_handler, 'handle_streamlined_config') as mock_streamlined: From 89ee910a880ef38f42467fd772366613dccea81c Mon Sep 17 00:00:00 2001 From: Ayrris Aunario Date: Thu, 15 Jan 2026 16:38:43 -0600 Subject: [PATCH 14/24] KC-1102: Fix compliance-report chunking for >5000 users (#1769) * Update __init__.py fixed bug in batching of requests for both records and users * Fix chunk processing logic in batching requests for user IDs and problem IDs, removed inefficiency --- keepercommander/sox/__init__.py | 36 ++++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/keepercommander/sox/__init__.py b/keepercommander/sox/__init__.py index cd101e888..6d731d376 100644 --- a/keepercommander/sox/__init__.py +++ b/keepercommander/sox/__init__.py @@ -37,6 +37,7 @@ def close_cached_connection(database_name): # type: (str) -> None StorageSharedFolderTeamLink API_SOX_REQUEST_USER_LIMIT = 1000 +API_SOX_MAX_USERS_PER_REQUEST = 5000 # Server limit: MAX_CHOSEN_ENTERPRISE_USERS def validate_data_access(params, cmd=''): @@ -137,7 +138,8 @@ def sync_all(): records_total = 0 print_status(0, 
users_total, 0, records_total) users, records, links = [], [], [] - chunk_size = 1 + # Start with reasonable chunk size, back off on timeout + chunk_size = min(100, API_SOX_REQUEST_USER_LIMIT) problem_ids = set() while user_ids: token = b'' @@ -159,9 +161,9 @@ def sync_all(): if rs.totalMatchingRecords: current_batch_loaded = 0 records_total = rs.totalMatchingRecords - if records_total < 20 * API_SOX_REQUEST_USER_LIMIT: - # Adjust chunk size to optimize queries - chunk_size = min(chunk_size * 2, API_SOX_REQUEST_USER_LIMIT) + # Ramp up on success (regardless of record count) + if chunk_size < API_SOX_REQUEST_USER_LIMIT: + chunk_size = min(chunk_size * 2, API_SOX_REQUEST_USER_LIMIT) token = rs.continuationToken for user_data in rs.auditUserData: t_user, t_recs, t_links = to_storage_types(user_data, name_by_id) @@ -176,10 +178,10 @@ def sync_all(): if kae.message.lower() == 'gateway_timeout': # Break up the request if the number of corresponding records exceeds the backend's limit if chunk_size > 1: - chunk_size = 1 + chunk_size = max(1, chunk_size // 4) # Back off gradually user_ids = [*chunk, *user_ids] else: - problem_ids.update(*chunk) + problem_ids.update(chunk) break else: raise kae @@ -254,12 +256,22 @@ def do_tasks(): start_spinner() print_status(0) users_uids = [int(uid) for uid in sdata.get_users()] - record_uids_raw = [rec.record_uid_bytes for rec in sdata.get_records().values()] - max_len = API_SOX_REQUEST_USER_LIMIT - total_ruids = len(record_uids_raw) - ruid_chunks = [record_uids_raw[x:x + max_len] for x in range(0, total_ruids, max_len)] - for chunk in ruid_chunks: - sync_chunk(chunk, users_uids) + records_by_uid = {rec.record_uid: rec.record_uid_bytes for rec in sdata.get_records().values()} + max_records = API_SOX_REQUEST_USER_LIMIT + max_users = API_SOX_MAX_USERS_PER_REQUEST + user_chunks = [users_uids[x:x + max_users] for x in range(0, len(users_uids), max_users)] or [users_uids] + for user_chunk in user_chunks: + # Get records owned by users in 
this chunk + chunk_record_uids = set() + for uid in user_chunk: + user = sdata.get_user(uid) + if user: + chunk_record_uids.update(user.records) + chunk_records_raw = [records_by_uid[r] for r in chunk_record_uids if r in records_by_uid] + # Chunk records by API limit + ruid_chunks = [chunk_records_raw[x:x + max_records] for x in range(0, len(chunk_records_raw), max_records)] + for ruid_chunk in (ruid_chunks or [[]]): + sync_chunk(ruid_chunk, user_chunk) sdata.storage.set_compliance_data_updated() if not spinner: print('', file=sys.stderr, flush=True) From 0d0953c1a850043c69805a929f269cbe9809b22e Mon Sep 17 00:00:00 2001 From: pvagare-ks Date: Fri, 16 Jan 2026 22:25:37 +0530 Subject: [PATCH 15/24] Enterprise command improvements and bug fixes (#1766) (#1770) --- keepercommander/commands/enterprise.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/keepercommander/commands/enterprise.py b/keepercommander/commands/enterprise.py index a33e9591a..46473dc50 100644 --- a/keepercommander/commands/enterprise.py +++ b/keepercommander/commands/enterprise.py @@ -99,7 +99,7 @@ def register_command_info(aliases, command_info): security_audit.register_command_info(aliases, command_info) -SUPPORTED_NODE_COLUMNS = ['parent_node', 'user_count', 'users', 'team_count', 'teams', 'role_count', 'roles', +SUPPORTED_NODE_COLUMNS = ['parent_node', 'parent_id', 'user_count', 'users', 'team_count', 'teams', 'role_count', 'roles', 'provisioning'] SUPPORTED_USER_COLUMNS = ['name', 'status', 'transfer_status', 'node', 'team_count', 'teams', 'role_count', 'roles', 'alias', '2fa_enabled'] @@ -666,7 +666,7 @@ def tree_node(node): if show_nodes: supported_columns = SUPPORTED_NODE_COLUMNS if len(columns) == 0: - columns.update(('parent_node', 'user_count', 'team_count', 'role_count')) + columns.update(('parent_node', 'parent_id', 'user_count', 'team_count', 'role_count')) else: wc = columns.difference(supported_columns) if len(wc) > 0: @@ -714,6 +714,9 @@ def 
tree_node(node): if column == 'parent_node': parent_id = n.get('parent_id', 0) row.append(self.get_node_path(params, parent_id) if parent_id > 0 else '') + elif column == 'parent_id': + parent_id = n.get('parent_id', 0) + row.append(parent_id if parent_id > 0 else None) elif column == 'user_count': us = n.get('users', []) row.append(len(us)) @@ -1384,8 +1387,7 @@ def is_in_chain(node_id, parent_id): 'node_id': node['node_id'], 'encrypted_data': encrypted_data } - if parent_id: - rq['parent_id'] = parent_id + rq['parent_id'] = parent_id if parent_id else node.get('parent_id') request_batch.append(rq) if request_batch: @@ -1394,7 +1396,8 @@ def is_in_chain(node_id, parent_id): command = rq.get('command') if command == 'node_add': if rs['result'] == 'success': - logging.info('Node is created') + node_id = rq.get('node_id') + logging.info('Node is created with Node ID: %s', node_id) else: logging.warning('Failed to create node: %s', rs['message']) elif command in {'node_delete', 'node_update'}: @@ -3089,6 +3092,7 @@ def execute(self, params, **kwargs): matched_teams = list(matched.values()) request_batch = [] non_batch_update_msgs = [] + has_warnings = False if kwargs.get('add') or kwargs.get('approve'): queue = [] @@ -3184,6 +3188,7 @@ def execute(self, params, **kwargs): users[user_id] = is_add, user_node else: logging.warning('User %s could not be resolved', u) + has_warnings = True if len(users) > 0: for team in matched_teams: @@ -3203,6 +3208,10 @@ def execute(self, params, **kwargs): if t['team_uid'] == team_uid and t['enterprise_user_id'] == user_id) if is_added: if not hsf: + username = user['username'] + team_name = team['name'] + logging.warning('User %s is already a member of team \'%s\'', username, team_name) + has_warnings = True continue rq = { 'command': 'team_enterprise_user_update', @@ -3311,7 +3320,7 @@ def execute(self, params, **kwargs): for update_msg in non_batch_update_msgs: logging.info(update_msg) api.query_enterprise(params) - else: + elif not 
has_warnings: for team in matched_teams: print('\n') self.display_team(params, team, kwargs.get('verbose')) From 863e45d7ab7271f0ef1bb71ff0b2690891ec2301 Mon Sep 17 00:00:00 2001 From: lthievenaz-keeper Date: Thu, 15 Jan 2026 12:21:13 +0000 Subject: [PATCH 16/24] Add cascade and node privileges to JSON enterprise role Currently running enterprise role in text format yields cascade permissions and node privileges: `er -v 'Keeper Administrator' --format text` This isn't included in the JSON output: `er -v 'Keeper Administrator' --format json` This commit adds the same logic as the text format (ignoring hidden privileges or MSP privileges if user is not MSP), and shows the result in the following format: ``` "managed_nodes": [ { "node_id": 1067368092532738, "node_name": "Demo Node", "cascade": true, "privileges": [ "manage_user", "manage_nodes", "manage_roles", "manage_teams", "transfer_account", "run_reports", "manage_bridge", "manage_record_types", "approve_device", "run_compliance_reports", "sharing_administrator" ] } ], ``` --- keepercommander/commands/enterprise.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/keepercommander/commands/enterprise.py b/keepercommander/commands/enterprise.py index 46473dc50..e0b4b1283 100644 --- a/keepercommander/commands/enterprise.py +++ b/keepercommander/commands/enterprise.py @@ -2847,11 +2847,29 @@ def dump_role_json(self, params, role): if 'managed_nodes' in params.enterprise: node_ids = [x['managed_node_id'] for x in params.enterprise['managed_nodes'] if x['role_id'] == role_id] + is_msp = EnterpriseCommand.is_msp(params) if len(node_ids) > 0: nodes = {x['node_id']: x['data'].get('displayname') or params.enterprise['enterprise_name'] for x in params.enterprise['nodes']} + privileges = {} + supported_privileges = {x[1].lower(): x[2] for x in constants.ROLE_PRIVILEGES} + for rp in params.enterprise.get('role_privileges', []): + privilege = rp['privilege'].lower() + if rp['role_id'] != 
role_id: + continue + elif privilege not in supported_privileges: + continue + elif supported_privileges[privilege] == constants.PrivilegeScope.Hidden or (supported_privileges[privilege] == constants.PrivilegeScope.MSP and not is_msp): + continue + + if rp['managed_node_id'] not in privileges: + privileges[rp['managed_node_id']] = [] + privileges[rp['managed_node_id']].append(privilege) + ret['managed_nodes'] = [{ 'node_id': x, - 'node_name': nodes[x] + 'node_name': nodes.get(x, None), + 'cascade': [x['cascade_node_management'] for x in params.enterprise['managed_nodes'] if x['role_id'] == role_id][0], + 'privileges': privileges.get(x, None) } for x in node_ids if x in nodes] if 'role_enforcements' in params.enterprise: @@ -4365,4 +4383,4 @@ def _handle_api_error(self, error, domain, action, output_format): 'action': action, }, indent=2) - logging.error(error_msg) \ No newline at end of file + logging.error(error_msg) From 6ac3d2b44018515213ec9428d0c6c83997ce9c4b Mon Sep 17 00:00:00 2001 From: lthievenaz-keeper Date: Thu, 15 Jan 2026 12:55:55 +0000 Subject: [PATCH 17/24] Fix expression for cascade bool --- keepercommander/commands/enterprise.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keepercommander/commands/enterprise.py b/keepercommander/commands/enterprise.py index e0b4b1283..f4c2c7712 100644 --- a/keepercommander/commands/enterprise.py +++ b/keepercommander/commands/enterprise.py @@ -2868,7 +2868,7 @@ def dump_role_json(self, params, role): ret['managed_nodes'] = [{ 'node_id': x, 'node_name': nodes.get(x, None), - 'cascade': [x['cascade_node_management'] for x in params.enterprise['managed_nodes'] if x['role_id'] == role_id][0], + 'cascade': [y['cascade_node_management'] for y in params.enterprise['managed_nodes'] if y['role_id'] == role_id and y['managed_node_id'] == x][0], 'privileges': privileges.get(x, None) } for x in node_ids if x in nodes] From 0177b4ed2191102197da3ad4c101e4d7522bfa48 Mon Sep 17 00:00:00 2001 From: John Walstra 
Date: Mon, 19 Jan 2026 20:23:25 -0600 Subject: [PATCH 18/24] DR-1173 Add configuration UID param to `pam action service` commands. Allow the user to select configuration UID if gateway has multiple configurations for `pam action service` commands. --- .../commands/discover/job_start.py | 1 - keepercommander/commands/pam_service/add.py | 16 +++++++--- keepercommander/commands/pam_service/list.py | 32 +++++++++++-------- .../commands/pam_service/remove.py | 18 ++++++++--- 4 files changed, 44 insertions(+), 23 deletions(-) diff --git a/keepercommander/commands/discover/job_start.py b/keepercommander/commands/discover/job_start.py index e7f701990..1093ec402 100644 --- a/keepercommander/commands/discover/job_start.py +++ b/keepercommander/commands/discover/job_start.py @@ -101,7 +101,6 @@ def execute(self, params, **kwargs): # Load the configuration record and get the gateway_uid from the facade. gateway = kwargs.get('gateway') - gateway_context = None try: gateway_context = GatewayContext.from_gateway(params=params, gateway=gateway, diff --git a/keepercommander/commands/pam_service/add.py b/keepercommander/commands/pam_service/add.py index 2fae31efa..e471a5c4f 100644 --- a/keepercommander/commands/pam_service/add.py +++ b/keepercommander/commands/pam_service/add.py @@ -1,6 +1,6 @@ from __future__ import annotations import argparse -from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext +from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext, MultiConfigurationException, multi_conf_msg from ...display import bcolors from ... import vault from ...discovery_common.user_service import UserService @@ -22,6 +22,8 @@ class PAMActionServiceAddCommand(PAMGatewayActionDiscoverCommandBase): # The record to base everything on. 
parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store', help='Gateway name or UID') + parser.add_argument('--configuration-uid', '-c', required=False, dest='configuration_uid', + action='store', help='PAM configuration UID, if gateway has multiple.') parser.add_argument('--machine-uid', '-m', required=True, dest='machine_uid', action='store', help='The UID of the Windows Machine record') @@ -42,9 +44,15 @@ def execute(self, params: KeeperParams, **kwargs): print("") - gateway_context = GatewayContext.from_gateway(params, gateway) - if gateway_context is None: - print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.") + try: + gateway_context = GatewayContext.from_gateway(params=params, + gateway=gateway, + configuration_uid=kwargs.get('configuration_uid')) + if gateway_context is None: + print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.{bcolors.ENDC}") + return + except MultiConfigurationException as err: + multi_conf_msg(gateway, err) return if gateway_context is None: diff --git a/keepercommander/commands/pam_service/list.py b/keepercommander/commands/pam_service/list.py index 289a83dac..56ba15d0c 100644 --- a/keepercommander/commands/pam_service/list.py +++ b/keepercommander/commands/pam_service/list.py @@ -1,6 +1,6 @@ from __future__ import annotations import argparse -from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext +from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext, MultiConfigurationException, multi_conf_msg from ...display import bcolors from ... import vault from ...discovery_common.user_service import UserService @@ -20,6 +20,8 @@ class PAMActionServiceListCommand(PAMGatewayActionDiscoverCommandBase): # The record to base everything on. 
parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store', help='Gateway name or UID') + parser.add_argument('--configuration-uid', '-c', required=False, dest='configuration_uid', + action='store', help='PAM configuration UID, if gateway has multiple.') def get_parser(self): return PAMActionServiceListCommand.parser @@ -28,13 +30,15 @@ def execute(self, params: KeeperParams, **kwargs): gateway = kwargs.get("gateway") - gateway_context = GatewayContext.from_gateway(params, gateway) - if gateway_context is None: - print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.") - return - - if gateway_context is None: - print(f" {self._f('Cannot get gateway information. Gateway may not be up.')}") + try: + gateway_context = GatewayContext.from_gateway(params=params, + gateway=gateway, + configuration_uid=kwargs.get('configuration_uid')) + if gateway_context is None: + print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.{bcolors.ENDC}") + return + except MultiConfigurationException as err: + multi_conf_msg(gateway, err) return user_service = UserService(record=gateway_context.configuration, params=params, fail_on_corrupt=False, @@ -61,22 +65,24 @@ def execute(self, params: KeeperParams, **kwargs): } text = f"{resource_record.title} ({resource_record.record_uid}) :" comma = "" - if acl.is_service is True: + if acl.is_service: text += f" {bcolors.OKGREEN}Services{bcolors.ENDC}" comma = "," - if acl.is_task is True: + if acl.is_task: text += f"{comma} {bcolors.OKGREEN}Scheduled Tasks{bcolors.ENDC}" - if acl.is_iis_pool is True: + if acl.is_iis_pool: text += f"{comma} {bcolors.OKGREEN}IIS Pools{bcolors.ENDC}" - comma = "," service_map[user_record.record_uid]["machines"].append(text) print("") + printed_something = False print(self._h("User Mapping")) for user_uid in service_map: user = service_map[user_uid] + printed_something = True print(f" {self._b(user['title'])} ({user_uid})") for machine in 
user["machines"]: print(f" * {machine}") print("") - + if not printed_something: + print(f" {bcolors.FAIL}There are no service mappings.{bcolors.ENDC}") diff --git a/keepercommander/commands/pam_service/remove.py b/keepercommander/commands/pam_service/remove.py index c10bcaeb6..e4b68d25f 100644 --- a/keepercommander/commands/pam_service/remove.py +++ b/keepercommander/commands/pam_service/remove.py @@ -1,6 +1,6 @@ from __future__ import annotations import argparse -from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext +from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext, MultiConfigurationException, multi_conf_msg from ... import vault from ...discovery_common.constants import PAM_USER, PAM_MACHINE from ...discovery_common.user_service import UserService @@ -19,6 +19,8 @@ class PAMActionServiceRemoveCommand(PAMGatewayActionDiscoverCommandBase): # The record to base everything on. parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store', help='Gateway name or UID') + parser.add_argument('--configuration-uid', '-c', required=False, dest='configuration_uid', + action='store', help='PAM configuration UID, if gateway has multiple.') parser.add_argument('--machine-uid', '-m', required=True, dest='machine_uid', action='store', help='The UID of the Windows Machine record') @@ -39,9 +41,15 @@ def execute(self, params: KeeperParams, **kwargs): print("") - gateway_context = GatewayContext.from_gateway(params, gateway) - if gateway_context is None: - print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.") + try: + gateway_context = GatewayContext.from_gateway(params=params, + gateway=gateway, + configuration_uid=kwargs.get('configuration_uid')) + if gateway_context is None: + print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.{bcolors.ENDC}") + return + except MultiConfigurationException as err: + multi_conf_msg(gateway, err) return if gateway_context is 
None: @@ -92,7 +100,7 @@ def execute(self, params: KeeperParams, **kwargs): else: acl.is_iis_pool = False - if user_service.dag.get_root.has(machine_vertex) is False: + if not user_service.dag.get_root.has(machine_vertex): user_service.belongs_to(gateway_context.configuration_uid, machine_vertex.uid) user_service.belongs_to(machine_vertex.uid, user_vertex.uid, acl=acl) From 350e8f2b1af7b865b07488cc5d4b05444125bc1c Mon Sep 17 00:00:00 2001 From: lthievenaz-keeper Date: Tue, 20 Jan 2026 22:08:09 +0000 Subject: [PATCH 19/24] Expand create-user scripts (#1749) * Rename user_create.py to user_onboarding__create_and_push.py * Create user_onboarding__create_and_login.py Added script that is similar to the create_and_push one, however in this one you can *login as* the created users in commander and run any commands from the SDK in their vault directly - yielding more capabilities than the enterprise-push command. --- examples/user_onboarding__create_and_login.py | 171 ++++++++++++++++++ ...py => user_onboarding__create_and_push.py} | 0 2 files changed, 171 insertions(+) create mode 100644 examples/user_onboarding__create_and_login.py rename examples/{user_create.py => user_onboarding__create_and_push.py} (100%) diff --git a/examples/user_onboarding__create_and_login.py b/examples/user_onboarding__create_and_login.py new file mode 100644 index 000000000..9b3802038 --- /dev/null +++ b/examples/user_onboarding__create_and_login.py @@ -0,0 +1,171 @@ +''' _ __ + | |/ /___ ___ _ __ ___ _ _ ® + | ' list,list + api.query_enterprise(params) + active_usernames = [user['username'] for user in params.enterprise['users'] if user['status']!='invited'] + invited_users = [user for user in params.enterprise['users'] if user['status']=='invited'] + + return active_usernames, invited_users + + +def generate_password(params,length=20): # (KeeperParams, int) => str + from keepercommander.generator import generate + import re + password_rules, min_iterations = 
login_v3_flow.get_default_password_rules(params) + while True: + password = generate(length) + + failed_rules = [] + for rule in password_rules: + pattern = re.compile(rule.pattern) + if not re.match(pattern, password): + failed_rules.append(rule.description) + if len(failed_rules) == 0: + return password + + +def get_user_vault(admin_params, user, folder=None, password_length=20, replace_invited=True): # (KeeperParams, dict, str, int, bool) => KeeperParams + ''' + user_dict_format = { + 'username': 'user@email.com' + 'node_id': 1067368092533492, # Optional, also supports name + 'full_name': 'Example Name', # Optional + 'job_title': 'Example Job Title' # Optional + } + Folder must already exist in admin vault for folder flag + ''' + + from keepercommander.commands.enterprise_create_user import CreateEnterpriseUserCommand + + if not user['username']: + print('get_user_vault function needs at least a username') + return + email = user['username'] + + # Get all users by status + active_usernames, invited_users = compile_users(admin_params) + + # Delete invited (if allowed) + for invited_user in invited_users: + if invited_user['username'] == email: + print(f'Invited user for {email} found',end='') + if not replace_invited: + print(' - Not allowed to replace, could not create user.') + return + print(' - replacing...') + eu.execute(admin_params,email=[email],delete=True,force=True) + # replace empty user fields with that of found user + for key in ['node_id','full_name','job_title']: + if user.get(key,None) is None and invited_user.get(key,None) is not None: + user[key] = invited_user[key] + + # Create user + user_record = None + if email not in active_usernames: + print(f'Creating user vault for {email}...') + record_uid = CreateEnterpriseUserCommand().execute(admin_params,email=email,node=user.get('node_id',None),name=user.get('full_name',None),folder=folder) + user_record = api.get_record(admin_params,record_uid) + 
eu.execute(admin_params,email=[email],jobtitle=user.get('job_title',None)) + else: + print(f'Active user found for {email}. Could not create user, but will attempt to sign in using vault records.') + record_search = api.search_records(admin_params,f'Keeper Account: {email}') + if len(record_search)!=1: + print(f'Error looking up record with title "Keeper Account: {email}". Could not sign in as user.') + return + user_record = record_search[0] + + if user_record is None: + print(f'Error looking up record with UID {record_uid}') + return + + # Sign in as user + print(f'Signing in as user {email}...') + user_params = KeeperParams() + user_params.user = email + user_params.password = user_record.password + + if email not in active_usernames: + # Reset tmp pwd + new_password = generate_password(admin_params) + login_v3_flow.login(user_params, new_password_if_reset_required=new_password) + + # Update record password + user_params.password = new_password + from keepercommander.commands.record_edit import RecordUpdateCommand + RecordUpdateCommand().execute(admin_params, record=record_uid, fields=[f'password={new_password}']) + + api.login(user_params) + api.sync_down(user_params) + print('Sign in Successful') + return user_params + + +# RUNTIME + +# Login as admin +print('Signing in as admin...') +admin_params = KeeperParams() +admin_params.user = input('Admin email: ') +api.login(admin_params) +api.sync_down(admin_params) + +# Create/get vault for User A (minimal example) +user_a_params = get_user_vault(admin_params,{'username':USER_A}) +# Create/get vault for User B (extended example) +user_b_params = get_user_vault( + admin_params, + { + 'username':USER_B, + 'full_name': 'Jane Doe', + 'job_title': 'DevOps Engineer' + }, + folder='DevOps users' +) + +# Run ad-hoc commands for User A +cli.do_command(user_a_params,'mkdir "Sample user folder" -uf') +cli.do_command(user_a_params,'record-add -rt login -t "Sample record" --folder "Sample user folder"') + +from 
keepercommander.importer.imp_exp import _import as run_import +# Run CSV import for User A +run_import(user_a_params, 'csv', 'csv_file.csv') + +# Run JSON import for User B +run_import(user_b_params, 'json', 'json_file.json') + +# Re-expire Master Passwords +eu.execute(admin_params, email=[USER_A,USER_B], expire=True, force=True) diff --git a/examples/user_create.py b/examples/user_onboarding__create_and_push.py similarity index 100% rename from examples/user_create.py rename to examples/user_onboarding__create_and_push.py From 289f485f79cacf4370bdf35c93d9dcd3fc5a6df6 Mon Sep 17 00:00:00 2001 From: amangalampalli-ks Date: Wed, 21 Jan 2026 21:05:12 +0530 Subject: [PATCH 20/24] Add tunneling options to slack-app-setup and enhance service mode configuration (#1772) (#1775) * Add tunnelling to setup commands and adv. security configuration in streamline service-create * Update Readme and unit-tests * Remove -enc flag and update default KSM app name * Fix review comments, pentest port expose issue, remove tls from setup commands, remove api-key from docker and UI print messages --- keepercommander/enforcement.py | 2 +- keepercommander/service/README.md | 119 ++++++++++- .../service/commands/create_service.py | 29 ++- .../commands/service_config_handlers.py | 56 +++++- .../service/commands/service_docker_setup.py | 184 +++++++++++------- .../service/commands/slack_app_setup.py | 54 ++--- .../service/config/command_validator.py | 2 +- .../service/config/config_validation.py | 5 + .../service/config/record_handler.py | 28 ++- .../service/config/service_config.py | 6 +- .../service/decorators/security.py | 24 +-- .../service/docker/compose_builder.py | 60 +++--- keepercommander/service/docker/models.py | 25 ++- keepercommander/service/docker/setup_base.py | 95 ++++++++- unit-tests/service/test_config_validation.py | 4 + unit-tests/service/test_create_service.py | 52 +++-- unit-tests/service/test_service_config.py | 4 +- 17 files changed, 555 insertions(+), 194 deletions(-) 
diff --git a/keepercommander/enforcement.py b/keepercommander/enforcement.py index 2e0a91338..2ab093115 100644 --- a/keepercommander/enforcement.py +++ b/keepercommander/enforcement.py @@ -51,7 +51,7 @@ def requires_master_password_reentry(cls, params: KeeperParams, operation: str = operation = operation.strip()[:100] # Limit length and strip whitespace # Bypass enforcement when running in service mode if params and hasattr(params, 'service_mode') and params.service_mode: - logging.info(f"Bypassing master password enforcement for operation '{operation}' - running in service mode") + logging.debug(f"Bypassing master password enforcement for operation '{operation}' - running in service mode") return False if not params or not params.enforcements: diff --git a/keepercommander/service/README.md b/keepercommander/service/README.md index 8077e3bef..bfb70e26b 100644 --- a/keepercommander/service/README.md +++ b/keepercommander/service/README.md @@ -18,6 +18,8 @@ The Service Mode module for Keeper Commander enables REST API integration by pro | `service-stop` | Gracefully stop the running service | | `service-status` | Display current service status | | `service-config-add` | Add new API configuration and command access settings | +| `service-docker-setup` | Automated Docker service mode setup with KSM configuration | +| `slack-app-setup` | Automated Slack App integration setup with Commander Service Mode | ### Security Features - API key authentication @@ -90,6 +92,9 @@ Parameters: - `-q, --queue_enabled`: Enable request queue (y/n) - `-dip, --deniedip`: Denied IP list to access service - `-aip, --allowedip`: Allowed IP list to access service +- `-rl, --ratelimit`: Rate limit (e.g., "10/minute") +- `-ek, --encryption_key`: Encryption key for response encryption (automatically enables encryption) +- `-te, --token_expiration`: Token expiration time (e.g., "30m", "24h", "7d") ### Service Management @@ -208,7 +213,8 @@ result_retention: 3600 # Result retention (1 hour) #### 
Rate Limiting - **Default limits**: 60/minute, 600/hour, 6000/day -- **Example**: Setting `"20/minute"` effectively provides ~20 requests per minute across all endpoints +- **Per-endpoint tracking**: Each API endpoint has independent rate limit counters +- **Example**: Setting `"20/minute"` provides 20 requests per minute per endpoint per IP address #### Error Responses @@ -294,7 +300,7 @@ curl -X POST 'http://localhost:/api/v2/executecommand-async' \ The service configuration is stored as an attachment to a vault record in JSON/YAML format and includes: -- **Service Title**: Identifier for the service configuration +- **Service Title**: Identifier for the service configuration (default: "Commander Service Mode Config") - **Port Number**: Port for the API server - **Run Mode**: Service execution mode (foreground/background) - **Ngrok Configuration** (optional): @@ -401,9 +407,99 @@ Verify the image was pulled: docker images | grep keeper/commander ``` -### Authentication Methods +### Quick Setup with service-docker-setup (Recommended) -The Docker container supports four authentication methods: +If you have Keeper Secrets Manager (KSM) activated in your account, you can use the `service-docker-setup` command for automated Docker deployment setup: + +**Prerequisites:** +- Active Keeper vault with KSM enabled +- Docker installed and image pulled + +**Setup Steps:** + +1. **Login to Keeper:** + ```bash + keeper shell + My Vault> login user@example.com + ``` + +2. **Run automated setup:** + ```bash + My Vault> service-docker-setup + ``` + + This command will automatically: + - Register your device and enable persistent login + - Create a shared folder ("Commander Service Mode - Docker") + - Create a config record with `config.json` attachment + - Create a KSM application + - Share the folder with the KSM app + - Generate a KSM client device with base64 config + - Generate `docker-compose.yml` with the complete configuration + +3. 
**Interactive Configuration:** + + You'll be prompted to configure: + - **Port**: Service port (default: 8900) + - **Commands**: Allowed commands (default: tree,ls) + - **Queue Mode**: Enable async API v2 (default: yes) + - **Ngrok Tunneling** (optional): Public URL via ngrok + - **Cloudflare Tunneling** (optional): Public URL via Cloudflare + - **Advanced Security** (optional): + - IP filtering (allowed/denied lists) + - Rate limiting + - Response encryption + - Token expiration + +4. **Deploy:** + ```bash + My Vault> quit + $ rm ~/.keeper/config.json # Prevent device token conflicts + $ docker compose up -d + ``` + +**Example Output:** +``` +Resources Created: + • Shared Folder: Commander Service Mode - Docker + • KSM App: Commander Service Mode - KSM App + • Config Record: + • KSM Base64 Config: ✓ Generated +``` + +The generated `docker-compose.yml` includes all your configuration and can be customized before deployment. + +### Slack App Integration Setup + +For integrating Commander Service Mode with Slack, use the `slack-app-setup` command: + +```bash +My Vault> slack-app-setup +``` + +This automates the complete setup for Slack App integration: +- **Phase 1**: Runs Docker setup (same as `service-docker-setup`) +- **Phase 2**: Configures Slack App integration + - Collects Slack tokens (App Token, Bot Token, Signing Secret) + - Creates Slack configuration record + - Updates `docker-compose.yml` with Slack App service + - Supports optional PEDM and Device Approval integrations + +**Configuration Options:** +- Port selection (default: 8900) +- Ngrok/Cloudflare tunneling for public URL exposure +- Slack App credentials +- Approvals channel ID +- Optional PEDM integration +- Optional SSO Cloud Device Approval + +The command generates a complete `docker-compose.yml` with both Commander service and Slack App service configured. 
+ +--- + +### Manual Authentication Methods (Alternative) + +If you prefer manual setup or don't have KSM activated, the Docker container supports four authentication methods: #### Method 1: Using KSM Config File Use Keeper Secrets Manager (KSM) config file to download the `config.json` configuration from a Keeper record. The container will: @@ -662,11 +758,16 @@ docker run -d -p : \ docker logs ``` -3. **Get API key from logs:** - Look for the API key in the container logs: - ``` - Generated API key: - ``` +3. **Get API key from logs or vault:** + - **Docker mode**: The API key is redacted in logs for security (only last 4 characters shown) with the vault record UID displayed: + ``` + Generated API key: ****nQ= (stored in vault record: I2eqTs5efnJ_iqbtSuEagQ) + ``` + Retrieve the full key from your Keeper vault using the record UID. + - **Direct service-create**: The full API key is displayed in the output for immediate use: + ``` + Generated API key: H4uyn0L-_QJL-o_UBMbs7DESA13ZgdJ_ea2bnQ= + ``` 4. 
**Follow logs in real-time:** ```bash diff --git a/keepercommander/service/commands/create_service.py b/keepercommander/service/commands/create_service.py index c77c062c0..cca65e787 100644 --- a/keepercommander/service/commands/create_service.py +++ b/keepercommander/service/commands/create_service.py @@ -29,11 +29,14 @@ class StreamlineArgs: cloudflare: Optional[str] cloudflare_custom_domain: Optional[str] certfile: Optional[str] - certpassword : Optional[str] - fileformat : Optional[str] + certpassword: Optional[str] + fileformat: Optional[str] run_mode: Optional[str] queue_enabled: Optional[str] update_vault_record: Optional[str] + ratelimit: Optional[str] + encryption_key: Optional[str] + token_expiration: Optional[str] class CreateService(Command): """Command to create a new service configuration.""" @@ -74,6 +77,9 @@ def get_parser(self): parser.add_argument('-rm', '--run_mode', type=str, help='run mode') parser.add_argument('-q', '--queue_enabled', type=str, help='enable request queue (y/n)') parser.add_argument('-ur', '--update-vault-record', dest='update_vault_record', type=str, help='CSMD Config record UID to update with service metadata (Docker mode)') + parser.add_argument('-rl', '--ratelimit', type=str, help='rate limit (e.g., 10/minute, 100/hour)') + parser.add_argument('-ek', '--encryption_key', type=str, help='encryption key for response encryption (32 alphanumeric characters)') + parser.add_argument('-te', '--token_expiration', type=str, help='API token expiration (e.g., 30m, 24h, 7d)') return parser def execute(self, params: KeeperParams, **kwargs) -> None: @@ -88,7 +94,7 @@ def execute(self, params: KeeperParams, **kwargs) -> None: config_data = self.service_config.create_default_config() - filtered_kwargs = {k: v for k, v in kwargs.items() if k in ['port', 'allowedip', 'deniedip', 'commands', 'ngrok', 'ngrok_custom_domain', 'cloudflare', 'cloudflare_custom_domain', 'certfile', 'certpassword', 'fileformat', 'run_mode', 'queue_enabled', 
'update_vault_record']} + filtered_kwargs = {k: v for k, v in kwargs.items() if k in ['port', 'allowedip', 'deniedip', 'commands', 'ngrok', 'ngrok_custom_domain', 'cloudflare', 'cloudflare_custom_domain', 'certfile', 'certpassword', 'fileformat', 'run_mode', 'queue_enabled', 'update_vault_record', 'ratelimit', 'encryption', 'encryption_key', 'token_expiration']} args = StreamlineArgs(**filtered_kwargs) self._handle_configuration(config_data, params, args) api_key = self._create_and_save_record(config_data, params, args) @@ -118,7 +124,7 @@ def _create_and_save_record(self, config_data: Dict[str, Any], params: KeeperPar if args.port is None: self.config_handler._configure_run_mode(config_data) - record = self.service_config.create_record(config_data["is_advanced_security_enabled"], params, args.commands) + record = self.service_config.create_record(config_data["is_advanced_security_enabled"], params, args.commands, args.token_expiration, args.update_vault_record) config_data["records"] = [record] if config_data.get("fileformat"): format_type = config_data["fileformat"] @@ -138,17 +144,24 @@ def _upload_and_start_service(self, params: KeeperParams) -> None: ServiceManager.start_service() def _get_service_url(self, config_data: Dict[str, Any]) -> str: - """Determine the actual service URL (ngrok, cloudflare, or localhost)""" + """Determine the actual service URL (ngrok, cloudflare, or localhost) with API version path""" + # Determine API version based on queue_enabled + queue_enabled = config_data.get("queue_enabled", "y") + api_path = "/api/v2" if queue_enabled == "y" else "/api/v1" + # Priority: ngrok > cloudflare > localhost + base_url = "" if config_data.get("ngrok_public_url"): - return config_data["ngrok_public_url"] + base_url = config_data["ngrok_public_url"] elif config_data.get("cloudflare_public_url"): - return config_data["cloudflare_public_url"] + base_url = config_data["cloudflare_public_url"] else: # Fallback to localhost with correct protocol port = 
config_data.get("port", 8080) protocol = "https" if config_data.get("tls_certificate") == "y" else "http" - return f"{protocol}://localhost:{port}" + base_url = f"{protocol}://localhost:{port}" + + return f"{base_url}{api_path}" def _update_vault_record_with_metadata(self, params: KeeperParams, record_uid: str, service_url: str, api_key: str) -> None: """Update CSMD Config vault record with service URL and API key as custom fields (Docker mode only)""" diff --git a/keepercommander/service/commands/service_config_handlers.py b/keepercommander/service/commands/service_config_handlers.py index 51a6dea43..c63a01072 100644 --- a/keepercommander/service/commands/service_config_handlers.py +++ b/keepercommander/service/commands/service_config_handlers.py @@ -65,6 +65,9 @@ def handle_streamlined_config(self, config_data: Dict[str, Any], args, params: K cloudflare_enabled = "y" if args.cloudflare else "n" # Implement the same logic as interactive mode + ngrok_public_url = "" + cloudflare_public_url = "" + if ngrok_enabled == "y": # ngrok enabled → disable cloudflare and TLS cloudflare_enabled = "n" @@ -73,6 +76,14 @@ def handle_streamlined_config(self, config_data: Dict[str, Any], args, params: K tls_enabled = "n" certfile = "" certpassword = "" + # Construct ngrok public URL from custom domain + if args.ngrok_custom_domain: + ngrok_domain = args.ngrok_custom_domain.strip() + # If it's just a subdomain (no dots), append .ngrok.io + if '.' 
not in ngrok_domain: + ngrok_public_url = f"https://{ngrok_domain}.ngrok.io" + else: + ngrok_public_url = f"https://{ngrok_domain}" logger.debug("Ngrok enabled - disabling cloudflare and TLS") elif cloudflare_enabled == "y": # cloudflare enabled → disable TLS, but validate required fields @@ -86,6 +97,8 @@ def handle_streamlined_config(self, config_data: Dict[str, Any], args, params: K certpassword = "" cloudflare_token = self.service_config.validator.validate_cloudflare_token(args.cloudflare) cloudflare_domain = self.service_config.validator.validate_domain(args.cloudflare_custom_domain) + # Construct cloudflare public URL from custom domain + cloudflare_public_url = f"https://{cloudflare_domain}" logger.debug("Cloudflare enabled - disabling TLS") else: # Both ngrok and cloudflare disabled → allow TLS @@ -96,6 +109,21 @@ def handle_streamlined_config(self, config_data: Dict[str, Any], args, params: K cloudflare_domain = "" logger.debug("No tunnels enabled - TLS configuration allowed") + # Handle advanced security options + rate_limiting = "" + if args.ratelimit: + rate_limiting = self.service_config.validator.validate_rate_limit(args.ratelimit) + + encryption_enabled = "n" + encryption_key = "" + if args.encryption_key: + encryption_enabled = "y" + encryption_key = self.service_config.validator.validate_encryption_key(args.encryption_key) + + # Validate token expiration format if provided (actual usage is in record creation) + if args.token_expiration: + self.service_config.validator.parse_expiration_time(args.token_expiration) + config_data.update({ "port": self.service_config.validator.validate_port(args.port), "ip_allowed_list": self.service_config.validator.validate_ip_list(args.allowedip), @@ -106,15 +134,20 @@ def handle_streamlined_config(self, config_data: Dict[str, Any], args, params: K if ngrok_enabled == "y" else "" ), "ngrok_custom_domain": args.ngrok_custom_domain if ngrok_enabled == "y" else "", + "ngrok_public_url": ngrok_public_url, "cloudflare": 
cloudflare_enabled, "cloudflare_tunnel_token": cloudflare_token, "cloudflare_custom_domain": cloudflare_domain, + "cloudflare_public_url": cloudflare_public_url, "tls_certificate": tls_enabled, "certfile": certfile, "certpassword": certpassword, "fileformat": args.fileformat, # Keep original logic - can be None "run_mode": run_mode, - "queue_enabled": queue_enabled + "queue_enabled": queue_enabled, + "rate_limiting": rate_limiting, + "encryption": encryption_enabled, + "encryption_private_key": encryption_key }) @debug_decorator @@ -150,6 +183,7 @@ def _configure_tunneling_and_tls(self, config_data: Dict[str, Any]) -> None: config_data["cloudflare"] = "n" config_data["cloudflare_tunnel_token"] = "" config_data["cloudflare_custom_domain"] = "" + config_data["cloudflare_public_url"] = "" config_data["tls_certificate"] = "n" config_data["certfile"] = "" config_data["certpassword"] = "" @@ -174,13 +208,23 @@ def _configure_ngrok(self, config_data: Dict[str, Any]) -> None: try: token = input(self.messages['ngrok_token_prompt']) config_data["ngrok_auth_token"] = self.service_config.validator.validate_ngrok_token(token) - config_data["ngrok_custom_domain"] = input(self.messages['ngrok_custom_domain_prompt']) - # print(f"ngrok custom domain >> "+{config_data["ngrok_custom_domain"]}) + config_data["ngrok_custom_domain"] = input(self.messages['ngrok_custom_domain_prompt']) + # Construct ngrok public URL from custom domain + if config_data["ngrok_custom_domain"]: + ngrok_domain = config_data["ngrok_custom_domain"].strip() + # If it's just a subdomain (no dots), append .ngrok.io + if '.' 
not in ngrok_domain: + config_data["ngrok_public_url"] = f"https://{ngrok_domain}.ngrok.io" + else: + config_data["ngrok_public_url"] = f"https://{ngrok_domain}" + else: + config_data["ngrok_public_url"] = "" break except ValidationError as e: print(f"{self.validation_messages['invalid_ngrok_token']} {str(e)}") else: config_data["ngrok_auth_token"] = "" + config_data["ngrok_public_url"] = "" def _configure_cloudflare(self, config_data: Dict[str, Any]) -> None: config_data["cloudflare"] = self.service_config._get_yes_no_input( @@ -201,9 +245,15 @@ def _configure_cloudflare(self, config_data: Dict[str, Any]) -> None: error_key='invalid_cloudflare_domain', required=True ) + # Construct cloudflare public URL from custom domain + if config_data["cloudflare_custom_domain"]: + config_data["cloudflare_public_url"] = f"https://{config_data['cloudflare_custom_domain']}" + else: + config_data["cloudflare_public_url"] = "" else: config_data["cloudflare_tunnel_token"] = "" config_data["cloudflare_custom_domain"] = "" + config_data["cloudflare_public_url"] = "" def _configure_tls(self, config_data: Dict[str, Any]) -> None: config_data["tls_certificate"] = self.service_config._get_yes_no_input(self.messages['tls_certificate']) diff --git a/keepercommander/service/commands/service_docker_setup.py b/keepercommander/service/commands/service_docker_setup.py index aec0dd2a7..9f58d871a 100644 --- a/keepercommander/service/commands/service_docker_setup.py +++ b/keepercommander/service/commands/service_docker_setup.py @@ -117,17 +117,14 @@ def get_service_configuration(self, params) -> ServiceConfig: if not ngrok_config['ngrok_enabled']: cloudflare_config = self._get_cloudflare_config() - - # TLS only if no tunneling - if not cloudflare_config['cloudflare_enabled']: - tls_config = self._get_tls_config() - else: - tls_config = {'tls_enabled': False, 'cert_file': '', 'cert_password': ''} else: cloudflare_config = { - 'cloudflare_enabled': False, 'cloudflare_tunnel_token': '', 
'cloudflare_custom_domain': '' + 'cloudflare_enabled': False, 'cloudflare_tunnel_token': '', + 'cloudflare_custom_domain': '', 'cloudflare_public_url': '' } - tls_config = {'tls_enabled': False, 'cert_file': '', 'cert_password': ''} + + # Advanced security options + security_config = self._get_advanced_security_config() return ServiceConfig( port=port, @@ -136,12 +133,17 @@ def get_service_configuration(self, params) -> ServiceConfig: ngrok_enabled=ngrok_config['ngrok_enabled'], ngrok_auth_token=ngrok_config['ngrok_auth_token'], ngrok_custom_domain=ngrok_config['ngrok_custom_domain'], + ngrok_public_url=ngrok_config.get('ngrok_public_url', ''), cloudflare_enabled=cloudflare_config['cloudflare_enabled'], cloudflare_tunnel_token=cloudflare_config['cloudflare_tunnel_token'], cloudflare_custom_domain=cloudflare_config['cloudflare_custom_domain'], - tls_enabled=tls_config['tls_enabled'], - cert_file=tls_config['cert_file'], - cert_password=tls_config['cert_password'] + cloudflare_public_url=cloudflare_config.get('cloudflare_public_url', ''), + allowed_ip=security_config['allowed_ip'], + denied_ip=security_config['denied_ip'], + rate_limit=security_config['rate_limit'], + encryption_enabled=security_config['encryption_enabled'], + encryption_key=security_config['encryption_key'], + token_expiration=security_config['token_expiration'] ) def generate_docker_compose_yaml(self, setup_result: SetupResult, config: ServiceConfig) -> str: @@ -214,98 +216,136 @@ def _get_queue_config(self) -> bool: queue_input = input(f"{bcolors.OKBLUE}Enable queue mode? [Press Enter for Yes] (y/n):{bcolors.ENDC} ").strip().lower() return queue_input != 'n' - def _get_ngrok_config(self) -> Dict[str, Any]: - """Get ngrok configuration""" - print(f"\n{bcolors.BOLD}Ngrok Tunneling (optional):{bcolors.ENDC}") - print(f" Generate a public URL for your service using ngrok") - use_ngrok = input(f"{bcolors.OKBLUE}Enable ngrok? 
[Press Enter for No] (y/n):{bcolors.ENDC} ").strip().lower() == 'y' + + def _get_advanced_security_config(self) -> Dict[str, Any]: + """Get advanced security configuration""" + print(f"\n{bcolors.BOLD}Advanced Security (optional):{bcolors.ENDC}") + print(f" Configure IP filtering, rate limiting, and response encryption") + enable_advanced = input(f"{bcolors.OKBLUE}Enable advanced security? [Press Enter for No] (y/n):{bcolors.ENDC} ").strip().lower() == 'y' - config = {'ngrok_enabled': use_ngrok, 'ngrok_auth_token': '', 'ngrok_custom_domain': ''} + config = { + 'allowed_ip': '0.0.0.0/0,::/0', + 'denied_ip': '', + 'rate_limit': '', + 'encryption_enabled': False, + 'encryption_key': '', + 'token_expiration': '' + } - if use_ngrok: + if enable_advanced: + # IP Allowed List + config.update(self._get_ip_allowed_config()) + + # IP Denied List + config.update(self._get_ip_denied_config()) + + # Rate Limiting + config.update(self._get_rate_limit_config()) + + # Encryption + config.update(self._get_encryption_config()) + + # Token Expiration + config.update(self._get_token_expiration_config()) + + return config + + def _get_ip_allowed_config(self) -> Dict[str, str]: + """Get allowed IP configuration""" + print(f"\n{bcolors.BOLD}IP Allowed List:{bcolors.ENDC}") + print(f" Comma-separated IPs or CIDR ranges (e.g., 192.168.1.0/24,10.0.0.1)") + + ip_list = input(f"{bcolors.OKBLUE}Allowed IPs [Press Enter for all]:{bcolors.ENDC} ").strip() + + if ip_list: while True: - token = input(f"{bcolors.OKBLUE}Ngrok auth token:{bcolors.ENDC} ").strip() try: - config['ngrok_auth_token'] = ConfigValidator.validate_ngrok_token(token) - break + return {'allowed_ip': ConfigValidator.validate_ip_list(ip_list)} except ValidationError as e: print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") - - # Validate custom domain if provided (ngrok allows subdomain prefixes) - domain = input(f"{bcolors.OKBLUE}Ngrok custom domain [Press Enter to skip]:{bcolors.ENDC} ").strip() - if domain: - while True: - 
try: - config['ngrok_custom_domain'] = ConfigValidator.validate_domain(domain, require_tld=False) + ip_list = input(f"{bcolors.OKBLUE}Allowed IPs [Press Enter for all]:{bcolors.ENDC} ").strip() + if not ip_list: break - except ValidationError as e: - print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") - domain = input(f"{bcolors.OKBLUE}Ngrok custom domain [Press Enter to skip]:{bcolors.ENDC} ").strip() - if not domain: - break - return config + return {'allowed_ip': '0.0.0.0/0,::/0'} - def _get_cloudflare_config(self) -> Dict[str, Any]: - """Get Cloudflare configuration""" - print(f"\n{bcolors.BOLD}Cloudflare Tunneling (optional):{bcolors.ENDC}") - print(f" Generate a public URL for your service using Cloudflare") - use_cloudflare = input(f"{bcolors.OKBLUE}Enable Cloudflare? [Press Enter for No] (y/n):{bcolors.ENDC} ").strip().lower() == 'y' + def _get_ip_denied_config(self) -> Dict[str, str]: + """Get denied IP configuration""" + print(f"\n{bcolors.BOLD}IP Denied List:{bcolors.ENDC}") + print(f" Comma-separated IPs or CIDR ranges to block") - config = {'cloudflare_enabled': use_cloudflare, 'cloudflare_tunnel_token': '', 'cloudflare_custom_domain': ''} + ip_list = input(f"{bcolors.OKBLUE}Denied IPs [Press Enter to skip]:{bcolors.ENDC} ").strip() - if use_cloudflare: + if ip_list: while True: - token = input(f"{bcolors.OKBLUE}Cloudflare tunnel token:{bcolors.ENDC} ").strip() try: - config['cloudflare_tunnel_token'] = ConfigValidator.validate_cloudflare_token(token) - break + return {'denied_ip': ConfigValidator.validate_ip_list(ip_list)} except ValidationError as e: print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") - + ip_list = input(f"{bcolors.OKBLUE}Denied IPs [Press Enter to skip]:{bcolors.ENDC} ").strip() + if not ip_list: + break + + return {'denied_ip': ''} + + def _get_rate_limit_config(self) -> Dict[str, str]: + """Get rate limiting configuration""" + print(f"\n{bcolors.BOLD}Rate Limiting:{bcolors.ENDC}") + print(f" Format: / (e.g., 10/minute, 
100/hour, 1000/day)") + + rate_limit = input(f"{bcolors.OKBLUE}Rate limit [Press Enter to skip]:{bcolors.ENDC} ").strip() + + if rate_limit: while True: - domain = input(f"{bcolors.OKBLUE}Cloudflare custom domain:{bcolors.ENDC} ").strip() try: - config['cloudflare_custom_domain'] = ConfigValidator.validate_domain(domain) + return {'rate_limit': ConfigValidator.validate_rate_limit(rate_limit)} + except ValidationError as e: + print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") + rate_limit = input(f"{bcolors.OKBLUE}Rate limit [Press Enter to skip]:{bcolors.ENDC} ").strip() + if not rate_limit: + break + + return {'rate_limit': ''} + + def _get_encryption_config(self) -> Dict[str, Any]: + """Get encryption configuration""" + print(f"\n{bcolors.BOLD}Response Encryption:{bcolors.ENDC}") + print(f" Enable AES-256 encryption for API responses") + enable_encryption = input(f"{bcolors.OKBLUE}Enable encryption? [Press Enter for No] (y/n):{bcolors.ENDC} ").strip().lower() == 'y' + + config = {'encryption_enabled': enable_encryption, 'encryption_key': ''} + + if enable_encryption: + print(f" Encryption key must be exactly 32 alphanumeric characters") + while True: + key = input(f"{bcolors.OKBLUE}Encryption key (32 chars):{bcolors.ENDC} ").strip() + try: + config['encryption_key'] = ConfigValidator.validate_encryption_key(key) break except ValidationError as e: print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") return config - def _get_tls_config(self) -> Dict[str, Any]: - """Get TLS configuration""" - print(f"\n{bcolors.BOLD}TLS Certificate (optional):{bcolors.ENDC}") - print(f" Use custom TLS certificate for HTTPS") - use_tls = input(f"{bcolors.OKBLUE}Enable TLS? 
[Press Enter for No] (y/n):{bcolors.ENDC} ").strip().lower() == 'y' + def _get_token_expiration_config(self) -> Dict[str, str]: + """Get token expiration configuration""" + print(f"\n{bcolors.BOLD}API Token Expiration:{bcolors.ENDC}") + print(f" Format: Xm (minutes), Xh (hours), Xd (days) - e.g., 30m, 24h, 7d") - config = {'tls_enabled': use_tls, 'cert_file': '', 'cert_password': ''} + expiration = input(f"{bcolors.OKBLUE}Token expiration [Press Enter for never]:{bcolors.ENDC} ").strip() - if use_tls: + if expiration: while True: - cert_file = input(f"{bcolors.OKBLUE}Certificate file path:{bcolors.ENDC} ").strip() try: - if cert_file and os.path.exists(cert_file): - config['cert_file'] = ConfigValidator.validate_cert_file(cert_file) - break - print(f"{bcolors.FAIL}Error: Certificate file not found{bcolors.ENDC}") + ConfigValidator.parse_expiration_time(expiration) + return {'token_expiration': expiration} except ValidationError as e: print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") - - # Certificate password validation (optional) - cert_password = input(f"{bcolors.OKBLUE}Certificate password:{bcolors.ENDC} ").strip() - if cert_password: - while True: - try: - config['cert_password'] = ConfigValidator.validate_certpassword(cert_password) + expiration = input(f"{bcolors.OKBLUE}Token expiration [Press Enter for never]:{bcolors.ENDC} ").strip() + if not expiration: break - except ValidationError as e: - print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") - cert_password = input(f"{bcolors.OKBLUE}Certificate password:{bcolors.ENDC} ").strip() - if not cert_password: - break - return config + return {'token_expiration': ''} def _get_config_path(self, config_path: str = None) -> str: """Get and validate config file path""" diff --git a/keepercommander/service/commands/slack_app_setup.py b/keepercommander/service/commands/slack_app_setup.py index e8f4a7dcc..e8d15ff93 100644 --- a/keepercommander/service/commands/slack_app_setup.py +++ 
b/keepercommander/service/commands/slack_app_setup.py @@ -22,7 +22,7 @@ from ..config.config_validation import ConfigValidator, ValidationError from ..docker import ( SetupResult, DockerSetupPrinter, DockerSetupConstants, - ServiceConfig, SlackConfig, DockerComposeBuilder + ServiceConfig, SlackConfig, DockerComposeBuilder, DockerSetupBase ) slack_app_setup_parser = argparse.ArgumentParser( @@ -31,8 +31,8 @@ formatter_class=argparse.RawDescriptionHelpFormatter ) slack_app_setup_parser.add_argument( - '--folder-name', dest='folder_name', type=str, default=DockerSetupConstants.DEFAULT_FOLDER_NAME, - help=f'Name for the shared folder (default: "{DockerSetupConstants.DEFAULT_FOLDER_NAME}")' + '--folder-name', dest='folder_name', type=str, default=DockerSetupConstants.DEFAULT_SLACK_FOLDER_NAME, + help=f'Name for the shared folder (default: "{DockerSetupConstants.DEFAULT_SLACK_FOLDER_NAME}")' ) slack_app_setup_parser.add_argument( '--app-name', dest='app_name', type=str, default=DockerSetupConstants.DEFAULT_APP_NAME, @@ -62,7 +62,7 @@ slack_app_setup_parser.exit = suppress_exit -class SlackAppSetupCommand(Command): +class SlackAppSetupCommand(Command, DockerSetupBase): """Automated Slack App integration setup command""" def get_parser(self): @@ -76,7 +76,7 @@ def execute(self, params, **kwargs): setup_result, service_config, config_path = self._run_base_docker_setup(params, kwargs) DockerSetupPrinter.print_completion("Service Mode Configuration Complete!") - + # Phase 2: Slack-specific setup print(f"\n{bcolors.BOLD}Phase 2: Slack App Integration Setup{bcolors.ENDC}") @@ -86,10 +86,10 @@ def execute(self, params, **kwargs): service_config, kwargs.get('slack_record_name', DockerSetupConstants.DEFAULT_SLACK_RECORD_NAME) ) - + # Print consolidated success message self._print_success_message(setup_result, service_config, slack_record_uid, slack_config, config_path) - + return def _run_base_docker_setup(self, params, kwargs: Dict[str, Any]) -> Tuple[SetupResult, Dict[str, Any], 
str]: @@ -103,14 +103,14 @@ def _run_base_docker_setup(self, params, kwargs: Dict[str, Any]) -> Tuple[SetupR config_path = kwargs.get('config_path') or os.path.expanduser('~/.keeper/config.json') if not os.path.isfile(config_path): raise CommandError('slack-app-setup', f'Config file not found: {config_path}') - + # Print header DockerSetupPrinter.print_header("Docker Setup") - + # Run core setup steps (Steps 1-7) setup_result = docker_cmd.run_setup_steps( params=params, - folder_name=kwargs.get('folder_name', DockerSetupConstants.DEFAULT_FOLDER_NAME), + folder_name=kwargs.get('folder_name', DockerSetupConstants.DEFAULT_SLACK_FOLDER_NAME), app_name=kwargs.get('app_name', DockerSetupConstants.DEFAULT_APP_NAME), record_name=kwargs.get('config_record_name', DockerSetupConstants.DEFAULT_RECORD_NAME), config_path=config_path, @@ -129,10 +129,10 @@ def _run_base_docker_setup(self, params, kwargs: Dict[str, Any]) -> Tuple[SetupR return setup_result, service_config, config_path def _get_slack_service_configuration(self) -> ServiceConfig: - """Get simplified service configuration for Slack App (only port needed)""" + """Get service configuration for Slack App (port + tunneling options)""" DockerSetupPrinter.print_header("Service Mode Configuration") - - # Only ask for port with validation + + # Port configuration print(f"{bcolors.BOLD}Port:{bcolors.ENDC}") print(f" The port on which Commander Service will listen") while True: @@ -143,20 +143,28 @@ def _get_slack_service_configuration(self) -> ServiceConfig: except ValidationError as e: print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") - # Fixed configuration for Slack App + # Get tunneling configuration + ngrok_config = self._get_ngrok_config() + + # Only ask for Cloudflare if ngrok is not enabled + if not ngrok_config['ngrok_enabled']: + cloudflare_config = self._get_cloudflare_config() + else: + cloudflare_config = {'cloudflare_enabled': False, 'cloudflare_tunnel_token': '', + 'cloudflare_custom_domain': '', 
'cloudflare_public_url': ''} + return ServiceConfig( port=port, commands='search,share-record,share-folder,record-add,one-time-share,pedm,device-approve,get', queue_enabled=True, # Always enable queue mode (v2 API) - ngrok_enabled=False, - ngrok_auth_token='', - ngrok_custom_domain='', - cloudflare_enabled=False, - cloudflare_tunnel_token='', - cloudflare_custom_domain='', - tls_enabled=False, - cert_file='', - cert_password='' + ngrok_enabled=ngrok_config['ngrok_enabled'], + ngrok_auth_token=ngrok_config['ngrok_auth_token'], + ngrok_custom_domain=ngrok_config['ngrok_custom_domain'], + ngrok_public_url=ngrok_config.get('ngrok_public_url', ''), + cloudflare_enabled=cloudflare_config['cloudflare_enabled'], + cloudflare_tunnel_token=cloudflare_config['cloudflare_tunnel_token'], + cloudflare_custom_domain=cloudflare_config['cloudflare_custom_domain'], + cloudflare_public_url=cloudflare_config.get('cloudflare_public_url', '') ) def _run_slack_setup(self, params, setup_result: SetupResult, service_config: ServiceConfig, diff --git a/keepercommander/service/config/command_validator.py b/keepercommander/service/config/command_validator.py index 522cfaf0b..24f5a2907 100644 --- a/keepercommander/service/config/command_validator.py +++ b/keepercommander/service/config/command_validator.py @@ -139,7 +139,7 @@ def validate_command_list(self, commands: str, valid_commands: Set) -> str: else: invalid_commands.append(cmd) - return ", ".join(validated_commands), invalid_commands + return ",".join(validated_commands), invalid_commands def generate_command_error_message(self, invalid_commands: List[str], command_info: Dict[str, Any]) -> str: """Generate helpful error message for invalid commands.""" diff --git a/keepercommander/service/config/config_validation.py b/keepercommander/service/config/config_validation.py index e82b3ec04..c4cf07c6c 100644 --- a/keepercommander/service/config/config_validation.py +++ b/keepercommander/service/config/config_validation.py @@ -174,6 +174,11 @@ 
def validate_rate_limit(rate_limit: str) -> str: msg = ("Invalid rate limit format. Use formats like 'X/minute', 'X/hour', 'X/day', " "'X per minute', 'X per hour', or 'X per day'.") raise ValidationError(msg) + + # Extract the numeric value and check if it's 0 + numeric_value = int(re.match(r'^\d+', rate_limit).group()) + if numeric_value == 0: + raise ValidationError("Rate limit value cannot be 0. Please specify a positive number.") logger.debug("Rate limit validation successful") return rate_limit diff --git a/keepercommander/service/config/record_handler.py b/keepercommander/service/config/record_handler.py index 51ff416ba..f93f4efb5 100644 --- a/keepercommander/service/config/record_handler.py +++ b/keepercommander/service/config/record_handler.py @@ -26,16 +26,26 @@ def __init__(self): self.cli_handler = CommandHandler() @debug_decorator - def create_record(self, is_advanced_security_enabled: str, commands: str) -> Dict[str, Any]: + def create_record(self, is_advanced_security_enabled: str, commands: str, token_expiration: str = None, record_uid: str = None) -> Dict[str, Any]: """Create a new configuration record.""" api_key = generate_api_key() record = self._create_base_record(api_key, commands) - if is_advanced_security_enabled == "y": + # Handle token expiration - either from CLI arg (streamlined) or interactive prompt + if token_expiration: + # Streamlined mode - use provided expiration + self._set_expiration_from_string(record, token_expiration) + elif is_advanced_security_enabled == "y": + # Interactive mode - prompt for expiration logger.debug("Adding expiration to record (advanced security enabled)") self._add_expiration_to_record(record) - print(f'Generated API key: {api_key}') + # Docker mode: redact API key and show vault record UID + if record_uid: + redacted_key = f"****{api_key[-4:]}" if len(api_key) >= 4 else "****" + print(f'Generated API key: {redacted_key} (stored in vault record: {record_uid})') + else: + print(f'Generated API key: 
{api_key}') return record def update_or_add_record(self, params: KeeperParams, title: str, config_path: Path) -> None: @@ -113,23 +123,27 @@ def _create_base_record(self, api_key: str, commands: str) -> Dict[str, Any]: @debug_decorator def _add_expiration_to_record(self, record: Dict[str, Any]) -> None: - """Add expiration details to the record.""" + """Add expiration details to the record via interactive prompt.""" expiration_str = input( "Token Expiration Time (Xm, Xh, Xd) or empty for no expiration: " ).strip() if not expiration_str: - #record["expiration_of_token"] = "" record["expiration_timestamp"] = datetime(9999, 12, 31, 23, 59, 59).isoformat() print("API key set to never expire") return + if not self._set_expiration_from_string(record, expiration_str): + self._add_expiration_to_record(record) + + def _set_expiration_from_string(self, record: Dict[str, Any], expiration_str: str) -> bool: + """Set expiration timestamp from expiration string (e.g., 5m, 24h, 7d). Returns True on success.""" try: expiration_delta = self.validator.parse_expiration_time(expiration_str) expiration_time = datetime.now() + expiration_delta - #record["expiration_of_token"] = expiration_str record["expiration_timestamp"] = expiration_time.isoformat() print(f"API key will expire at: {record['expiration_timestamp']}") + return True except ValidationError as e: print(f"Error: {str(e)}") - self._add_expiration_to_record(record) \ No newline at end of file + return False \ No newline at end of file diff --git a/keepercommander/service/config/service_config.py b/keepercommander/service/config/service_config.py index b72f16b6c..35f1d0e89 100644 --- a/keepercommander/service/config/service_config.py +++ b/keepercommander/service/config/service_config.py @@ -27,7 +27,7 @@ VALID_CERT_EXTENSIONS = {".pem", ".crt", ".cer", ".key"} class ServiceConfig: - def __init__(self, title: str = 'Commander Service Mode'): + def __init__(self, title: str = 'Commander Service Mode Config'): self.title = title 
self.config = ConfigParser() @@ -300,10 +300,10 @@ def _get_validated_commands(self, params: KeeperParams) -> str: print(f"\nError: {str(e)}") print("\nPlease try again with valid commands.") - def create_record(self, is_advanced_security_enabled: str, params: KeeperParams, commands: Optional[str] = None) -> Dict[str, Any]: + def create_record(self, is_advanced_security_enabled: str, params: KeeperParams, commands: Optional[str] = None, token_expiration: str = None, record_uid: Optional[str] = None) -> Dict[str, Any]: """Create a new configuration record.""" commands = self.validate_command_list(commands, params) if commands else self._get_validated_commands(params) - return self.record_handler.create_record(is_advanced_security_enabled, commands) + return self.record_handler.create_record(is_advanced_security_enabled, commands, token_expiration, record_uid) def update_or_add_record(self, params: KeeperParams) -> None: """Update existing record or add new one.""" diff --git a/keepercommander/service/decorators/security.py b/keepercommander/service/decorators/security.py index 15ce97dbb..c588256c6 100644 --- a/keepercommander/service/decorators/security.py +++ b/keepercommander/service/decorators/security.py @@ -79,23 +79,17 @@ def is_ip_in_range(ip, ip_range): except ValueError: return False +def get_rate_limit(): + """Get configured rate limit""" + return ConfigReader.read_config("rate_limiting") or "60/minute" + +def get_rate_limit_key(): + """Generate rate limit key per IP + endpoint for separate limits per endpoint""" + return f"{get_remote_address()}:{request.endpoint}" + def security_check(fn): @wraps(fn) - def get_multiplied_rate_limit(): - """Get rate limit with appropriate multiplier based on API version""" - from flask import request - base_limit = ConfigReader.read_config("rate_limiting") - if base_limit: - import re - match = re.match(r'(\d+)(/\w+)', base_limit) - if match: - number, unit = match.groups() - # v2 API has 4 endpoints sharing the limit, v1 
API has only 1 - multiplier = 1 if request.path.startswith('/api/v1') else 4 - return f"{int(number) * multiplier}{unit}" - return base_limit - - @limiter.limit(get_multiplied_rate_limit) + @limiter.limit(get_rate_limit, key_func=get_rate_limit_key) def wrapper(*args, **kwargs): client_ip = request.remote_addr try: diff --git a/keepercommander/service/docker/compose_builder.py b/keepercommander/service/docker/compose_builder.py index 66fc2ec20..f7498608f 100644 --- a/keepercommander/service/docker/compose_builder.py +++ b/keepercommander/service/docker/compose_builder.py @@ -78,7 +78,7 @@ def _build_commander_service(self) -> Dict[str, Any]: service = { 'container_name': 'keeper-service', - 'ports': [f"{self.config['port']}:{self.config['port']}"], + 'ports': [f"127.0.0.1:{self.config['port']}:{self.config['port']}"], 'image': 'keeper/commander:latest', 'command': ' '.join(self._service_cmd_parts), 'healthcheck': self._build_healthcheck(), @@ -121,36 +121,50 @@ def _build_service_command(self) -> None: f"-q {'y' if queue_enabled else 'n'}" ] + self._add_security_options() self._add_tunneling_options() - self._add_tls_options() self._add_docker_options() + def _add_security_options(self) -> None: + """Add advanced security options (IP filtering, rate limiting, encryption)""" + # IP allowed list (only add if not default) + allowed_ip = self.config.get('allowed_ip', '0.0.0.0/0,::/0') + if allowed_ip and allowed_ip != '0.0.0.0/0,::/0': + self._service_cmd_parts.append(f"-aip '{allowed_ip}'") + + # IP denied list + denied_ip = self.config.get('denied_ip', '') + if denied_ip: + self._service_cmd_parts.append(f"-dip '{denied_ip}'") + + # Rate limiting + rate_limit = self.config.get('rate_limit', '') + if rate_limit: + self._service_cmd_parts.append(f"-rl '{rate_limit}'") + + # Encryption (automatically enabled if encryption_key is provided) + encryption_key = self.config.get('encryption_key', '') + if encryption_key: + self._service_cmd_parts.append(f"-ek 
'{encryption_key}'") + + # Token expiration + token_expiration = self.config.get('token_expiration', '') + if token_expiration: + self._service_cmd_parts.append(f"-te '{token_expiration}'") + def _add_tunneling_options(self) -> None: """Add ngrok and Cloudflare tunneling options""" # Ngrok configuration - if self.config.get('ngrok_enabled') and self.config.get('ngrok_token'): - self._service_cmd_parts.append(f"-ng {self.config['ngrok_token']}") - if self.config.get('ngrok_domain'): - self._service_cmd_parts.append(f"-cd {self.config['ngrok_domain']}") + if self.config.get('ngrok_enabled') and self.config.get('ngrok_auth_token'): + self._service_cmd_parts.append(f"-ng {self.config['ngrok_auth_token']}") + if self.config.get('ngrok_custom_domain'): + self._service_cmd_parts.append(f"-cd {self.config['ngrok_custom_domain']}") # Cloudflare configuration - if self.config.get('cloudflare_enabled') and self.config.get('cloudflare_token'): - self._service_cmd_parts.append(f"-cf {self.config['cloudflare_token']}") - if self.config.get('cloudflare_domain'): - self._service_cmd_parts.append(f"-cfd {self.config['cloudflare_domain']}") - - def _add_tls_options(self) -> None: - """Add TLS certificate options and volumes""" - if self.config.get('tls_enabled') and self.config.get('cert_file'): - cert_file = self.config['cert_file'] - cert_basename = os.path.basename(cert_file) - - self._service_cmd_parts.append(f"-crtf /certs/{cert_basename}") - if self.config.get('cert_password'): - self._service_cmd_parts.append(f"-crtp {self.config['cert_password']}") - - # Add volume mount for certificate - self._volumes.append(f"{cert_file}:/certs/{cert_basename}:ro") + if self.config.get('cloudflare_enabled') and self.config.get('cloudflare_tunnel_token'): + self._service_cmd_parts.append(f"-cf {self.config['cloudflare_tunnel_token']}") + if self.config.get('cloudflare_custom_domain'): + self._service_cmd_parts.append(f"-cfd {self.config['cloudflare_custom_domain']}") def 
_add_docker_options(self) -> None: """Add Docker-specific parameters (KSM config, record UIDs)""" diff --git a/keepercommander/service/docker/models.py b/keepercommander/service/docker/models.py index 44824f9a2..04a5d74b3 100644 --- a/keepercommander/service/docker/models.py +++ b/keepercommander/service/docker/models.py @@ -23,11 +23,15 @@ class DockerSetupConstants: """Constants for Docker setup command""" - # Default resource names - DEFAULT_FOLDER_NAME = 'CSMD Folder' - DEFAULT_APP_NAME = 'CSMD KSM App' - DEFAULT_RECORD_NAME = 'CSMD Config' - DEFAULT_SLACK_RECORD_NAME = 'CSMD Slack Config' + # Default resource names for service-docker-setup + DEFAULT_FOLDER_NAME = 'Commander Service Mode - Docker' + DEFAULT_APP_NAME = 'Commander Service Mode - KSM App' + DEFAULT_RECORD_NAME = 'Commander Service Mode Docker Config' + DEFAULT_CLIENT_NAME = 'Commander Service Mode - KSM App Client' + + # Default resource names for slack-app-setup + DEFAULT_SLACK_FOLDER_NAME = 'Commander Service Mode - Slack App' + DEFAULT_SLACK_RECORD_NAME = 'Commander Service Mode Slack App Config' # Default service configuration DEFAULT_PORT = 8900 @@ -82,9 +86,14 @@ class ServiceConfig: cloudflare_enabled: bool cloudflare_tunnel_token: str cloudflare_custom_domain: str - tls_enabled: bool - cert_file: str - cert_password: str + allowed_ip: str = '0.0.0.0/0,::/0' + denied_ip: str = '' + rate_limit: str = '' + encryption_enabled: bool = False + encryption_key: str = '' + token_expiration: str = '' + ngrok_public_url: str = '' + cloudflare_public_url: str = '' @dataclass diff --git a/keepercommander/service/docker/setup_base.py b/keepercommander/service/docker/setup_base.py index 15b9a4c3f..c87a544ba 100644 --- a/keepercommander/service/docker/setup_base.py +++ b/keepercommander/service/docker/setup_base.py @@ -18,17 +18,21 @@ import io import json +import logging import os import sys import tempfile +from typing import Dict, Any from ...commands.folder import FolderMakeCommand from 
...commands.ksm import KSMCommand from ... import api, vault, utils, attachment, record_management, loginv3 +from ...display import bcolors from ...error import CommandError -from .models import SetupResult, SetupStep +from .models import SetupResult, SetupStep, DockerSetupConstants from .printer import DockerSetupPrinter +from ..config.config_validation import ConfigValidator, ValidationError class DockerSetupBase: @@ -112,7 +116,13 @@ def _setup_device(self, params, timeout: str) -> None: # Timeout DockerSetupPrinter.print_success(f"Setting logout timeout to {timeout}...") - ThisDeviceCommand().execute(params, ops=['timeout', timeout]) + # Suppress command output + old_stdout = sys.stdout + sys.stdout = io.StringIO() + try: + ThisDeviceCommand().execute(params, ops=['timeout', timeout]) + finally: + sys.stdout = old_stdout except Exception as e: raise CommandError('docker-setup', f'Device setup failed: {str(e)}') @@ -272,9 +282,12 @@ def _share_folder_with_app(self, params, app_uid: str, folder_uid: str) -> None: if not app_rec: raise CommandError('docker-setup', 'App not found') - # Suppress output + # Suppress all output (stdout and logging) old_stdout = sys.stdout + old_log_level = logging.root.level + sys.stdout = io.StringIO() + logging.root.setLevel(logging.CRITICAL + 1) # Disable all logging try: KSMCommand.add_app_share( params, @@ -284,15 +297,15 @@ def _share_folder_with_app(self, params, app_uid: str, folder_uid: str) -> None: ) finally: sys.stdout = old_stdout - - DockerSetupPrinter.print_success("Folder shared with app successfully") + logging.root.setLevel(old_log_level) + DockerSetupPrinter.print_success("Folder shared with app") except Exception as e: raise CommandError('docker-setup', f'Failed to share folder with app: {str(e)}') def _create_client_device(self, params, app_uid: str, app_name: str) -> str: """Create client device and return b64 config""" try: - client_name = f"{app_name} Docker Client" + client_name = 
DockerSetupConstants.DEFAULT_CLIENT_NAME tokens_and_devices = KSMCommand.add_client( params=params, @@ -316,3 +329,73 @@ def _create_client_device(self, params, app_uid: str, app_name: str) -> str: except Exception as e: raise CommandError('docker-setup', f'Failed to create client device: {str(e)}') + # ======================== + # Shared Configuration Methods + # ======================== + + def _get_ngrok_config(self) -> Dict[str, Any]: + """Get ngrok configuration""" + print(f"\n{bcolors.BOLD}Ngrok Tunneling (optional):{bcolors.ENDC}") + print(f" Generate a public URL for your service using ngrok") + use_ngrok = input(f"{bcolors.OKBLUE}Enable ngrok? [Press Enter for No] (y/n):{bcolors.ENDC} ").strip().lower() == 'y' + + config = {'ngrok_enabled': use_ngrok, 'ngrok_auth_token': '', 'ngrok_custom_domain': '', 'ngrok_public_url': ''} + + if use_ngrok: + while True: + token = input(f"{bcolors.OKBLUE}Ngrok auth token:{bcolors.ENDC} ").strip() + try: + config['ngrok_auth_token'] = ConfigValidator.validate_ngrok_token(token) + break + except ValidationError as e: + print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") + + # Validate custom domain if provided (ngrok allows subdomain prefixes) + domain = input(f"{bcolors.OKBLUE}Ngrok custom domain [Press Enter to skip]:{bcolors.ENDC} ").strip() + if domain: + while True: + try: + config['ngrok_custom_domain'] = ConfigValidator.validate_domain(domain, require_tld=False) + # Construct ngrok public URL + if '.' 
not in config['ngrok_custom_domain']: + config['ngrok_public_url'] = f"https://{config['ngrok_custom_domain']}.ngrok.io" + else: + config['ngrok_public_url'] = f"https://{config['ngrok_custom_domain']}" + break + except ValidationError as e: + print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") + domain = input(f"{bcolors.OKBLUE}Ngrok custom domain [Press Enter to skip]:{bcolors.ENDC} ").strip() + if not domain: + break + + return config + + def _get_cloudflare_config(self) -> Dict[str, Any]: + """Get Cloudflare configuration""" + print(f"\n{bcolors.BOLD}Cloudflare Tunneling (optional):{bcolors.ENDC}") + print(f" Generate a public URL for your service using Cloudflare") + use_cloudflare = input(f"{bcolors.OKBLUE}Enable Cloudflare? [Press Enter for No] (y/n):{bcolors.ENDC} ").strip().lower() == 'y' + + config = {'cloudflare_enabled': use_cloudflare, 'cloudflare_tunnel_token': '', + 'cloudflare_custom_domain': '', 'cloudflare_public_url': ''} + + if use_cloudflare: + while True: + token = input(f"{bcolors.OKBLUE}Cloudflare tunnel token:{bcolors.ENDC} ").strip() + try: + config['cloudflare_tunnel_token'] = ConfigValidator.validate_cloudflare_token(token) + break + except ValidationError as e: + print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") + + while True: + domain = input(f"{bcolors.OKBLUE}Cloudflare custom domain:{bcolors.ENDC} ").strip() + try: + config['cloudflare_custom_domain'] = ConfigValidator.validate_domain(domain) + # Construct cloudflare public URL + config['cloudflare_public_url'] = f"https://{config['cloudflare_custom_domain']}" + break + except ValidationError as e: + print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") + + return config diff --git a/unit-tests/service/test_config_validation.py b/unit-tests/service/test_config_validation.py index f1482d090..77b28aa9e 100644 --- a/unit-tests/service/test_config_validation.py +++ b/unit-tests/service/test_config_validation.py @@ -83,6 +83,10 @@ def test_validate_rate_limit_invalid(self): 'abc', 
'10/second', '100 by hour', + '0/minute', + '0/hour', + '0/day', + '0 per minute', ] for limit in invalid_limits: with self.subTest(limit=limit): diff --git a/unit-tests/service/test_create_service.py b/unit-tests/service/test_create_service.py index 4c61c839a..a81017ea8 100644 --- a/unit-tests/service/test_create_service.py +++ b/unit-tests/service/test_create_service.py @@ -41,7 +41,7 @@ def test_execute_service_already_running(self, mock_service_manager): def test_handle_configuration_streamlined(self): """Test streamlined configuration handling.""" config_data = self.command.service_config.create_default_config() - args = StreamlineArgs(port=8080, commands='record-list', ngrok=None, allowedip='0.0.0.0' ,deniedip='', ngrok_custom_domain=None, cloudflare=None, cloudflare_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled='y', update_vault_record=None) + args = StreamlineArgs(port=8080, commands='record-list', ngrok=None, allowedip='0.0.0.0' ,deniedip='', ngrok_custom_domain=None, cloudflare=None, cloudflare_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled='y', update_vault_record=None, ratelimit=None, encryption_key=None, token_expiration=None) with patch.object(self.command.config_handler, 'handle_streamlined_config') as mock_streamlined: self.command._handle_configuration(config_data, self.params, args) @@ -50,7 +50,7 @@ def test_handle_configuration_streamlined(self): def test_handle_configuration_interactive(self): """Test interactive configuration handling.""" config_data = self.command.service_config.create_default_config() - args = StreamlineArgs(port=None, commands=None, ngrok=None, allowedip='' ,deniedip='', ngrok_custom_domain=None, cloudflare=None, cloudflare_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled=None, update_vault_record=None) + args = StreamlineArgs(port=None, 
commands=None, ngrok=None, allowedip='' ,deniedip='', ngrok_custom_domain=None, cloudflare=None, cloudflare_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled=None, update_vault_record=None, ratelimit=None, encryption_key=None, token_expiration=None) with patch.object(self.command.config_handler, 'handle_interactive_config') as mock_interactive, \ patch.object(self.command.security_handler, 'configure_security') as mock_security: @@ -61,7 +61,7 @@ def test_handle_configuration_interactive(self): def test_create_and_save_record(self): """Test record creation and saving.""" config_data = self.command.service_config.create_default_config() - args = StreamlineArgs(port=8080, commands='record-list', ngrok=None, allowedip='0.0.0.0' ,deniedip='', ngrok_custom_domain=None, cloudflare=None, cloudflare_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled='y', update_vault_record=None) + args = StreamlineArgs(port=8080, commands='record-list', ngrok=None, allowedip='0.0.0.0' ,deniedip='', ngrok_custom_domain=None, cloudflare=None, cloudflare_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled='y', update_vault_record=None, ratelimit=None, encryption_key=None, token_expiration=None) with patch.object(self.command.service_config, 'create_record') as mock_create_record, \ patch.object(self.command.service_config, 'save_config') as mock_save_config: @@ -72,7 +72,9 @@ def test_create_and_save_record(self): mock_create_record.assert_called_once_with( config_data["is_advanced_security_enabled"], self.params, - args.commands + args.commands, + args.token_expiration, + None # record_uid (update_vault_record is None) ) if(args.fileformat): config_data["fileformat"]= args.fileformat @@ -81,7 +83,7 @@ def test_create_and_save_record(self): def test_validation_error_handling(self): """Test handling of validation errors during 
execution.""" - args = StreamlineArgs(port=-1, commands='record-list', ngrok=None, allowedip='0.0.0.0' ,deniedip='', ngrok_custom_domain=None, cloudflare=None, cloudflare_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled='y', update_vault_record=None) + args = StreamlineArgs(port=-1, commands='record-list', ngrok=None, allowedip='0.0.0.0' ,deniedip='', ngrok_custom_domain=None, cloudflare=None, cloudflare_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled='y', update_vault_record=None, ratelimit=None, encryption_key=None, token_expiration=None) with patch('builtins.print') as mock_print: with patch.object(self.command.service_config, 'create_default_config') as mock_create_config: @@ -107,7 +109,10 @@ def test_cloudflare_streamlined_configuration(self): fileformat='json', run_mode='foreground', queue_enabled='y', - update_vault_record=None + update_vault_record=None, + ratelimit=None, + encryption_key=None, + token_expiration=None ) with patch.object(self.command.config_handler, 'handle_streamlined_config') as mock_streamlined: @@ -130,7 +135,10 @@ def test_cloudflare_validation_missing_token(self): fileformat='json', run_mode='foreground', queue_enabled='y', - update_vault_record=None + update_vault_record=None, + ratelimit=None, + encryption_key=None, + token_expiration=None ) with patch('builtins.print') as mock_print: @@ -155,7 +163,10 @@ def test_cloudflare_validation_missing_domain(self): fileformat='json', run_mode='foreground', queue_enabled='y', - update_vault_record=None + update_vault_record=None, + ratelimit=None, + encryption_key=None, + token_expiration=None ) with patch('builtins.print') as mock_print: @@ -180,7 +191,10 @@ def test_cloudflare_and_ngrok_mutual_exclusion(self): fileformat='json', run_mode='foreground', queue_enabled='y', - update_vault_record=None + update_vault_record=None, + ratelimit=None, + encryption_key=None, + 
token_expiration=None ) with patch('builtins.print') as mock_print: @@ -216,7 +230,10 @@ def test_cloudflare_tunnel_startup_success(self, mock_cloudflare_configure): fileformat='json', run_mode='foreground', queue_enabled='y', - update_vault_record=None + update_vault_record=None, + ratelimit=None, + encryption_key=None, + token_expiration=None ) with patch.object(self.command.config_handler, 'handle_streamlined_config') as mock_streamlined: @@ -263,7 +280,10 @@ def test_cloudflare_tunnel_startup_failure(self, mock_get_status, mock_start_ser fileformat='json', run_mode='foreground', queue_enabled='y', - update_vault_record=None + update_vault_record=None, + ratelimit=None, + encryption_key=None, + token_expiration=None ) # Verify that the error was printed @@ -286,7 +306,10 @@ def test_cloudflare_token_validation(self): fileformat='json', run_mode='foreground', queue_enabled='y', - update_vault_record=None + update_vault_record=None, + ratelimit=None, + encryption_key=None, + token_expiration=None ) with patch.object(self.command.config_handler, 'handle_streamlined_config') as mock_streamlined: @@ -311,7 +334,10 @@ def test_cloudflare_domain_validation(self): fileformat='json', run_mode='foreground', queue_enabled='y', - update_vault_record=None + update_vault_record=None, + ratelimit=None, + encryption_key=None, + token_expiration=None ) with patch.object(self.command.config_handler, 'handle_streamlined_config') as mock_streamlined: diff --git a/unit-tests/service/test_service_config.py b/unit-tests/service/test_service_config.py index 2dc97b9f3..4a99d6b12 100644 --- a/unit-tests/service/test_service_config.py +++ b/unit-tests/service/test_service_config.py @@ -35,7 +35,7 @@ def setUp(self): def test_create_default_config(self): """Test creation of default configuration.""" config = self.service_config.create_default_config() - self.assertEqual(config["title"], "Commander Service Mode") + self.assertEqual(config["title"], "Commander Service Mode Config") 
self.assertIsNone(config["port"]) self.assertEqual(config["ngrok"], "n") self.assertEqual(config["ngrok_auth_token"], "") @@ -111,7 +111,7 @@ def test_validate_command_list_valid(self, mock_cli_handler): """ params = MagicMock(spec=KeeperParams) result = self.service_config.validate_command_list("ls, get", params) - self.assertEqual(result, "ls, get") + self.assertEqual(result, "ls,get") @patch.object(ServiceConfig, 'cli_handler') def test_validate_command_list_invalid(self, mock_cli_handler): From a7a8b7559a948933e69793f3dff1c5c25fc7b0d0 Mon Sep 17 00:00:00 2001 From: pvagare-ks Date: Wed, 21 Jan 2026 21:11:57 +0530 Subject: [PATCH 21/24] Streamline enterprise/msp node, role, and team management and bug fix (#1773) (#1774) * Streamline enterprise node, role, and team management bug fix (#1773) * updated status code * added status code 207 --- keepercommander/commands/enterprise.py | 243 ++++++++++++++++-- keepercommander/service/util/command_util.py | 15 +- .../service/util/parse_keeper_response.py | 124 ++++++++- unit-tests/test_command_enterprise.py | 7 + 4 files changed, 360 insertions(+), 29 deletions(-) diff --git a/keepercommander/commands/enterprise.py b/keepercommander/commands/enterprise.py index f4c2c7712..25c9bc212 100644 --- a/keepercommander/commands/enterprise.py +++ b/keepercommander/commands/enterprise.py @@ -132,6 +132,7 @@ def register_command_info(aliases, command_info): enterprise_node_parser = argparse.ArgumentParser(prog='enterprise-node', description='Manage an enterprise node') +enterprise_node_parser.add_argument('-f', '--force', dest='force', action='store_true', help='do not prompt for confirmation') enterprise_node_parser.add_argument('--wipe-out', dest='wipe_out', action='store_true', help='wipe out node content') enterprise_node_parser.add_argument('--add', dest='add', action='store_true', help='create node') enterprise_node_parser.add_argument('--parent', dest='parent', action='store', help='Parent Node Name or ID') @@ -1285,7 
+1286,7 @@ def traverse_to_root(node_id, depth): if not node.get('parent_id'): raise CommandError('enterprise-node', 'Cannot wipe out root node') - answer = user_choice( + answer = 'y' if kwargs.get('force') else user_choice( bcolors.FAIL + bcolors.BOLD + '\nALERT!\n' + bcolors.ENDC + 'This action cannot be undone.\n\n' + 'Do you want to proceed with deletion?', 'yn', 'n') @@ -1309,14 +1310,15 @@ def traverse_to_root(node_id, depth): roles = [x for x in params.enterprise['roles'] if x['node_id'] in nodes] role_set = set([x['role_id'] for x in managed_nodes]) role_set = role_set.union([x['role_id'] for x in roles]) - for ru in params.enterprise['role_users']: - if ru['role_id'] in role_set: - rq = { - 'command': 'role_user_remove', - 'role_id': ru['role_id'], - 'enterprise_user_id': ru['enterprise_user_id'] - } - request_batch.append(rq) + if 'role_users' in params.enterprise: + for ru in params.enterprise['role_users']: + if ru['role_id'] in role_set: + rq = { + 'command': 'role_user_remove', + 'role_id': ru['role_id'], + 'enterprise_user_id': ru['enterprise_user_id'] + } + request_batch.append(rq) for mn in managed_nodes: rq = { 'command': 'role_managed_node_remove', @@ -1356,6 +1358,12 @@ def traverse_to_root(node_id, depth): 'node_id': node_id } request_batch.append(rq) + + # Check if there's anything to wipe out + if not request_batch: + node_name = node.get('data', {}).get('displayname') or str(node['node_id']) + logging.info('Node \'%s\' is empty. Nothing to wipe out.', node_name) + return elif parent_id or kwargs.get('displayname'): display_name = kwargs.get('displayname') def is_in_chain(node_id, parent_id): @@ -2157,17 +2165,62 @@ def execute(self, params, **kwargs): raise CommandError('enterprise-user', 'No root nodes were detected. 
Specify --node parameter') node_id = root_nodes[0] + # Collect role_ids for newly created roles + new_role_ids = [] for role_name in role_names: data = json.dumps({ "displayname": role_name }).encode('utf-8') + role_id = self.get_enterprise_id(params) + new_role_ids.append(role_id) rq = { "command": "role_add", - "role_id": self.get_enterprise_id(params), + "role_id": role_id, "node_id": node_id, "encrypted_data": utils.base64_url_encode(crypto.encrypt_aes_v1(data, tree_key)), "visible_below": (kwargs.get('visible_below') == 'on') or False, "new_user_inherit": (kwargs.get('new_user') == 'on') or False } request_batch.append(rq) + + if kwargs.get('add_admin') and new_role_ids: + skip_display = True + node_lookup = {} + if 'nodes' in params.enterprise: + for node in params.enterprise['nodes']: + node_lookup[str(node['node_id'])] = node + if node.get('parent_id'): + node_name = node['data'].get('displayname') + else: + node_name = params.enterprise['enterprise_name'] + node_name = node_name.lower() + value = node_lookup.get(node_name) + if value is None: + value = node + elif type(value) == list: + value.append(node) + else: + value = [value, node] + node_lookup[node_name] = value + + admin_nodes = {} + for admin_node_name in kwargs.get('add_admin'): + value = node_lookup.get(admin_node_name.lower()) + if value is None: + logging.warning('Node %s could not be resolved', admin_node_name) + elif isinstance(value, dict): + admin_nodes[value['node_id']] = value['data'].get('displayname') or params.enterprise['enterprise_name'] + elif isinstance(value, list): + logging.warning('Node name \'%s\' is not unique. Use Node ID. 
Skipping', admin_node_name) + + for role_id in new_role_ids: + for admin_node_id, admin_node_display_name in admin_nodes.items(): + rq = { + "command": "role_managed_node_add", + "role_id": role_id, + "managed_node_id": admin_node_id, + "cascade_node_management": (kwargs.get('cascade') == 'on') or False, + "tree_keys": [] + } + request_batch.append(rq) else: for role_name in role_names: logging.warning('Role %s is not found: Skipping', role_name) @@ -3057,6 +3110,88 @@ class EnterpriseTeamCommand(EnterpriseCommand): def get_parser(self): return enterprise_team_parser + @staticmethod + def _resolve_users(params, user_list): + """Resolve user names/IDs to user objects. Returns dict {user_id: user_node}""" + users = {} + for u in user_list: + uname = u.lower() + user_node = None + if 'users' in params.enterprise: + for user in params.enterprise['users']: + if uname in {str(user['enterprise_user_id']), user['username'].lower()}: + user_node = user + break + if user_node: + users[user_node['enterprise_user_id']] = user_node + else: + logging.warning('User %s could not be resolved', u) + return users + + @staticmethod + def _create_add_user_request(params, team_uid, team_key, user, hsf_flag): + """Create a request to add a user to a team with proper encryption""" + user_id = user['enterprise_user_id'] + username = user['username'] + + api.load_user_public_keys(params, [username], False) + user_keys = params.key_cache.get(username) + + if not user_keys: + logging.warning('Cannot get user %s public key', username) + return None + + rq = { + 'command': 'team_enterprise_user_add', + 'team_uid': team_uid, + 'enterprise_user_id': user_id, + 'user_type': 2 if hsf_flag == 'on' else 1 if hsf_flag else 0, + } + + if params.forbid_rsa: + if user_keys.ec: + ec_key = crypto.load_ec_public_key(user_keys.ec) + encrypted_team_key = crypto.encrypt_ec(team_key, ec_key) + rq['team_key'] = utils.base64_url_encode(encrypted_team_key) + rq['team_key_type'] = 'encrypted_by_public_key_ecc' + 
else: + logging.warning('User %s does not have EC key', username) + return None + else: + if user_keys.rsa: + rsa_key = crypto.load_rsa_public_key(user_keys.rsa) + encrypted_team_key = crypto.encrypt_rsa(team_key, rsa_key) + rq['team_key'] = utils.base64_url_encode(encrypted_team_key) + rq['team_key_type'] = 'encrypted_by_public_key' + else: + logging.warning('User %s does not have RSA key', username) + return None + + return rq + + @staticmethod + def _resolve_roles(params, role_list): + """Resolve role names/IDs to role objects. Returns dict {role_id: role_name} excluding admin roles""" + role_changes = {} + for role in role_list: + role_node = next(( + r for r in params.enterprise['roles'] + if role in (str(r['role_id']), r['data'].get('displayname')) + ), None) + if role_node: + # Check if role has administrative permissions + is_managed_role = any( + mn['role_id'] == role_node['role_id'] + for mn in params.enterprise.get('managed_nodes', []) + ) + if is_managed_role: + logging.warning('Teams cannot be assigned to roles with administrative permissions.') + else: + role_changes[role_node['role_id']] = role_node['data'].get('displayname') + else: + logging.warning('Role %s cannot be resolved', role) + return role_changes + def execute(self, params, **kwargs): if (kwargs.get('add') or kwargs.get('approve')) and kwargs.get('remove'): raise CommandError('enterprise-team', "'add'/'approve' and 'delete' commands are mutually exclusive.") @@ -3110,7 +3245,8 @@ def execute(self, params, **kwargs): matched_teams = list(matched.values()) request_batch = [] non_batch_update_msgs = [] - has_warnings = False + has_warnings = False + new_team_roles = None if kwargs.get('add') or kwargs.get('approve'): queue = [] @@ -3133,6 +3269,8 @@ def execute(self, params, **kwargs): raise CommandError('enterprise-user', 'No root nodes were detected. 
Specify --node parameter') node_id = root_nodes[0] + new_teams = {} # {team_uid: (team_name, team_key, is_new)} + for item in queue: is_new_team = type(item) == str team_name = item if is_new_team else item['name'] @@ -3164,6 +3302,35 @@ def execute(self, params, **kwargs): rq['private_key'] = utils.base64_url_encode(encrypted_rsa_private_key) request_batch.append(rq) + + if is_new_team: + new_teams[team_uid] = (team_name, team_key, True) + + if kwargs.get('add_user') and new_teams: + skip_display = True + users = self._resolve_users(params, kwargs.get('add_user')) + if not users: + has_warnings = True + + hsf = kwargs.get('hide_shared_folders') or '' + for team_uid, (team_name, team_key, is_new) in new_teams.items(): + for user_id, user in users.items(): + if user['status'] == 'active': + rq = self._create_add_user_request(params, team_uid, team_key, user, hsf) + if rq: + request_batch.append(rq) + else: + request_batch.append({ + 'command': 'team_queue_user', + 'team_uid': team_uid, + 'enterprise_user_id': user_id + }) + + # Role additions for new teams will be handled after team creation + new_team_roles = None + if kwargs.get('add_role') and new_teams: + skip_display = True + new_team_roles = (new_teams, kwargs.get('add_role')) else: for team_name in team_names: logging.warning('\'%s\' team is not found: Skipping', team_name) @@ -3280,11 +3447,31 @@ def execute(self, params, **kwargs): 'enterprise_user_id': user_id } else: - rq = { - 'command': 'team_enterprise_user_remove', - 'team_uid': team['team_uid'], - 'enterprise_user_id': user_id - } + is_member = False + username = user['username'] + team_name = team['name'] + + # Check in active team members + if 'team_users' in params.enterprise: + is_member = any(1 for t in params.enterprise['team_users'] + if t['team_uid'] == team_uid and t['enterprise_user_id'] == user_id) + + # Check in queued team members + if not is_member and 'queued_team_users' in params.enterprise: + for qtu in 
params.enterprise['queued_team_users']: + if qtu['team_uid'] == team_uid and user_id in qtu.get('users', []): + is_member = True + break + + if is_member: + rq = { + 'command': 'team_enterprise_user_remove', + 'team_uid': team['team_uid'], + 'enterprise_user_id': user_id + } + else: + logging.warning('User %s is not a member of team \'%s\'', username, team_name) + has_warnings = True if rq: request_batch.append(rq) elif node_id or kwargs.get('name') or kwargs.get('restrict_edit') or kwargs.get('restrict_share') or kwargs.get('restrict_view'): @@ -3333,11 +3520,35 @@ def execute(self, params, **kwargs): else: logging.warning('\'%s\' %s team failed to %s user %s: %s', team_name, 'queued' if command == 'team_queue_user' else '', 'delete' if command == 'team_enterprise_user_remove' else 'add', user_name, rs['message']) + elif command in {'role_team_add', 'role_team_remove'}: + role_id = rq.get('role_id') + role_name = next((r['data'].get('displayname') for r in params.enterprise.get('roles', []) + if r['role_id'] == role_id), str(role_id)) + action = 'assign' if command == 'role_team_add' else 'remove' + if rs['result'] == 'success': + logging.info('\'%s\' role %sed to team \'%s\'', role_name, action, team_name) + else: + logging.warning('Failed to %s role \'%s\' to/from team \'%s\': %s', action, role_name, team_name, rs['message']) if request_batch or len(non_batch_update_msgs) > 0: for update_msg in non_batch_update_msgs: logging.info(update_msg) api.query_enterprise(params) + + # Handle role additions for newly created teams (must be done after team exists) + if new_team_roles: + new_teams_dict, role_list = new_team_roles + # Fetch updated team data to get proper team objects + created_teams = [] + for team_uid in new_teams_dict.keys(): + team_data = next((t for t in params.enterprise.get('teams', []) if t['team_uid'] == team_uid), None) + if team_data: + created_teams.append(team_data) + + if created_teams: + role_msgs = self.change_team_roles(params, 
created_teams, role_list, None) + for msg in role_msgs: + logging.info(msg) elif not has_warnings: for team in matched_teams: print('\n') diff --git a/keepercommander/service/util/command_util.py b/keepercommander/service/util/command_util.py index 048039ba3..a6ce48250 100644 --- a/keepercommander/service/util/command_util.py +++ b/keepercommander/service/util/command_util.py @@ -148,9 +148,18 @@ def execute(cls, command: str) -> Tuple[Any, int]: # Always let the parser handle the response (including empty responses and logs) response = parse_keeper_response(command, response, log_output) - status_code = 200 - if isinstance(response, dict) and response.get("status") == "error": - status_code = 400 + if isinstance(response, dict): + # Extract status_code and remove it from response body + if 'status_code' in response: + status_code = response.pop('status_code') + elif response.get("status") == "error": + status_code = 400 + elif response.get("status") == "warning": + status_code = 400 + else: + status_code = 200 + else: + status_code = 200 response = CommandExecutor.encrypt_response(response) logger.debug(f"Command executed successfully") diff --git a/keepercommander/service/util/parse_keeper_response.py b/keepercommander/service/util/parse_keeper_response.py index 049590cfd..0ffdfc97d 100644 --- a/keepercommander/service/util/parse_keeper_response.py +++ b/keepercommander/service/util/parse_keeper_response.py @@ -930,38 +930,142 @@ def _parse_logging_based_command(command: str, response_str: str) -> Dict[str, A # Filter out biometric and persistent login messages for cleaner API responses response_str = KeeperResponseParser._filter_login_messages(response_str) - # Determine status based on common patterns + # Determine status and status_code based on patterns status = "success" + status_code = None + + response_lower = response_str.lower() + + success_indicators = [ + "created", "added", "removed", "updated", "deleted", + "successfully", "completed", "done" + ] + + 
has_success_indicator = any(indicator in response_lower for indicator in success_indicators) + + forbidden_patterns = [ + "not an msp administrator", + "permission denied", + "access denied", + "unauthorized access", + "forbidden", + "must be a root admin", + "admin privileges required", + "admin account required", + "insufficient privileges", + "not authorized", + "is restricted to", + "command is restricted" + ] + + conflict_patterns = [ + "already a member", + "already exists", + "already in", + "already accepted", + "duplicate" + ] + + not_found_patterns = [ + "could not be resolved", + "is not found", + "not found", + "does not exist", + "not a member of", + "cannot be found", + "cannot find" + ] + + bad_request_patterns = [ + "invalid", + "not valid", + "not allowed", + "not unique", + "unrecognized", + "reserved", + "character", + "empty", + "cannot be", + "cannot assign", + "cannot move", + "cannot get", + "not integer" + ] - # Check for error patterns (case insensitive) error_patterns = [ - "error", "failed", "invalid", "not found", "does not exist", - "permission denied", "unauthorized", "cannot be", "character", "reserved", "unrecognized" + "error", "failed", "failure" ] - # Check for warning patterns - warning_patterns = ["warning:", "already exists"] + warning_patterns = ["warning:", "skipping"] - response_lower = response_str.lower() + has_forbidden = any(pattern in response_lower for pattern in forbidden_patterns) + has_not_found = any(pattern in response_lower for pattern in not_found_patterns) + has_conflict = any(pattern in response_lower for pattern in conflict_patterns) + has_bad_request = any(pattern in response_lower for pattern in bad_request_patterns) + has_error = any(pattern in response_lower for pattern in error_patterns) + has_warning = any(pattern in response_lower for pattern in warning_patterns) - if any(pattern in response_lower for pattern in error_patterns): + if has_success_indicator and (has_not_found or has_bad_request or 
has_error): + return { + "status": "partial_success", + "status_code": 207, + "command": command.split()[0] if command.split() else command, + "message": response_str, + "data": None + } + + if has_forbidden: return { "status": "error", + "status_code": 403, "command": command.split()[0] if command.split() else command, "error": response_str } - elif any(pattern in response_lower for pattern in warning_patterns): + elif has_not_found: + return { + "status": "error", + "status_code": 500, + "command": command.split()[0] if command.split() else command, + "error": response_str + } + elif has_conflict: + return { + "status": "warning", + "status_code": 409, + "command": command.split()[0] if command.split() else command, + "message": response_str, + "data": None + } + elif has_bad_request: + return { + "status": "error", + "status_code": 400, + "command": command.split()[0] if command.split() else command, + "error": response_str + } + elif has_error: + return { + "status": "error", + "status_code": 500, + "command": command.split()[0] if command.split() else command, + "error": response_str + } + elif has_warning: status = "warning" + status_code = 400 # Return the actual log message with proper formatting if response_str: formatted_message = KeeperResponseParser._format_multiline_message(response_str) - return { + result = { "status": status, "command": command.split()[0] if command.split() else command, "message": formatted_message, "data": None } + if status_code: + result["status_code"] = status_code + return result else: # No output after cleaning - use existing empty response handler return KeeperResponseParser._handle_empty_response(command) diff --git a/unit-tests/test_command_enterprise.py b/unit-tests/test_command_enterprise.py index 7bdcea8f9..ac8a0bbde 100644 --- a/unit-tests/test_command_enterprise.py +++ b/unit-tests/test_command_enterprise.py @@ -196,6 +196,13 @@ def test_enterprise_team_user(self): cmd.execute(params, add_user=[ent_env.user2_email], 
team=[ent_env.team1_uid]) self.assertEqual(len(TestEnterprise.expected_commands), 0) + # Manually update the mock data to reflect that user2 is now in team1 + params.enterprise['team_users'].append({ + 'team_uid': ent_env.team1_uid, + 'enterprise_user_id': ent_env.user2_id, + 'user_type': 0 + }) + TestEnterprise.expected_commands = ['team_enterprise_user_remove'] cmd.execute(params, remove_user=[ent_env.user2_email], team=[ent_env.team1_uid]) self.assertEqual(len(TestEnterprise.expected_commands), 0) From 60abbc66fdac3d88d2bd125aa984213970d59055 Mon Sep 17 00:00:00 2001 From: Sergey Kolupaev Date: Wed, 14 Jan 2026 18:18:46 -0800 Subject: [PATCH 22/24] KEPM: offline registration --- keepercommander/commands/pedm/pedm_admin.py | 140 ++++++++++++++++- keepercommander/pedm/admin_plugin.py | 23 ++- .../proto/NotificationCenter_pb2.py | 54 +++---- .../proto/NotificationCenter_pb2.pyi | 30 +++- keepercommander/proto/pedm_pb2.py | 144 ++++++++++-------- keepercommander/proto/pedm_pb2.pyi | 70 +++++++++ 6 files changed, 363 insertions(+), 98 deletions(-) diff --git a/keepercommander/commands/pedm/pedm_admin.py b/keepercommander/commands/pedm/pedm_admin.py index 645966c56..dcc3410a0 100644 --- a/keepercommander/commands/pedm/pedm_admin.py +++ b/keepercommander/commands/pedm/pedm_admin.py @@ -180,6 +180,7 @@ def __init__(self): self.register_command_new(PedmPolicyCommand(), 'policy', 'p') self.register_command_new(PedmCollectionCommand(), 'collection', 'c') self.register_command_new(PedmApprovalCommand(), 'approval') + self.register_command_new(PedmOfflineCommand(), 'offline') self.register_command_new(PedmScimCommand(), 'scim') self.register_command_new(pedm_aram.PedmReportCommand(), 'report') #self.register_command_new(PedmBICommand(), 'bi') @@ -1593,7 +1594,7 @@ def execute(self, context: KeeperParams, **kwargs) -> None: class PedmCollectionCommand(base.GroupCommandNew): def __init__(self): - super().__init__('Manage PEDM collections') + super().__init__('Manage EPM 
collections') self.register_command_new(PedmCollectionListCommand(), 'list', 'l') self.register_command_new(PedmCollectionViewCommand(), 'view', 'v') self.register_command_new(PedmCollectionAddCommand(), 'add', 'a') @@ -2214,3 +2215,140 @@ def verify_uid(uids: Any) -> Optional[List[bytes]]: if not status.success: if isinstance(status, admin_types.EntityStatus): logger.warning(f'Failed to remove "{status.entity_uid}": {status.message}') + + +class PedmOfflineCommand(base.GroupCommandNew): + def __init__(self): + super().__init__('Offline agent communication') + self.register_command_new(PedmOfflineRegisterCommand(), 'register') + #self.register_command_new(PedmDeploymentAddCommand(), 'sync', 'a') + + +class PedmOfflineRegisterCommand(base.ArgparseCommand, PedmUtils): + def __init__(self): + parser = argparse.ArgumentParser(prog='register', description='Register offline agent') + parser.add_argument('--deployment', dest='deployment', action='store', required=True, + help='Agent\'s deployment') + parser.add_argument('--output', dest='output', action='store', + help='Registration response filename') + parser.add_argument('file', help='Registration request filename') + super().__init__(parser) + + def execute(self, context: KeeperParams, **kwargs) -> Any: + plugin = admin_plugin.get_pedm_plugin(context) + + deployment = self.resolve_single_deployment(plugin, kwargs.get('deployment')) + + filename = kwargs.get('file') + if not filename: + raise base.CommandError('Offline registration file is not found') + if not os.path.isfile(filename): + raise base.CommandError('Offline registration file is not found') + + with open(filename, 'rt', encoding='utf-8') as f: + offline_request = json.load(f) + + v = offline_request.get('AgentUID') + if not v: + raise base.CommandError('"AgentUID" parameter is missing') + agent_uid = utils.base64_url_decode(v) + v = offline_request.get('PublicKey') + if not v: + raise base.CommandError('"PublicKey" parameter is missing') + public_key = 
utils.base64_url_decode(v) + agent_public_key = crypto.load_ec_public_key(public_key) + machine_id = offline_request.get('MachineID') + + type1_collection: Optional[pedm_pb2.CollectionValue] = None + type202_collection: Optional[pedm_pb2.CollectionValue] = None + agent_description: Optional[bytes] = None + + os_collection = offline_request.get('OsCollection') + if isinstance(os_collection, dict): + try: + collection_fields = pedm_shared.get_collection_required_fields(pedm_shared.CollectionType.OsBuild) + if collection_fields: + os_release = {} + key_parts: List[str] = [] + key_fields = collection_fields.primary_key_fields or collection_fields.all_fields + for k in key_fields: + s = os_collection.get(k) + if not isinstance(s, str) or len(s) == 0: + raise base.CommandError(f'Missing required field "{k}" for agent description') + os_release[k] = s + key_parts.append(s) + key = ''.join(key_parts) + collection_uid = pedm_shared.get_collection_uid(plugin.agent_key, pedm_shared.CollectionType.OsBuild, key) + collection_data = crypto.encrypt_aes_v2(json.dumps(os_release).encode('utf-8'), plugin.agent_key) + type1_collection = pedm_pb2.CollectionValue( + collectionUid=utils.base64_url_decode(collection_uid), + collectionType=pedm_shared.CollectionType.OsBuild, + encryptedData=collection_data + ) + + collection_fields = pedm_shared.get_collection_required_fields(pedm_shared.CollectionType.OsVersion) + if collection_fields: + os_version = {} + key_parts.clear() + key_fields = collection_fields.primary_key_fields or collection_fields.all_fields + for k in key_fields: + s = os_release.get(k) + if not isinstance(s, str) or len(s) == 0: + raise base.CommandError(f'Missing required field "{k}" for agent description') + os_version[k] = s + key_parts.append(s) + key = ''.join(key_parts) + collection_uid = pedm_shared.get_collection_uid(plugin.agent_key, pedm_shared.CollectionType.OsVersion, key) + collection_data = crypto.encrypt_aes_v2(json.dumps(os_version).encode('utf-8'), 
plugin.agent_key) + type202_collection = pedm_pb2.CollectionValue( + collectionUid=utils.base64_url_decode(collection_uid), + collectionType=pedm_shared.CollectionType.OsVersion, + encryptedData=collection_data + ) + + agent_description = json.dumps(os_collection).encode('utf-8') + agent_description = crypto.encrypt_aes_v2(agent_description, plugin.agent_key) + except Exception as e: + logging.warning('Update agent collection error: %s', e) + + rq = pedm_pb2.OfflineAgentRegisterRequest() + rq.agentUid = agent_uid + rq.publicKey = public_key + rq.deploymentUid = utils.base64_url_decode(deployment.deployment_uid) + if machine_id: + rq.machineId = machine_id + if type1_collection: + rq.collection.append(type1_collection) + if type202_collection: + rq.collection.append(type202_collection) + rq.agentData = agent_description + + rs = api.execute_router(context, 'pedm/register_offline_agent', rq, rs_type=pedm_pb2.OfflineAgentRegisterResponse) # type: pedm_pb2.OfflineAgentRegisterResponse + agent_uid = rs.agentUid + + enterprise_data = context.enterprise + enterprise_keys = enterprise_data['keys'] + ec_public = enterprise_keys['ecc_public_key'] + ec_public_key = utils.base64_url_decode(ec_public) + agent_info = pedm_shared.DeploymentAgentInformation(hash_key=plugin.agent_key, peer_public_key=ec_public_key) + + agent_data = json.dumps(agent_info.to_dict()).encode('utf-8') + agent_data = crypto.encrypt_ec(agent_data, agent_public_key) + + host = next((host for host, server in constants.KEEPER_PUBLIC_HOSTS.items() if server == context.server), context.server) + + registration_info = { + 'AgentUID': utils.base64_url_encode(agent_uid), + 'DeploymentUID': deployment.deployment_uid, + 'Host': host, + 'AgentData': utils.base64_url_encode(agent_data) + } + + response = json.dumps(registration_info, indent=2) + output = kwargs.get("output") + if output: + with open(output, 'wt', encoding='utf-8') as f: + f.write(response) + return None + else: + return response diff --git 
a/keepercommander/pedm/admin_plugin.py b/keepercommander/pedm/admin_plugin.py index 4af2d98a3..4dd21c57b 100644 --- a/keepercommander/pedm/admin_plugin.py +++ b/keepercommander/pedm/admin_plugin.py @@ -4,7 +4,7 @@ import datetime import json import logging -from typing import List, Optional, Set, Iterable, Tuple, Dict, Any, cast +from typing import List, Optional, Set, Iterable, Tuple, Dict, Any, cast, Union from ..params import KeeperParams from . import admin_storage, admin_types @@ -669,6 +669,8 @@ def modify_collections(self, *, add_collections: Optional[Iterable[admin_types.CollectionData]] = None, update_collections: Optional[Iterable[admin_types.CollectionData]] = None, remove_collections: Optional[Iterable[str]] = None) -> admin_types.ModifyStatus: + + status = admin_types.ModifyStatus(add=[], update=[], remove=[]) to_add: List[pedm_pb2.CollectionValue] = [] to_update: List[pedm_pb2.CollectionValue] = [] if add_collections is not None: @@ -712,7 +714,6 @@ def modify_collections(self, *, for collection_uid in remove_collections: to_remove.append(utils.base64_url_decode(collection_uid)) - status = admin_types.ModifyStatus(add=[], update=[], remove=[]) while len(to_add) > 0 or len(to_update) > 0 or len(to_remove) > 0: crq = pedm_pb2.CollectionRequest() if len(to_add) > 0: @@ -754,16 +755,26 @@ def get_collection_links(self, *, links: Iterable[admin_types.CollectionLink]) - yield admin_types.CollectionLinkData(collection_link=collection_link, link_data=ld.linkData) def set_collection_links( - self, *, set_links: Optional[Iterable[admin_types.CollectionLink]] = None, + self, *, set_links: Optional[Iterable[Union[admin_types.CollectionLink, admin_types.CollectionLinkData]]] = None, unset_links: Optional[Iterable[admin_types.CollectionLink]] = None ) -> admin_types.ModifyStatus: clrq = pedm_pb2.SetCollectionLinkRequest() if set_links is not None: for coll in set_links: + link: admin_types.CollectionLink + link_data: Optional[bytes] + if isinstance(coll, 
admin_types.CollectionLinkData): + link = coll.collection_link + link_data = coll.link_data + else: + link = coll + link_data = None cln = pedm_pb2.CollectionLinkData() - cln.collectionUid = utils.base64_url_decode(coll.collection_uid) - cln.linkUid = utils.base64_url_decode(coll.link_uid) - cln.linkType = coll.link_type # type: ignore + cln.collectionUid = utils.base64_url_decode(link.collection_uid) + cln.linkUid = utils.base64_url_decode(link.link_uid) + cln.linkType = link.link_type # type: ignore + if link_data: + cln.linkData = link_data clrq.addCollection.append(cln) if unset_links is not None: diff --git a/keepercommander/proto/NotificationCenter_pb2.py b/keepercommander/proto/NotificationCenter_pb2.py index 42eea642d..c612ee515 100644 --- a/keepercommander/proto/NotificationCenter_pb2.py +++ b/keepercommander/proto/NotificationCenter_pb2.py @@ -14,7 +14,7 @@ from . import GraphSync_pb2 as GraphSync__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x18NotificationCenter.proto\x12\x12NotificationCenter\x1a\x0fGraphSync.proto\".\n\rEncryptedData\x12\x0f\n\x07version\x18\x01 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"\xe2\x02\n\x0cNotification\x12\x32\n\x04type\x18\x01 \x01(\x0e\x32$.NotificationCenter.NotificationType\x12>\n\x08\x63\x61tegory\x18\x02 \x01(\x0e\x32(.NotificationCenter.NotificationCategoryB\x02\x18\x01\x12\'\n\x06sender\x18\x03 \x01(\x0b\x32\x17.GraphSync.GraphSyncRef\x12\x16\n\x0esenderFullName\x18\x04 \x01(\t\x12\x38\n\rencryptedData\x18\x05 \x01(\x0b\x32!.NotificationCenter.EncryptedData\x12%\n\x04refs\x18\x06 \x03(\x0b\x32\x17.GraphSync.GraphSyncRef\x12<\n\ncategories\x18\x07 \x03(\x0e\x32(.NotificationCenter.NotificationCategory\"\x97\x01\n\x14NotificationReadMark\x12\x0b\n\x03uid\x18\x01 \x01(\x0c\x12\x1c\n\x14notification_edge_id\x18\x02 \x01(\x03\x12\x14\n\x0cmark_edge_id\x18\x03 \x01(\x03\x12>\n\nreadStatus\x18\x04 
\x01(\x0e\x32*.NotificationCenter.NotificationReadStatus\"\x8d\x02\n\x13NotificationContent\x12\x38\n\x0cnotification\x18\x01 \x01(\x0b\x32 .NotificationCenter.NotificationH\x00\x12@\n\nreadStatus\x18\x02 \x01(\x0e\x32*.NotificationCenter.NotificationReadStatusH\x00\x12H\n\x0e\x61pprovalStatus\x18\x03 \x01(\x0e\x32..NotificationCenter.NotificationApprovalStatusH\x00\x12\x15\n\rclientTypeIDs\x18\x04 \x03(\x05\x12\x11\n\tdeviceIDs\x18\x05 \x03(\x03\x42\x06\n\x04type\"o\n\x13NotificationWrapper\x12\x0b\n\x03uid\x18\x01 \x01(\x0c\x12\x38\n\x07\x63ontent\x18\x02 \x01(\x0b\x32\'.NotificationCenter.NotificationContent\x12\x11\n\ttimestamp\x18\x03 \x01(\x03\"m\n\x10NotificationSync\x12\x35\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32\'.NotificationCenter.NotificationWrapper\x12\x11\n\tsyncPoint\x18\x02 \x01(\x03\x12\x0f\n\x07hasMore\x18\x03 \x01(\x08\"g\n\x10ReadStatusUpdate\x12\x17\n\x0fnotificationUid\x18\x01 \x01(\x0c\x12:\n\x06status\x18\x02 \x01(\x0e\x32*.NotificationCenter.NotificationReadStatus\"o\n\x14\x41pprovalStatusUpdate\x12\x17\n\x0fnotificationUid\x18\x01 \x01(\x0c\x12>\n\x06status\x18\x02 \x01(\x0e\x32..NotificationCenter.NotificationApprovalStatus\"^\n\x1cProcessMarkReadEventsRequest\x12>\n\x10readStatusUpdate\x18\x01 \x03(\x0b\x32$.NotificationCenter.ReadStatusUpdate\"\xa8\x01\n\x17NotificationSendRequest\x12+\n\nrecipients\x18\x01 \x03(\x0b\x32\x17.GraphSync.GraphSyncRef\x12\x36\n\x0cnotification\x18\x02 \x01(\x0b\x32 .NotificationCenter.Notification\x12\x15\n\rclientTypeIDs\x18\x03 \x03(\x05\x12\x11\n\tdeviceIDs\x18\x04 \x03(\x03\"^\n\x18NotificationsSendRequest\x12\x42\n\rnotifications\x18\x01 \x03(\x0b\x32+.NotificationCenter.NotificationSendRequest\",\n\x17NotificationSyncRequest\x12\x11\n\tsyncPoint\x18\x01 
\x01(\x03*\x9f\x01\n\x14NotificationCategory\x12\x12\n\x0eNC_UNSPECIFIED\x10\x00\x12\x0e\n\nNC_ACCOUNT\x10\x01\x12\x0e\n\nNC_SHARING\x10\x02\x12\x11\n\rNC_ENTERPRISE\x10\x03\x12\x0f\n\x0bNC_SECURITY\x10\x04\x12\x0e\n\nNC_REQUEST\x10\x05\x12\r\n\tNC_SYSTEM\x10\x06\x12\x10\n\x0cNC_PROMOTION\x10\x07*\xc2\x02\n\x10NotificationType\x12\x12\n\x0eNT_UNSPECIFIED\x10\x00\x12\x0c\n\x08NT_ALERT\x10\x01\x12\x16\n\x12NT_DEVICE_APPROVAL\x10\x02\x12\x1a\n\x16NT_MASTER_PASS_UPDATED\x10\x03\x12\x15\n\x11NT_SHARE_APPROVAL\x10\x04\x12\x1e\n\x1aNT_SHARE_APPROVAL_APPROVED\x10\x05\x12\r\n\tNT_SHARED\x10\x06\x12\x12\n\x0eNT_TRANSFERRED\x10\x07\x12\x1c\n\x18NT_LICENSE_LIMIT_REACHED\x10\x08\x12\x17\n\x13NT_APPROVAL_REQUEST\x10\t\x12\x18\n\x14NT_APPROVED_RESPONSE\x10\n\x12\x16\n\x12NT_DENIED_RESPONSE\x10\x0b\x12\x15\n\x11NT_2FA_CONFIGURED\x10\x0c*Y\n\x16NotificationReadStatus\x12\x13\n\x0fNRS_UNSPECIFIED\x10\x00\x12\x0c\n\x08NRS_LAST\x10\x01\x12\x0c\n\x08NRS_READ\x10\x02\x12\x0e\n\nNRS_UNREAD\x10\x03*\x86\x01\n\x1aNotificationApprovalStatus\x12\x13\n\x0fNAS_UNSPECIFIED\x10\x00\x12\x10\n\x0cNAS_APPROVED\x10\x01\x12\x0e\n\nNAS_DENIED\x10\x02\x12\x1c\n\x18NAS_LOST_APPROVAL_RIGHTS\x10\x03\x12\x13\n\x0fNAS_LOST_ACCESS\x10\x04\x42.\n\x18\x63om.keepersecurity.protoB\x12NotificationCenterb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x18NotificationCenter.proto\x12\x12NotificationCenter\x1a\x0fGraphSync.proto\".\n\rEncryptedData\x12\x0f\n\x07version\x18\x01 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"\xe2\x02\n\x0cNotification\x12\x32\n\x04type\x18\x01 \x01(\x0e\x32$.NotificationCenter.NotificationType\x12>\n\x08\x63\x61tegory\x18\x02 \x01(\x0e\x32(.NotificationCenter.NotificationCategoryB\x02\x18\x01\x12\'\n\x06sender\x18\x03 \x01(\x0b\x32\x17.GraphSync.GraphSyncRef\x12\x16\n\x0esenderFullName\x18\x04 \x01(\t\x12\x38\n\rencryptedData\x18\x05 \x01(\x0b\x32!.NotificationCenter.EncryptedData\x12%\n\x04refs\x18\x06 
\x03(\x0b\x32\x17.GraphSync.GraphSyncRef\x12<\n\ncategories\x18\x07 \x03(\x0e\x32(.NotificationCenter.NotificationCategory\"\x97\x01\n\x14NotificationReadMark\x12\x0b\n\x03uid\x18\x01 \x01(\x0c\x12\x1c\n\x14notification_edge_id\x18\x02 \x01(\x03\x12\x14\n\x0cmark_edge_id\x18\x03 \x01(\x03\x12>\n\nreadStatus\x18\x04 \x01(\x0e\x32*.NotificationCenter.NotificationReadStatus\"\xa6\x02\n\x13NotificationContent\x12\x38\n\x0cnotification\x18\x01 \x01(\x0b\x32 .NotificationCenter.NotificationH\x00\x12@\n\nreadStatus\x18\x02 \x01(\x0e\x32*.NotificationCenter.NotificationReadStatusH\x00\x12H\n\x0e\x61pprovalStatus\x18\x03 \x01(\x0e\x32..NotificationCenter.NotificationApprovalStatusH\x00\x12\x17\n\rtrimmingPoint\x18\x04 \x01(\x08H\x00\x12\x15\n\rclientTypeIDs\x18\x05 \x03(\x05\x12\x11\n\tdeviceIDs\x18\x06 \x03(\x03\x42\x06\n\x04type\"o\n\x13NotificationWrapper\x12\x0b\n\x03uid\x18\x01 \x01(\x0c\x12\x38\n\x07\x63ontent\x18\x02 \x01(\x0b\x32\'.NotificationCenter.NotificationContent\x12\x11\n\ttimestamp\x18\x03 \x01(\x03\"m\n\x10NotificationSync\x12\x35\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32\'.NotificationCenter.NotificationWrapper\x12\x11\n\tsyncPoint\x18\x02 \x01(\x03\x12\x0f\n\x07hasMore\x18\x03 \x01(\x08\"g\n\x10ReadStatusUpdate\x12\x17\n\x0fnotificationUid\x18\x01 \x01(\x0c\x12:\n\x06status\x18\x02 \x01(\x0e\x32*.NotificationCenter.NotificationReadStatus\"o\n\x14\x41pprovalStatusUpdate\x12\x17\n\x0fnotificationUid\x18\x01 \x01(\x0c\x12>\n\x06status\x18\x02 \x01(\x0e\x32..NotificationCenter.NotificationApprovalStatus\"^\n\x1cProcessMarkReadEventsRequest\x12>\n\x10readStatusUpdate\x18\x01 \x03(\x0b\x32$.NotificationCenter.ReadStatusUpdate\"\xa8\x01\n\x17NotificationSendRequest\x12+\n\nrecipients\x18\x01 \x03(\x0b\x32\x17.GraphSync.GraphSyncRef\x12\x36\n\x0cnotification\x18\x02 \x01(\x0b\x32 .NotificationCenter.Notification\x12\x15\n\rclientTypeIDs\x18\x03 \x03(\x05\x12\x11\n\tdeviceIDs\x18\x04 \x03(\x03\"^\n\x18NotificationsSendRequest\x12\x42\n\rnotifications\x18\x01 
\x03(\x0b\x32+.NotificationCenter.NotificationSendRequest\",\n\x17NotificationSyncRequest\x12\x11\n\tsyncPoint\x18\x01 \x01(\x03\"e\n(NotificationsApprovalStatusUpdateRequest\x12\x39\n\x07updates\x18\x01 \x03(\x0b\x32(.NotificationCenter.ApprovalStatusUpdate*\x9f\x01\n\x14NotificationCategory\x12\x12\n\x0eNC_UNSPECIFIED\x10\x00\x12\x0e\n\nNC_ACCOUNT\x10\x01\x12\x0e\n\nNC_SHARING\x10\x02\x12\x11\n\rNC_ENTERPRISE\x10\x03\x12\x0f\n\x0bNC_SECURITY\x10\x04\x12\x0e\n\nNC_REQUEST\x10\x05\x12\r\n\tNC_SYSTEM\x10\x06\x12\x10\n\x0cNC_PROMOTION\x10\x07*\x9e\x04\n\x10NotificationType\x12\x12\n\x0eNT_UNSPECIFIED\x10\x00\x12\x0c\n\x08NT_ALERT\x10\x01\x12\x16\n\x12NT_DEVICE_APPROVAL\x10\x02\x12\x1a\n\x16NT_MASTER_PASS_UPDATED\x10\x03\x12\x15\n\x11NT_SHARE_APPROVAL\x10\x04\x12\x1e\n\x1aNT_SHARE_APPROVAL_APPROVED\x10\x05\x12\r\n\tNT_SHARED\x10\x06\x12\x12\n\x0eNT_TRANSFERRED\x10\x07\x12\x1c\n\x18NT_LICENSE_LIMIT_REACHED\x10\x08\x12\x17\n\x13NT_APPROVAL_REQUEST\x10\t\x12\x18\n\x14NT_APPROVED_RESPONSE\x10\n\x12\x16\n\x12NT_DENIED_RESPONSE\x10\x0b\x12\x15\n\x11NT_2FA_CONFIGURED\x10\x0c\x12\x1c\n\x18NT_SHARE_APPROVAL_DENIED\x10\r\x12\x1f\n\x1bNT_DEVICE_APPROVAL_APPROVED\x10\x0e\x12\x1d\n\x19NT_DEVICE_APPROVAL_DENIED\x10\x0f\x12\x16\n\x12NT_ACCOUNT_CREATED\x10\x10\x12\x12\n\x0eNT_2FA_ENABLED\x10\x11\x12\x13\n\x0fNT_2FA_DISABLED\x10\x12\x12\x1c\n\x18NT_SECURITY_KEYS_ENABLED\x10\x13\x12\x1d\n\x19NT_SECURITY_KEYS_DISABLED\x10\x14*Y\n\x16NotificationReadStatus\x12\x13\n\x0fNRS_UNSPECIFIED\x10\x00\x12\x0c\n\x08NRS_LAST\x10\x01\x12\x0c\n\x08NRS_READ\x10\x02\x12\x0e\n\nNRS_UNREAD\x10\x03*\x99\x01\n\x1aNotificationApprovalStatus\x12\x13\n\x0fNAS_UNSPECIFIED\x10\x00\x12\x10\n\x0cNAS_APPROVED\x10\x01\x12\x0e\n\nNAS_DENIED\x10\x02\x12\x1c\n\x18NAS_LOST_APPROVAL_RIGHTS\x10\x03\x12\x13\n\x0fNAS_LOST_ACCESS\x10\x04\x12\x11\n\rNAS_ESCALATED\x10\x05\x42.\n\x18\x63om.keepersecurity.protoB\x12NotificationCenterb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) 
@@ -24,14 +24,14 @@ _globals['DESCRIPTOR']._serialized_options = b'\n\030com.keepersecurity.protoB\022NotificationCenter' _globals['_NOTIFICATION'].fields_by_name['category']._options = None _globals['_NOTIFICATION'].fields_by_name['category']._serialized_options = b'\030\001' - _globals['_NOTIFICATIONCATEGORY']._serialized_start=1748 - _globals['_NOTIFICATIONCATEGORY']._serialized_end=1907 - _globals['_NOTIFICATIONTYPE']._serialized_start=1910 - _globals['_NOTIFICATIONTYPE']._serialized_end=2232 - _globals['_NOTIFICATIONREADSTATUS']._serialized_start=2234 - _globals['_NOTIFICATIONREADSTATUS']._serialized_end=2323 - _globals['_NOTIFICATIONAPPROVALSTATUS']._serialized_start=2326 - _globals['_NOTIFICATIONAPPROVALSTATUS']._serialized_end=2460 + _globals['_NOTIFICATIONCATEGORY']._serialized_start=1876 + _globals['_NOTIFICATIONCATEGORY']._serialized_end=2035 + _globals['_NOTIFICATIONTYPE']._serialized_start=2038 + _globals['_NOTIFICATIONTYPE']._serialized_end=2580 + _globals['_NOTIFICATIONREADSTATUS']._serialized_start=2582 + _globals['_NOTIFICATIONREADSTATUS']._serialized_end=2671 + _globals['_NOTIFICATIONAPPROVALSTATUS']._serialized_start=2674 + _globals['_NOTIFICATIONAPPROVALSTATUS']._serialized_end=2827 _globals['_ENCRYPTEDDATA']._serialized_start=65 _globals['_ENCRYPTEDDATA']._serialized_end=111 _globals['_NOTIFICATION']._serialized_start=114 @@ -39,21 +39,23 @@ _globals['_NOTIFICATIONREADMARK']._serialized_start=471 _globals['_NOTIFICATIONREADMARK']._serialized_end=622 _globals['_NOTIFICATIONCONTENT']._serialized_start=625 - _globals['_NOTIFICATIONCONTENT']._serialized_end=894 - _globals['_NOTIFICATIONWRAPPER']._serialized_start=896 - _globals['_NOTIFICATIONWRAPPER']._serialized_end=1007 - _globals['_NOTIFICATIONSYNC']._serialized_start=1009 - _globals['_NOTIFICATIONSYNC']._serialized_end=1118 - _globals['_READSTATUSUPDATE']._serialized_start=1120 - _globals['_READSTATUSUPDATE']._serialized_end=1223 - _globals['_APPROVALSTATUSUPDATE']._serialized_start=1225 - 
_globals['_APPROVALSTATUSUPDATE']._serialized_end=1336 - _globals['_PROCESSMARKREADEVENTSREQUEST']._serialized_start=1338 - _globals['_PROCESSMARKREADEVENTSREQUEST']._serialized_end=1432 - _globals['_NOTIFICATIONSENDREQUEST']._serialized_start=1435 - _globals['_NOTIFICATIONSENDREQUEST']._serialized_end=1603 - _globals['_NOTIFICATIONSSENDREQUEST']._serialized_start=1605 - _globals['_NOTIFICATIONSSENDREQUEST']._serialized_end=1699 - _globals['_NOTIFICATIONSYNCREQUEST']._serialized_start=1701 - _globals['_NOTIFICATIONSYNCREQUEST']._serialized_end=1745 + _globals['_NOTIFICATIONCONTENT']._serialized_end=919 + _globals['_NOTIFICATIONWRAPPER']._serialized_start=921 + _globals['_NOTIFICATIONWRAPPER']._serialized_end=1032 + _globals['_NOTIFICATIONSYNC']._serialized_start=1034 + _globals['_NOTIFICATIONSYNC']._serialized_end=1143 + _globals['_READSTATUSUPDATE']._serialized_start=1145 + _globals['_READSTATUSUPDATE']._serialized_end=1248 + _globals['_APPROVALSTATUSUPDATE']._serialized_start=1250 + _globals['_APPROVALSTATUSUPDATE']._serialized_end=1361 + _globals['_PROCESSMARKREADEVENTSREQUEST']._serialized_start=1363 + _globals['_PROCESSMARKREADEVENTSREQUEST']._serialized_end=1457 + _globals['_NOTIFICATIONSENDREQUEST']._serialized_start=1460 + _globals['_NOTIFICATIONSENDREQUEST']._serialized_end=1628 + _globals['_NOTIFICATIONSSENDREQUEST']._serialized_start=1630 + _globals['_NOTIFICATIONSSENDREQUEST']._serialized_end=1724 + _globals['_NOTIFICATIONSYNCREQUEST']._serialized_start=1726 + _globals['_NOTIFICATIONSYNCREQUEST']._serialized_end=1770 + _globals['_NOTIFICATIONSAPPROVALSTATUSUPDATEREQUEST']._serialized_start=1772 + _globals['_NOTIFICATIONSAPPROVALSTATUSUPDATEREQUEST']._serialized_end=1873 # @@protoc_insertion_point(module_scope) diff --git a/keepercommander/proto/NotificationCenter_pb2.pyi b/keepercommander/proto/NotificationCenter_pb2.pyi index bf43f2688..a79107c66 100644 --- a/keepercommander/proto/NotificationCenter_pb2.pyi +++ 
b/keepercommander/proto/NotificationCenter_pb2.pyi @@ -33,6 +33,14 @@ class NotificationType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): NT_APPROVED_RESPONSE: _ClassVar[NotificationType] NT_DENIED_RESPONSE: _ClassVar[NotificationType] NT_2FA_CONFIGURED: _ClassVar[NotificationType] + NT_SHARE_APPROVAL_DENIED: _ClassVar[NotificationType] + NT_DEVICE_APPROVAL_APPROVED: _ClassVar[NotificationType] + NT_DEVICE_APPROVAL_DENIED: _ClassVar[NotificationType] + NT_ACCOUNT_CREATED: _ClassVar[NotificationType] + NT_2FA_ENABLED: _ClassVar[NotificationType] + NT_2FA_DISABLED: _ClassVar[NotificationType] + NT_SECURITY_KEYS_ENABLED: _ClassVar[NotificationType] + NT_SECURITY_KEYS_DISABLED: _ClassVar[NotificationType] class NotificationReadStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): __slots__ = [] @@ -48,6 +56,7 @@ class NotificationApprovalStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapp NAS_DENIED: _ClassVar[NotificationApprovalStatus] NAS_LOST_APPROVAL_RIGHTS: _ClassVar[NotificationApprovalStatus] NAS_LOST_ACCESS: _ClassVar[NotificationApprovalStatus] + NAS_ESCALATED: _ClassVar[NotificationApprovalStatus] NC_UNSPECIFIED: NotificationCategory NC_ACCOUNT: NotificationCategory NC_SHARING: NotificationCategory @@ -69,6 +78,14 @@ NT_APPROVAL_REQUEST: NotificationType NT_APPROVED_RESPONSE: NotificationType NT_DENIED_RESPONSE: NotificationType NT_2FA_CONFIGURED: NotificationType +NT_SHARE_APPROVAL_DENIED: NotificationType +NT_DEVICE_APPROVAL_APPROVED: NotificationType +NT_DEVICE_APPROVAL_DENIED: NotificationType +NT_ACCOUNT_CREATED: NotificationType +NT_2FA_ENABLED: NotificationType +NT_2FA_DISABLED: NotificationType +NT_SECURITY_KEYS_ENABLED: NotificationType +NT_SECURITY_KEYS_DISABLED: NotificationType NRS_UNSPECIFIED: NotificationReadStatus NRS_LAST: NotificationReadStatus NRS_READ: NotificationReadStatus @@ -78,6 +95,7 @@ NAS_APPROVED: NotificationApprovalStatus NAS_DENIED: NotificationApprovalStatus NAS_LOST_APPROVAL_RIGHTS: NotificationApprovalStatus 
NAS_LOST_ACCESS: NotificationApprovalStatus +NAS_ESCALATED: NotificationApprovalStatus class EncryptedData(_message.Message): __slots__ = ["version", "data"] @@ -118,18 +136,20 @@ class NotificationReadMark(_message.Message): def __init__(self, uid: _Optional[bytes] = ..., notification_edge_id: _Optional[int] = ..., mark_edge_id: _Optional[int] = ..., readStatus: _Optional[_Union[NotificationReadStatus, str]] = ...) -> None: ... class NotificationContent(_message.Message): - __slots__ = ["notification", "readStatus", "approvalStatus", "clientTypeIDs", "deviceIDs"] + __slots__ = ["notification", "readStatus", "approvalStatus", "trimmingPoint", "clientTypeIDs", "deviceIDs"] NOTIFICATION_FIELD_NUMBER: _ClassVar[int] READSTATUS_FIELD_NUMBER: _ClassVar[int] APPROVALSTATUS_FIELD_NUMBER: _ClassVar[int] + TRIMMINGPOINT_FIELD_NUMBER: _ClassVar[int] CLIENTTYPEIDS_FIELD_NUMBER: _ClassVar[int] DEVICEIDS_FIELD_NUMBER: _ClassVar[int] notification: Notification readStatus: NotificationReadStatus approvalStatus: NotificationApprovalStatus + trimmingPoint: bool clientTypeIDs: _containers.RepeatedScalarFieldContainer[int] deviceIDs: _containers.RepeatedScalarFieldContainer[int] - def __init__(self, notification: _Optional[_Union[Notification, _Mapping]] = ..., readStatus: _Optional[_Union[NotificationReadStatus, str]] = ..., approvalStatus: _Optional[_Union[NotificationApprovalStatus, str]] = ..., clientTypeIDs: _Optional[_Iterable[int]] = ..., deviceIDs: _Optional[_Iterable[int]] = ...) -> None: ... + def __init__(self, notification: _Optional[_Union[Notification, _Mapping]] = ..., readStatus: _Optional[_Union[NotificationReadStatus, str]] = ..., approvalStatus: _Optional[_Union[NotificationApprovalStatus, str]] = ..., trimmingPoint: bool = ..., clientTypeIDs: _Optional[_Iterable[int]] = ..., deviceIDs: _Optional[_Iterable[int]] = ...) -> None: ... 
class NotificationWrapper(_message.Message): __slots__ = ["uid", "content", "timestamp"] @@ -196,3 +216,9 @@ class NotificationSyncRequest(_message.Message): SYNCPOINT_FIELD_NUMBER: _ClassVar[int] syncPoint: int def __init__(self, syncPoint: _Optional[int] = ...) -> None: ... + +class NotificationsApprovalStatusUpdateRequest(_message.Message): + __slots__ = ["updates"] + UPDATES_FIELD_NUMBER: _ClassVar[int] + updates: _containers.RepeatedCompositeFieldContainer[ApprovalStatusUpdate] + def __init__(self, updates: _Optional[_Iterable[_Union[ApprovalStatusUpdate, _Mapping]]] = ...) -> None: ... diff --git a/keepercommander/proto/pedm_pb2.py b/keepercommander/proto/pedm_pb2.py index 89479463a..a6b4adc8d 100644 --- a/keepercommander/proto/pedm_pb2.py +++ b/keepercommander/proto/pedm_pb2.py @@ -15,7 +15,7 @@ from . import NotificationCenter_pb2 as NotificationCenter__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\npedm.proto\x12\x04PEDM\x1a\x0c\x66older.proto\x1a\x18NotificationCenter.proto\"O\n\x17PEDMTOTPValidateRequest\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x14\n\x0c\x65nterpriseId\x18\x02 \x01(\x05\x12\x0c\n\x04\x63ode\x18\x03 \x01(\x05\";\n\nPedmStatus\x12\x0b\n\x03key\x18\x01 \x03(\x0c\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x0f\n\x07message\x18\x03 \x01(\t\"\x89\x01\n\x12PedmStatusResponse\x12#\n\taddStatus\x18\x01 \x03(\x0b\x32\x10.PEDM.PedmStatus\x12&\n\x0cupdateStatus\x18\x02 \x03(\x0b\x32\x10.PEDM.PedmStatus\x12&\n\x0cremoveStatus\x18\x03 \x03(\x0b\x32\x10.PEDM.PedmStatus\"4\n\x0e\x44\x65ploymentData\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x65\x63PrivateKey\x18\x02 \x01(\x0c\"\x9a\x01\n\x17\x44\x65ploymentCreateRequest\x12\x15\n\rdeploymentUid\x18\x01 \x01(\x0c\x12\x0e\n\x06\x61\x65sKey\x18\x02 \x01(\x0c\x12\x13\n\x0b\x65\x63PublicKey\x18\x03 \x01(\x0c\x12\x19\n\x11spiffeCertificate\x18\x04 \x01(\x0c\x12\x15\n\rencryptedData\x18\x05 \x01(\x0c\x12\x11\n\tagentData\x18\x06 
\x01(\x0c\"\x8d\x01\n\x17\x44\x65ploymentUpdateRequest\x12\x15\n\rdeploymentUid\x18\x01 \x01(\x0c\x12\x15\n\rencryptedData\x18\x02 \x01(\x0c\x12)\n\x08\x64isabled\x18\x03 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x19\n\x11spiffeCertificate\x18\x04 \x01(\x0c\"\xa2\x01\n\x17ModifyDeploymentRequest\x12\x34\n\raddDeployment\x18\x01 \x03(\x0b\x32\x1d.PEDM.DeploymentCreateRequest\x12\x37\n\x10updateDeployment\x18\x02 \x03(\x0b\x32\x1d.PEDM.DeploymentUpdateRequest\x12\x18\n\x10removeDeployment\x18\x03 \x03(\x0c\"a\n\x0b\x41gentUpdate\x12\x10\n\x08\x61gentUid\x18\x01 \x01(\x0c\x12)\n\x08\x64isabled\x18\x02 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x15\n\rdeploymentUid\x18\x03 \x01(\x0c\"Q\n\x12ModifyAgentRequest\x12&\n\x0bupdateAgent\x18\x02 \x03(\x0b\x32\x11.PEDM.AgentUpdate\x12\x13\n\x0bremoveAgent\x18\x03 \x03(\x0c\"p\n\tPolicyAdd\x12\x11\n\tpolicyUid\x18\x01 \x01(\x0c\x12\x11\n\tplainData\x18\x02 \x01(\x0c\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12\x14\n\x0c\x65ncryptedKey\x18\x04 \x01(\x0c\x12\x10\n\x08\x64isabled\x18\x05 \x01(\x08\"v\n\x0cPolicyUpdate\x12\x11\n\tpolicyUid\x18\x01 \x01(\x0c\x12\x11\n\tplainData\x18\x02 \x01(\x0c\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12)\n\x08\x64isabled\x18\x04 \x01(\x0e\x32\x17.Folder.SetBooleanValue\"s\n\rPolicyRequest\x12\"\n\taddPolicy\x18\x01 \x03(\x0b\x32\x0f.PEDM.PolicyAdd\x12(\n\x0cupdatePolicy\x18\x02 \x03(\x0b\x32\x12.PEDM.PolicyUpdate\x12\x14\n\x0cremovePolicy\x18\x03 \x03(\x0c\"6\n\nPolicyLink\x12\x11\n\tpolicyUid\x18\x01 \x01(\x0c\x12\x15\n\rcollectionUid\x18\x02 \x03(\x0c\"E\n\x1aSetPolicyCollectionRequest\x12\'\n\rsetCollection\x18\x01 \x03(\x0b\x32\x10.PEDM.PolicyLink\"W\n\x0f\x43ollectionValue\x12\x15\n\rcollectionUid\x18\x01 \x01(\x0c\x12\x16\n\x0e\x63ollectionType\x18\x02 \x01(\x05\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\"z\n\x12\x43ollectionLinkData\x12\x15\n\rcollectionUid\x18\x01 \x01(\x0c\x12\x0f\n\x07linkUid\x18\x02 \x01(\x0c\x12*\n\x08linkType\x18\x03 
\x01(\x0e\x32\x18.PEDM.CollectionLinkType\x12\x10\n\x08linkData\x18\x04 \x01(\x0c\"\x8c\x01\n\x11\x43ollectionRequest\x12,\n\raddCollection\x18\x01 \x03(\x0b\x32\x15.PEDM.CollectionValue\x12/\n\x10updateCollection\x18\x02 \x03(\x0b\x32\x15.PEDM.CollectionValue\x12\x18\n\x10removeCollection\x18\x03 \x03(\x0c\"{\n\x18SetCollectionLinkRequest\x12/\n\raddCollection\x18\x01 \x03(\x0b\x32\x18.PEDM.CollectionLinkData\x12.\n\x10removeCollection\x18\x02 \x03(\x0b\x32\x14.PEDM.CollectionLink\"F\n\x15\x41pprovalActionRequest\x12\x0f\n\x07\x61pprove\x18\x01 \x03(\x0c\x12\x0c\n\x04\x64\x65ny\x18\x02 \x03(\x0c\x12\x0e\n\x06remove\x18\x03 \x03(\x0c\"\xab\x01\n\x0e\x44\x65ploymentNode\x12\x15\n\rdeploymentUid\x18\x01 \x01(\x0c\x12\x10\n\x08\x64isabled\x18\x02 \x01(\x08\x12\x0e\n\x06\x61\x65sKey\x18\x03 \x01(\x0c\x12\x13\n\x0b\x65\x63PublicKey\x18\x04 \x01(\x0c\x12\x15\n\rencryptedData\x18\x05 \x01(\x0c\x12\x11\n\tagentData\x18\x06 \x01(\x0c\x12\x0f\n\x07\x63reated\x18\x07 \x01(\x03\x12\x10\n\x08modified\x18\x08 \x01(\x03\"\xa8\x01\n\tAgentNode\x12\x10\n\x08\x61gentUid\x18\x01 \x01(\x0c\x12\x11\n\tmachineId\x18\x02 \x01(\t\x12\x15\n\rdeploymentUid\x18\x03 \x01(\x0c\x12\x13\n\x0b\x65\x63PublicKey\x18\x04 \x01(\x0c\x12\x10\n\x08\x64isabled\x18\x05 \x01(\x08\x12\x15\n\rencryptedData\x18\x06 \x01(\x0c\x12\x0f\n\x07\x63reated\x18\x07 \x01(\x03\x12\x10\n\x08modified\x18\x08 \x01(\x03\"\x94\x01\n\nPolicyNode\x12\x11\n\tpolicyUid\x18\x01 \x01(\x0c\x12\x11\n\tplainData\x18\x02 \x01(\x0c\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12\x14\n\x0c\x65ncryptedKey\x18\x04 \x01(\x0c\x12\x0f\n\x07\x63reated\x18\x05 \x01(\x03\x12\x10\n\x08modified\x18\x06 \x01(\x03\x12\x10\n\x08\x64isabled\x18\x07 \x01(\x08\"g\n\x0e\x43ollectionNode\x12\x15\n\rcollectionUid\x18\x01 \x01(\x0c\x12\x16\n\x0e\x63ollectionType\x18\x02 \x01(\x05\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12\x0f\n\x07\x63reated\x18\x04 \x01(\x03\"d\n\x0e\x43ollectionLink\x12\x15\n\rcollectionUid\x18\x01 
\x01(\x0c\x12\x0f\n\x07linkUid\x18\x02 \x01(\x0c\x12*\n\x08linkType\x18\x03 \x01(\x0e\x32\x18.PEDM.CollectionLinkType\"\x9d\x01\n\x12\x41pprovalStatusNode\x12\x13\n\x0b\x61pprovalUid\x18\x01 \x01(\x0c\x12\x46\n\x0e\x61pprovalStatus\x18\x02 \x01(\x0e\x32..NotificationCenter.NotificationApprovalStatus\x12\x18\n\x10\x65nterpriseUserId\x18\x03 \x01(\x03\x12\x10\n\x08modified\x18\n \x01(\x03\"\xb3\x01\n\x0c\x41pprovalNode\x12\x13\n\x0b\x61pprovalUid\x18\x01 \x01(\x0c\x12\x14\n\x0c\x61pprovalType\x18\x02 \x01(\x05\x12\x10\n\x08\x61gentUid\x18\x03 \x01(\x0c\x12\x13\n\x0b\x61\x63\x63ountInfo\x18\x04 \x01(\x0c\x12\x17\n\x0f\x61pplicationInfo\x18\x05 \x01(\x0c\x12\x15\n\rjustification\x18\x06 \x01(\x0c\x12\x10\n\x08\x65xpireIn\x18\x07 \x01(\x05\x12\x0f\n\x07\x63reated\x18\n \x01(\x03\"C\n\rFullSyncToken\x12\x15\n\rstartRevision\x18\x01 \x01(\x03\x12\x0e\n\x06\x65ntity\x18\x02 \x01(\x05\x12\x0b\n\x03key\x18\x03 \x03(\x0c\"$\n\x0cIncSyncToken\x12\x14\n\x0clastRevision\x18\x02 \x01(\x03\"h\n\rPedmSyncToken\x12\'\n\x08\x66ullSync\x18\x02 \x01(\x0b\x32\x13.PEDM.FullSyncTokenH\x00\x12%\n\x07incSync\x18\x03 \x01(\x0b\x32\x12.PEDM.IncSyncTokenH\x00\x42\x07\n\x05token\"/\n\x12GetPedmDataRequest\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\"\xad\x04\n\x13GetPedmDataResponse\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\x12\x12\n\nresetCache\x18\x02 \x01(\x08\x12\x0f\n\x07hasMore\x18\x03 \x01(\x08\x12\x1a\n\x12removedDeployments\x18\n \x03(\x0c\x12\x15\n\rremovedAgents\x18\x0b \x03(\x0c\x12\x17\n\x0fremovedPolicies\x18\x0c \x03(\x0c\x12\x19\n\x11removedCollection\x18\r \x03(\x0c\x12\x33\n\x15removedCollectionLink\x18\x0e \x03(\x0b\x32\x14.PEDM.CollectionLink\x12\x18\n\x10removedApprovals\x18\x0f \x03(\x0c\x12)\n\x0b\x64\x65ployments\x18\x14 \x03(\x0b\x32\x14.PEDM.DeploymentNode\x12\x1f\n\x06\x61gents\x18\x15 \x03(\x0b\x32\x0f.PEDM.AgentNode\x12\"\n\x08policies\x18\x16 \x03(\x0b\x32\x10.PEDM.PolicyNode\x12)\n\x0b\x63ollections\x18\x17 
\x03(\x0b\x32\x14.PEDM.CollectionNode\x12,\n\x0e\x63ollectionLink\x18\x18 \x03(\x0b\x32\x14.PEDM.CollectionLink\x12%\n\tapprovals\x18\x19 \x03(\x0b\x32\x12.PEDM.ApprovalNode\x12\x30\n\x0e\x61pprovalStatus\x18\x1a \x03(\x0b\x32\x18.PEDM.ApprovalStatusNode\"<\n\x12PolicyAgentRequest\x12\x11\n\tpolicyUid\x18\x01 \x03(\x0c\x12\x13\n\x0bsummaryOnly\x18\x02 \x01(\x08\";\n\x13PolicyAgentResponse\x12\x12\n\nagentCount\x18\x01 \x01(\x05\x12\x10\n\x08\x61gentUid\x18\x02 \x03(\x0c\"]\n\x16\x41uditCollectionRequest\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\x12\x10\n\x08valueUid\x18\x02 \x03(\x0c\x12\x16\n\x0e\x63ollectionName\x18\x03 \x03(\t\"h\n\x14\x41uditCollectionValue\x12\x16\n\x0e\x63ollectionName\x18\x01 \x01(\t\x12\x10\n\x08valueUid\x18\x02 \x01(\x0c\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12\x0f\n\x07\x63reated\x18\x04 \x01(\x03\"q\n\x17\x41uditCollectionResponse\x12*\n\x06values\x18\x01 \x03(\x0b\x32\x1a.PEDM.AuditCollectionValue\x12\x0f\n\x07hasMore\x18\x02 \x01(\x08\x12\x19\n\x11\x63ontinuationToken\x18\x03 \x01(\x0c\"H\n\x18GetCollectionLinkRequest\x12,\n\x0e\x63ollectionLink\x18\x01 \x03(\x0b\x32\x14.PEDM.CollectionLink\"Q\n\x19GetCollectionLinkResponse\x12\x34\n\x12\x63ollectionLinkData\x18\x01 \x03(\x0b\x32\x18.PEDM.CollectionLinkData\"2\n\x1aGetActiveAgentCountRequest\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x03(\x05\">\n\x10\x41\x63tiveAgentCount\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x01(\x05\x12\x14\n\x0c\x61\x63tiveAgents\x18\x02 \x01(\x05\";\n\x12\x41\x63tiveAgentFailure\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\"x\n\x1bGetActiveAgentCountResponse\x12*\n\nagentCount\x18\x01 \x03(\x0b\x32\x16.PEDM.ActiveAgentCount\x12-\n\x0b\x66\x61iledCount\x18\x02 \x03(\x0b\x32\x18.PEDM.ActiveAgentFailure\"\x87\x01\n\x19GetAgentDailyCountRequest\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x03(\x05\x12$\n\tmonthYear\x18\x02 \x01(\x0b\x32\x0f.PEDM.MonthYearH\x00\x12$\n\tdateRange\x18\x03 
\x01(\x0b\x32\x0f.PEDM.DateRangeH\x00\x42\x08\n\x06period\"(\n\tMonthYear\x12\r\n\x05month\x18\x01 \x01(\x05\x12\x0c\n\x04year\x18\x02 \x01(\x05\"\'\n\tDateRange\x12\r\n\x05start\x18\x01 \x01(\x03\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x03\"3\n\x0f\x41gentDailyCount\x12\x0c\n\x04\x64\x61te\x18\x01 \x01(\x03\x12\x12\n\nagentCount\x18\x02 \x01(\x05\"V\n\x17\x41gentCountForEnterprise\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x01(\x05\x12%\n\x06\x63ounts\x18\x02 \x03(\x0b\x32\x15.PEDM.AgentDailyCount\"U\n\x1aGetAgentDailyCountResponse\x12\x37\n\x10\x65nterpriseCounts\x18\x01 \x03(\x0b\x32\x1d.PEDM.AgentCountForEnterprise*j\n\x12\x43ollectionLinkType\x12\r\n\tCLT_OTHER\x10\x00\x12\r\n\tCLT_AGENT\x10\x01\x12\x0e\n\nCLT_POLICY\x10\x02\x12\x12\n\x0e\x43LT_COLLECTION\x10\x03\x12\x12\n\x0e\x43LT_DEPLOYMENT\x10\x04\x42 \n\x18\x63om.keepersecurity.protoB\x04PEDMb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\npedm.proto\x12\x04PEDM\x1a\x0c\x66older.proto\x1a\x18NotificationCenter.proto\"O\n\x17PEDMTOTPValidateRequest\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x14\n\x0c\x65nterpriseId\x18\x02 \x01(\x05\x12\x0c\n\x04\x63ode\x18\x03 \x01(\x05\";\n\nPedmStatus\x12\x0b\n\x03key\x18\x01 \x03(\x0c\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x0f\n\x07message\x18\x03 \x01(\t\"\x89\x01\n\x12PedmStatusResponse\x12#\n\taddStatus\x18\x01 \x03(\x0b\x32\x10.PEDM.PedmStatus\x12&\n\x0cupdateStatus\x18\x02 \x03(\x0b\x32\x10.PEDM.PedmStatus\x12&\n\x0cremoveStatus\x18\x03 \x03(\x0b\x32\x10.PEDM.PedmStatus\"4\n\x0e\x44\x65ploymentData\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x65\x63PrivateKey\x18\x02 \x01(\x0c\"\x9a\x01\n\x17\x44\x65ploymentCreateRequest\x12\x15\n\rdeploymentUid\x18\x01 \x01(\x0c\x12\x0e\n\x06\x61\x65sKey\x18\x02 \x01(\x0c\x12\x13\n\x0b\x65\x63PublicKey\x18\x03 \x01(\x0c\x12\x19\n\x11spiffeCertificate\x18\x04 \x01(\x0c\x12\x15\n\rencryptedData\x18\x05 \x01(\x0c\x12\x11\n\tagentData\x18\x06 
\x01(\x0c\"\x8d\x01\n\x17\x44\x65ploymentUpdateRequest\x12\x15\n\rdeploymentUid\x18\x01 \x01(\x0c\x12\x15\n\rencryptedData\x18\x02 \x01(\x0c\x12)\n\x08\x64isabled\x18\x03 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x19\n\x11spiffeCertificate\x18\x04 \x01(\x0c\"\xa2\x01\n\x17ModifyDeploymentRequest\x12\x34\n\raddDeployment\x18\x01 \x03(\x0b\x32\x1d.PEDM.DeploymentCreateRequest\x12\x37\n\x10updateDeployment\x18\x02 \x03(\x0b\x32\x1d.PEDM.DeploymentUpdateRequest\x12\x18\n\x10removeDeployment\x18\x03 \x03(\x0c\"a\n\x0b\x41gentUpdate\x12\x10\n\x08\x61gentUid\x18\x01 \x01(\x0c\x12)\n\x08\x64isabled\x18\x02 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x15\n\rdeploymentUid\x18\x03 \x01(\x0c\"Q\n\x12ModifyAgentRequest\x12&\n\x0bupdateAgent\x18\x02 \x03(\x0b\x32\x11.PEDM.AgentUpdate\x12\x13\n\x0bremoveAgent\x18\x03 \x03(\x0c\"p\n\tPolicyAdd\x12\x11\n\tpolicyUid\x18\x01 \x01(\x0c\x12\x11\n\tplainData\x18\x02 \x01(\x0c\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12\x14\n\x0c\x65ncryptedKey\x18\x04 \x01(\x0c\x12\x10\n\x08\x64isabled\x18\x05 \x01(\x08\"v\n\x0cPolicyUpdate\x12\x11\n\tpolicyUid\x18\x01 \x01(\x0c\x12\x11\n\tplainData\x18\x02 \x01(\x0c\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12)\n\x08\x64isabled\x18\x04 \x01(\x0e\x32\x17.Folder.SetBooleanValue\"s\n\rPolicyRequest\x12\"\n\taddPolicy\x18\x01 \x03(\x0b\x32\x0f.PEDM.PolicyAdd\x12(\n\x0cupdatePolicy\x18\x02 \x03(\x0b\x32\x12.PEDM.PolicyUpdate\x12\x14\n\x0cremovePolicy\x18\x03 \x03(\x0c\"6\n\nPolicyLink\x12\x11\n\tpolicyUid\x18\x01 \x01(\x0c\x12\x15\n\rcollectionUid\x18\x02 \x03(\x0c\"E\n\x1aSetPolicyCollectionRequest\x12\'\n\rsetCollection\x18\x01 \x03(\x0b\x32\x10.PEDM.PolicyLink\"W\n\x0f\x43ollectionValue\x12\x15\n\rcollectionUid\x18\x01 \x01(\x0c\x12\x16\n\x0e\x63ollectionType\x18\x02 \x01(\x05\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\"z\n\x12\x43ollectionLinkData\x12\x15\n\rcollectionUid\x18\x01 \x01(\x0c\x12\x0f\n\x07linkUid\x18\x02 \x01(\x0c\x12*\n\x08linkType\x18\x03 
\x01(\x0e\x32\x18.PEDM.CollectionLinkType\x12\x10\n\x08linkData\x18\x04 \x01(\x0c\"\x8c\x01\n\x11\x43ollectionRequest\x12,\n\raddCollection\x18\x01 \x03(\x0b\x32\x15.PEDM.CollectionValue\x12/\n\x10updateCollection\x18\x02 \x03(\x0b\x32\x15.PEDM.CollectionValue\x12\x18\n\x10removeCollection\x18\x03 \x03(\x0c\"{\n\x18SetCollectionLinkRequest\x12/\n\raddCollection\x18\x01 \x03(\x0b\x32\x18.PEDM.CollectionLinkData\x12.\n\x10removeCollection\x18\x02 \x03(\x0b\x32\x14.PEDM.CollectionLink\";\n\x12\x41pprovalExtendData\x12\x13\n\x0b\x61pprovalUid\x18\x01 \x01(\x0c\x12\x10\n\x08\x65xpireIn\x18\x02 \x01(\x05\"I\n\x15ModifyApprovalRequest\x12\x30\n\x0e\x65xtendApproval\x18\x01 \x03(\x0b\x32\x18.PEDM.ApprovalExtendData\"F\n\x15\x41pprovalActionRequest\x12\x0f\n\x07\x61pprove\x18\x01 \x03(\x0c\x12\x0c\n\x04\x64\x65ny\x18\x02 \x03(\x0c\x12\x0e\n\x06remove\x18\x03 \x03(\x0c\"\xab\x01\n\x0e\x44\x65ploymentNode\x12\x15\n\rdeploymentUid\x18\x01 \x01(\x0c\x12\x10\n\x08\x64isabled\x18\x02 \x01(\x08\x12\x0e\n\x06\x61\x65sKey\x18\x03 \x01(\x0c\x12\x13\n\x0b\x65\x63PublicKey\x18\x04 \x01(\x0c\x12\x15\n\rencryptedData\x18\x05 \x01(\x0c\x12\x11\n\tagentData\x18\x06 \x01(\x0c\x12\x0f\n\x07\x63reated\x18\x07 \x01(\x03\x12\x10\n\x08modified\x18\x08 \x01(\x03\"\xa8\x01\n\tAgentNode\x12\x10\n\x08\x61gentUid\x18\x01 \x01(\x0c\x12\x11\n\tmachineId\x18\x02 \x01(\t\x12\x15\n\rdeploymentUid\x18\x03 \x01(\x0c\x12\x13\n\x0b\x65\x63PublicKey\x18\x04 \x01(\x0c\x12\x10\n\x08\x64isabled\x18\x05 \x01(\x08\x12\x15\n\rencryptedData\x18\x06 \x01(\x0c\x12\x0f\n\x07\x63reated\x18\x07 \x01(\x03\x12\x10\n\x08modified\x18\x08 \x01(\x03\"\x94\x01\n\nPolicyNode\x12\x11\n\tpolicyUid\x18\x01 \x01(\x0c\x12\x11\n\tplainData\x18\x02 \x01(\x0c\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12\x14\n\x0c\x65ncryptedKey\x18\x04 \x01(\x0c\x12\x0f\n\x07\x63reated\x18\x05 \x01(\x03\x12\x10\n\x08modified\x18\x06 \x01(\x03\x12\x10\n\x08\x64isabled\x18\x07 \x01(\x08\"g\n\x0e\x43ollectionNode\x12\x15\n\rcollectionUid\x18\x01 
\x01(\x0c\x12\x16\n\x0e\x63ollectionType\x18\x02 \x01(\x05\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12\x0f\n\x07\x63reated\x18\x04 \x01(\x03\"d\n\x0e\x43ollectionLink\x12\x15\n\rcollectionUid\x18\x01 \x01(\x0c\x12\x0f\n\x07linkUid\x18\x02 \x01(\x0c\x12*\n\x08linkType\x18\x03 \x01(\x0e\x32\x18.PEDM.CollectionLinkType\"\x9d\x01\n\x12\x41pprovalStatusNode\x12\x13\n\x0b\x61pprovalUid\x18\x01 \x01(\x0c\x12\x46\n\x0e\x61pprovalStatus\x18\x02 \x01(\x0e\x32..NotificationCenter.NotificationApprovalStatus\x12\x18\n\x10\x65nterpriseUserId\x18\x03 \x01(\x03\x12\x10\n\x08modified\x18\n \x01(\x03\"\xb3\x01\n\x0c\x41pprovalNode\x12\x13\n\x0b\x61pprovalUid\x18\x01 \x01(\x0c\x12\x14\n\x0c\x61pprovalType\x18\x02 \x01(\x05\x12\x10\n\x08\x61gentUid\x18\x03 \x01(\x0c\x12\x13\n\x0b\x61\x63\x63ountInfo\x18\x04 \x01(\x0c\x12\x17\n\x0f\x61pplicationInfo\x18\x05 \x01(\x0c\x12\x15\n\rjustification\x18\x06 \x01(\x0c\x12\x10\n\x08\x65xpireIn\x18\x07 \x01(\x05\x12\x0f\n\x07\x63reated\x18\n \x01(\x03\"C\n\rFullSyncToken\x12\x15\n\rstartRevision\x18\x01 \x01(\x03\x12\x0e\n\x06\x65ntity\x18\x02 \x01(\x05\x12\x0b\n\x03key\x18\x03 \x03(\x0c\"$\n\x0cIncSyncToken\x12\x14\n\x0clastRevision\x18\x02 \x01(\x03\"h\n\rPedmSyncToken\x12\'\n\x08\x66ullSync\x18\x02 \x01(\x0b\x32\x13.PEDM.FullSyncTokenH\x00\x12%\n\x07incSync\x18\x03 \x01(\x0b\x32\x12.PEDM.IncSyncTokenH\x00\x42\x07\n\x05token\"/\n\x12GetPedmDataRequest\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\"\xad\x04\n\x13GetPedmDataResponse\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\x12\x12\n\nresetCache\x18\x02 \x01(\x08\x12\x0f\n\x07hasMore\x18\x03 \x01(\x08\x12\x1a\n\x12removedDeployments\x18\n \x03(\x0c\x12\x15\n\rremovedAgents\x18\x0b \x03(\x0c\x12\x17\n\x0fremovedPolicies\x18\x0c \x03(\x0c\x12\x19\n\x11removedCollection\x18\r \x03(\x0c\x12\x33\n\x15removedCollectionLink\x18\x0e \x03(\x0b\x32\x14.PEDM.CollectionLink\x12\x18\n\x10removedApprovals\x18\x0f \x03(\x0c\x12)\n\x0b\x64\x65ployments\x18\x14 
\x03(\x0b\x32\x14.PEDM.DeploymentNode\x12\x1f\n\x06\x61gents\x18\x15 \x03(\x0b\x32\x0f.PEDM.AgentNode\x12\"\n\x08policies\x18\x16 \x03(\x0b\x32\x10.PEDM.PolicyNode\x12)\n\x0b\x63ollections\x18\x17 \x03(\x0b\x32\x14.PEDM.CollectionNode\x12,\n\x0e\x63ollectionLink\x18\x18 \x03(\x0b\x32\x14.PEDM.CollectionLink\x12%\n\tapprovals\x18\x19 \x03(\x0b\x32\x12.PEDM.ApprovalNode\x12\x30\n\x0e\x61pprovalStatus\x18\x1a \x03(\x0b\x32\x18.PEDM.ApprovalStatusNode\"<\n\x12PolicyAgentRequest\x12\x11\n\tpolicyUid\x18\x01 \x03(\x0c\x12\x13\n\x0bsummaryOnly\x18\x02 \x01(\x08\";\n\x13PolicyAgentResponse\x12\x12\n\nagentCount\x18\x01 \x01(\x05\x12\x10\n\x08\x61gentUid\x18\x02 \x03(\x0c\"]\n\x16\x41uditCollectionRequest\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\x12\x10\n\x08valueUid\x18\x02 \x03(\x0c\x12\x16\n\x0e\x63ollectionName\x18\x03 \x03(\t\"h\n\x14\x41uditCollectionValue\x12\x16\n\x0e\x63ollectionName\x18\x01 \x01(\t\x12\x10\n\x08valueUid\x18\x02 \x01(\x0c\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12\x0f\n\x07\x63reated\x18\x04 \x01(\x03\"q\n\x17\x41uditCollectionResponse\x12*\n\x06values\x18\x01 \x03(\x0b\x32\x1a.PEDM.AuditCollectionValue\x12\x0f\n\x07hasMore\x18\x02 \x01(\x08\x12\x19\n\x11\x63ontinuationToken\x18\x03 \x01(\x0c\"H\n\x18GetCollectionLinkRequest\x12,\n\x0e\x63ollectionLink\x18\x01 \x03(\x0b\x32\x14.PEDM.CollectionLink\"Q\n\x19GetCollectionLinkResponse\x12\x34\n\x12\x63ollectionLinkData\x18\x01 \x03(\x0b\x32\x18.PEDM.CollectionLinkData\"\xaa\x01\n\x1bOfflineAgentRegisterRequest\x12\x10\n\x08\x61gentUid\x18\x01 \x01(\x0c\x12\x15\n\rdeploymentUid\x18\x02 \x01(\x0c\x12\x11\n\tpublicKey\x18\x03 \x01(\x0c\x12\x11\n\tmachineId\x18\x04 \x01(\t\x12)\n\ncollection\x18\x05 \x03(\x0b\x32\x15.PEDM.CollectionValue\x12\x11\n\tagentData\x18\x07 \x01(\x0c\"0\n\x1cOfflineAgentRegisterResponse\x12\x10\n\x08\x61gentUid\x18\x01 \x01(\x0c\"/\n\x1bOfflineAgentSyncDownRequest\x12\x10\n\x08\x61gentUid\x18\x01 
\x01(\x0c\"9\n\x1cOfflineAgentSyncDownResponse\x12\x19\n\x11\x65ncryptedSyncData\x18\x01 \x01(\x0c\"?\n\x17GetAgentLastSeenRequest\x12\x12\n\nactiveOnly\x18\x01 \x01(\x08\x12\x10\n\x08\x61gentUid\x18\x02 \x03(\x0c\"3\n\rAgentLastSeen\x12\x10\n\x08\x61gentUid\x18\x01 \x01(\x0c\x12\x10\n\x08lastSeen\x18\x02 \x01(\x03\"A\n\x18GetAgentLastSeenResponse\x12%\n\x08lastSeen\x18\x01 \x03(\x0b\x32\x13.PEDM.AgentLastSeen\"2\n\x1aGetActiveAgentCountRequest\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x03(\x05\">\n\x10\x41\x63tiveAgentCount\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x01(\x05\x12\x14\n\x0c\x61\x63tiveAgents\x18\x02 \x01(\x05\";\n\x12\x41\x63tiveAgentFailure\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\"x\n\x1bGetActiveAgentCountResponse\x12*\n\nagentCount\x18\x01 \x03(\x0b\x32\x16.PEDM.ActiveAgentCount\x12-\n\x0b\x66\x61iledCount\x18\x02 \x03(\x0b\x32\x18.PEDM.ActiveAgentFailure\"\x87\x01\n\x19GetAgentDailyCountRequest\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x03(\x05\x12$\n\tmonthYear\x18\x02 \x01(\x0b\x32\x0f.PEDM.MonthYearH\x00\x12$\n\tdateRange\x18\x03 \x01(\x0b\x32\x0f.PEDM.DateRangeH\x00\x42\x08\n\x06period\"(\n\tMonthYear\x12\r\n\x05month\x18\x01 \x01(\x05\x12\x0c\n\x04year\x18\x02 \x01(\x05\"\'\n\tDateRange\x12\r\n\x05start\x18\x01 \x01(\x03\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x03\"3\n\x0f\x41gentDailyCount\x12\x0c\n\x04\x64\x61te\x18\x01 \x01(\x03\x12\x12\n\nagentCount\x18\x02 \x01(\x05\"V\n\x17\x41gentCountForEnterprise\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x01(\x05\x12%\n\x06\x63ounts\x18\x02 \x03(\x0b\x32\x15.PEDM.AgentDailyCount\"U\n\x1aGetAgentDailyCountResponse\x12\x37\n\x10\x65nterpriseCounts\x18\x01 \x03(\x0b\x32\x1d.PEDM.AgentCountForEnterprise*j\n\x12\x43ollectionLinkType\x12\r\n\tCLT_OTHER\x10\x00\x12\r\n\tCLT_AGENT\x10\x01\x12\x0e\n\nCLT_POLICY\x10\x02\x12\x12\n\x0e\x43LT_COLLECTION\x10\x03\x12\x12\n\x0e\x43LT_DEPLOYMENT\x10\x04\x42 \n\x18\x63om.keepersecurity.protoB\x04PEDMb\x06proto3') _globals = globals() 
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -23,8 +23,8 @@ if _descriptor._USE_C_DESCRIPTORS == False: _globals['DESCRIPTOR']._options = None _globals['DESCRIPTOR']._serialized_options = b'\n\030com.keepersecurity.protoB\004PEDM' - _globals['_COLLECTIONLINKTYPE']._serialized_start=5286 - _globals['_COLLECTIONLINKTYPE']._serialized_end=5392 + _globals['_COLLECTIONLINKTYPE']._serialized_start=5938 + _globals['_COLLECTIONLINKTYPE']._serialized_end=6044 _globals['_PEDMTOTPVALIDATEREQUEST']._serialized_start=60 _globals['_PEDMTOTPVALIDATEREQUEST']._serialized_end=139 _globals['_PEDMSTATUS']._serialized_start=141 @@ -61,64 +61,82 @@ _globals['_COLLECTIONREQUEST']._serialized_end=1876 _globals['_SETCOLLECTIONLINKREQUEST']._serialized_start=1878 _globals['_SETCOLLECTIONLINKREQUEST']._serialized_end=2001 - _globals['_APPROVALACTIONREQUEST']._serialized_start=2003 - _globals['_APPROVALACTIONREQUEST']._serialized_end=2073 - _globals['_DEPLOYMENTNODE']._serialized_start=2076 - _globals['_DEPLOYMENTNODE']._serialized_end=2247 - _globals['_AGENTNODE']._serialized_start=2250 - _globals['_AGENTNODE']._serialized_end=2418 - _globals['_POLICYNODE']._serialized_start=2421 - _globals['_POLICYNODE']._serialized_end=2569 - _globals['_COLLECTIONNODE']._serialized_start=2571 - _globals['_COLLECTIONNODE']._serialized_end=2674 - _globals['_COLLECTIONLINK']._serialized_start=2676 - _globals['_COLLECTIONLINK']._serialized_end=2776 - _globals['_APPROVALSTATUSNODE']._serialized_start=2779 - _globals['_APPROVALSTATUSNODE']._serialized_end=2936 - _globals['_APPROVALNODE']._serialized_start=2939 - _globals['_APPROVALNODE']._serialized_end=3118 - _globals['_FULLSYNCTOKEN']._serialized_start=3120 - _globals['_FULLSYNCTOKEN']._serialized_end=3187 - _globals['_INCSYNCTOKEN']._serialized_start=3189 - _globals['_INCSYNCTOKEN']._serialized_end=3225 - _globals['_PEDMSYNCTOKEN']._serialized_start=3227 - _globals['_PEDMSYNCTOKEN']._serialized_end=3331 - 
_globals['_GETPEDMDATAREQUEST']._serialized_start=3333 - _globals['_GETPEDMDATAREQUEST']._serialized_end=3380 - _globals['_GETPEDMDATARESPONSE']._serialized_start=3383 - _globals['_GETPEDMDATARESPONSE']._serialized_end=3940 - _globals['_POLICYAGENTREQUEST']._serialized_start=3942 - _globals['_POLICYAGENTREQUEST']._serialized_end=4002 - _globals['_POLICYAGENTRESPONSE']._serialized_start=4004 - _globals['_POLICYAGENTRESPONSE']._serialized_end=4063 - _globals['_AUDITCOLLECTIONREQUEST']._serialized_start=4065 - _globals['_AUDITCOLLECTIONREQUEST']._serialized_end=4158 - _globals['_AUDITCOLLECTIONVALUE']._serialized_start=4160 - _globals['_AUDITCOLLECTIONVALUE']._serialized_end=4264 - _globals['_AUDITCOLLECTIONRESPONSE']._serialized_start=4266 - _globals['_AUDITCOLLECTIONRESPONSE']._serialized_end=4379 - _globals['_GETCOLLECTIONLINKREQUEST']._serialized_start=4381 - _globals['_GETCOLLECTIONLINKREQUEST']._serialized_end=4453 - _globals['_GETCOLLECTIONLINKRESPONSE']._serialized_start=4455 - _globals['_GETCOLLECTIONLINKRESPONSE']._serialized_end=4536 - _globals['_GETACTIVEAGENTCOUNTREQUEST']._serialized_start=4538 - _globals['_GETACTIVEAGENTCOUNTREQUEST']._serialized_end=4588 - _globals['_ACTIVEAGENTCOUNT']._serialized_start=4590 - _globals['_ACTIVEAGENTCOUNT']._serialized_end=4652 - _globals['_ACTIVEAGENTFAILURE']._serialized_start=4654 - _globals['_ACTIVEAGENTFAILURE']._serialized_end=4713 - _globals['_GETACTIVEAGENTCOUNTRESPONSE']._serialized_start=4715 - _globals['_GETACTIVEAGENTCOUNTRESPONSE']._serialized_end=4835 - _globals['_GETAGENTDAILYCOUNTREQUEST']._serialized_start=4838 - _globals['_GETAGENTDAILYCOUNTREQUEST']._serialized_end=4973 - _globals['_MONTHYEAR']._serialized_start=4975 - _globals['_MONTHYEAR']._serialized_end=5015 - _globals['_DATERANGE']._serialized_start=5017 - _globals['_DATERANGE']._serialized_end=5056 - _globals['_AGENTDAILYCOUNT']._serialized_start=5058 - _globals['_AGENTDAILYCOUNT']._serialized_end=5109 - 
_globals['_AGENTCOUNTFORENTERPRISE']._serialized_start=5111 - _globals['_AGENTCOUNTFORENTERPRISE']._serialized_end=5197 - _globals['_GETAGENTDAILYCOUNTRESPONSE']._serialized_start=5199 - _globals['_GETAGENTDAILYCOUNTRESPONSE']._serialized_end=5284 + _globals['_APPROVALEXTENDDATA']._serialized_start=2003 + _globals['_APPROVALEXTENDDATA']._serialized_end=2062 + _globals['_MODIFYAPPROVALREQUEST']._serialized_start=2064 + _globals['_MODIFYAPPROVALREQUEST']._serialized_end=2137 + _globals['_APPROVALACTIONREQUEST']._serialized_start=2139 + _globals['_APPROVALACTIONREQUEST']._serialized_end=2209 + _globals['_DEPLOYMENTNODE']._serialized_start=2212 + _globals['_DEPLOYMENTNODE']._serialized_end=2383 + _globals['_AGENTNODE']._serialized_start=2386 + _globals['_AGENTNODE']._serialized_end=2554 + _globals['_POLICYNODE']._serialized_start=2557 + _globals['_POLICYNODE']._serialized_end=2705 + _globals['_COLLECTIONNODE']._serialized_start=2707 + _globals['_COLLECTIONNODE']._serialized_end=2810 + _globals['_COLLECTIONLINK']._serialized_start=2812 + _globals['_COLLECTIONLINK']._serialized_end=2912 + _globals['_APPROVALSTATUSNODE']._serialized_start=2915 + _globals['_APPROVALSTATUSNODE']._serialized_end=3072 + _globals['_APPROVALNODE']._serialized_start=3075 + _globals['_APPROVALNODE']._serialized_end=3254 + _globals['_FULLSYNCTOKEN']._serialized_start=3256 + _globals['_FULLSYNCTOKEN']._serialized_end=3323 + _globals['_INCSYNCTOKEN']._serialized_start=3325 + _globals['_INCSYNCTOKEN']._serialized_end=3361 + _globals['_PEDMSYNCTOKEN']._serialized_start=3363 + _globals['_PEDMSYNCTOKEN']._serialized_end=3467 + _globals['_GETPEDMDATAREQUEST']._serialized_start=3469 + _globals['_GETPEDMDATAREQUEST']._serialized_end=3516 + _globals['_GETPEDMDATARESPONSE']._serialized_start=3519 + _globals['_GETPEDMDATARESPONSE']._serialized_end=4076 + _globals['_POLICYAGENTREQUEST']._serialized_start=4078 + _globals['_POLICYAGENTREQUEST']._serialized_end=4138 + 
_globals['_POLICYAGENTRESPONSE']._serialized_start=4140 + _globals['_POLICYAGENTRESPONSE']._serialized_end=4199 + _globals['_AUDITCOLLECTIONREQUEST']._serialized_start=4201 + _globals['_AUDITCOLLECTIONREQUEST']._serialized_end=4294 + _globals['_AUDITCOLLECTIONVALUE']._serialized_start=4296 + _globals['_AUDITCOLLECTIONVALUE']._serialized_end=4400 + _globals['_AUDITCOLLECTIONRESPONSE']._serialized_start=4402 + _globals['_AUDITCOLLECTIONRESPONSE']._serialized_end=4515 + _globals['_GETCOLLECTIONLINKREQUEST']._serialized_start=4517 + _globals['_GETCOLLECTIONLINKREQUEST']._serialized_end=4589 + _globals['_GETCOLLECTIONLINKRESPONSE']._serialized_start=4591 + _globals['_GETCOLLECTIONLINKRESPONSE']._serialized_end=4672 + _globals['_OFFLINEAGENTREGISTERREQUEST']._serialized_start=4675 + _globals['_OFFLINEAGENTREGISTERREQUEST']._serialized_end=4845 + _globals['_OFFLINEAGENTREGISTERRESPONSE']._serialized_start=4847 + _globals['_OFFLINEAGENTREGISTERRESPONSE']._serialized_end=4895 + _globals['_OFFLINEAGENTSYNCDOWNREQUEST']._serialized_start=4897 + _globals['_OFFLINEAGENTSYNCDOWNREQUEST']._serialized_end=4944 + _globals['_OFFLINEAGENTSYNCDOWNRESPONSE']._serialized_start=4946 + _globals['_OFFLINEAGENTSYNCDOWNRESPONSE']._serialized_end=5003 + _globals['_GETAGENTLASTSEENREQUEST']._serialized_start=5005 + _globals['_GETAGENTLASTSEENREQUEST']._serialized_end=5068 + _globals['_AGENTLASTSEEN']._serialized_start=5070 + _globals['_AGENTLASTSEEN']._serialized_end=5121 + _globals['_GETAGENTLASTSEENRESPONSE']._serialized_start=5123 + _globals['_GETAGENTLASTSEENRESPONSE']._serialized_end=5188 + _globals['_GETACTIVEAGENTCOUNTREQUEST']._serialized_start=5190 + _globals['_GETACTIVEAGENTCOUNTREQUEST']._serialized_end=5240 + _globals['_ACTIVEAGENTCOUNT']._serialized_start=5242 + _globals['_ACTIVEAGENTCOUNT']._serialized_end=5304 + _globals['_ACTIVEAGENTFAILURE']._serialized_start=5306 + _globals['_ACTIVEAGENTFAILURE']._serialized_end=5365 + 
_globals['_GETACTIVEAGENTCOUNTRESPONSE']._serialized_start=5367 + _globals['_GETACTIVEAGENTCOUNTRESPONSE']._serialized_end=5487 + _globals['_GETAGENTDAILYCOUNTREQUEST']._serialized_start=5490 + _globals['_GETAGENTDAILYCOUNTREQUEST']._serialized_end=5625 + _globals['_MONTHYEAR']._serialized_start=5627 + _globals['_MONTHYEAR']._serialized_end=5667 + _globals['_DATERANGE']._serialized_start=5669 + _globals['_DATERANGE']._serialized_end=5708 + _globals['_AGENTDAILYCOUNT']._serialized_start=5710 + _globals['_AGENTDAILYCOUNT']._serialized_end=5761 + _globals['_AGENTCOUNTFORENTERPRISE']._serialized_start=5763 + _globals['_AGENTCOUNTFORENTERPRISE']._serialized_end=5849 + _globals['_GETAGENTDAILYCOUNTRESPONSE']._serialized_start=5851 + _globals['_GETAGENTDAILYCOUNTRESPONSE']._serialized_end=5936 # @@protoc_insertion_point(module_scope) diff --git a/keepercommander/proto/pedm_pb2.pyi b/keepercommander/proto/pedm_pb2.pyi index a0d8ca17b..88decca11 100644 --- a/keepercommander/proto/pedm_pb2.pyi +++ b/keepercommander/proto/pedm_pb2.pyi @@ -205,6 +205,20 @@ class SetCollectionLinkRequest(_message.Message): removeCollection: _containers.RepeatedCompositeFieldContainer[CollectionLink] def __init__(self, addCollection: _Optional[_Iterable[_Union[CollectionLinkData, _Mapping]]] = ..., removeCollection: _Optional[_Iterable[_Union[CollectionLink, _Mapping]]] = ...) -> None: ... +class ApprovalExtendData(_message.Message): + __slots__ = ["approvalUid", "expireIn"] + APPROVALUID_FIELD_NUMBER: _ClassVar[int] + EXPIREIN_FIELD_NUMBER: _ClassVar[int] + approvalUid: bytes + expireIn: int + def __init__(self, approvalUid: _Optional[bytes] = ..., expireIn: _Optional[int] = ...) -> None: ... 
+ +class ModifyApprovalRequest(_message.Message): + __slots__ = ["extendApproval"] + EXTENDAPPROVAL_FIELD_NUMBER: _ClassVar[int] + extendApproval: _containers.RepeatedCompositeFieldContainer[ApprovalExtendData] + def __init__(self, extendApproval: _Optional[_Iterable[_Union[ApprovalExtendData, _Mapping]]] = ...) -> None: ... + class ApprovalActionRequest(_message.Message): __slots__ = ["approve", "deny", "remove"] APPROVE_FIELD_NUMBER: _ClassVar[int] @@ -453,6 +467,62 @@ class GetCollectionLinkResponse(_message.Message): collectionLinkData: _containers.RepeatedCompositeFieldContainer[CollectionLinkData] def __init__(self, collectionLinkData: _Optional[_Iterable[_Union[CollectionLinkData, _Mapping]]] = ...) -> None: ... +class OfflineAgentRegisterRequest(_message.Message): + __slots__ = ["agentUid", "deploymentUid", "publicKey", "machineId", "collection", "agentData"] + AGENTUID_FIELD_NUMBER: _ClassVar[int] + DEPLOYMENTUID_FIELD_NUMBER: _ClassVar[int] + PUBLICKEY_FIELD_NUMBER: _ClassVar[int] + MACHINEID_FIELD_NUMBER: _ClassVar[int] + COLLECTION_FIELD_NUMBER: _ClassVar[int] + AGENTDATA_FIELD_NUMBER: _ClassVar[int] + agentUid: bytes + deploymentUid: bytes + publicKey: bytes + machineId: str + collection: _containers.RepeatedCompositeFieldContainer[CollectionValue] + agentData: bytes + def __init__(self, agentUid: _Optional[bytes] = ..., deploymentUid: _Optional[bytes] = ..., publicKey: _Optional[bytes] = ..., machineId: _Optional[str] = ..., collection: _Optional[_Iterable[_Union[CollectionValue, _Mapping]]] = ..., agentData: _Optional[bytes] = ...) -> None: ... + +class OfflineAgentRegisterResponse(_message.Message): + __slots__ = ["agentUid"] + AGENTUID_FIELD_NUMBER: _ClassVar[int] + agentUid: bytes + def __init__(self, agentUid: _Optional[bytes] = ...) -> None: ... + +class OfflineAgentSyncDownRequest(_message.Message): + __slots__ = ["agentUid"] + AGENTUID_FIELD_NUMBER: _ClassVar[int] + agentUid: bytes + def __init__(self, agentUid: _Optional[bytes] = ...) 
-> None: ... + +class OfflineAgentSyncDownResponse(_message.Message): + __slots__ = ["encryptedSyncData"] + ENCRYPTEDSYNCDATA_FIELD_NUMBER: _ClassVar[int] + encryptedSyncData: bytes + def __init__(self, encryptedSyncData: _Optional[bytes] = ...) -> None: ... + +class GetAgentLastSeenRequest(_message.Message): + __slots__ = ["activeOnly", "agentUid"] + ACTIVEONLY_FIELD_NUMBER: _ClassVar[int] + AGENTUID_FIELD_NUMBER: _ClassVar[int] + activeOnly: bool + agentUid: _containers.RepeatedScalarFieldContainer[bytes] + def __init__(self, activeOnly: bool = ..., agentUid: _Optional[_Iterable[bytes]] = ...) -> None: ... + +class AgentLastSeen(_message.Message): + __slots__ = ["agentUid", "lastSeen"] + AGENTUID_FIELD_NUMBER: _ClassVar[int] + LASTSEEN_FIELD_NUMBER: _ClassVar[int] + agentUid: bytes + lastSeen: int + def __init__(self, agentUid: _Optional[bytes] = ..., lastSeen: _Optional[int] = ...) -> None: ... + +class GetAgentLastSeenResponse(_message.Message): + __slots__ = ["lastSeen"] + LASTSEEN_FIELD_NUMBER: _ClassVar[int] + lastSeen: _containers.RepeatedCompositeFieldContainer[AgentLastSeen] + def __init__(self, lastSeen: _Optional[_Iterable[_Union[AgentLastSeen, _Mapping]]] = ...) -> None: ... 
+ class GetActiveAgentCountRequest(_message.Message): __slots__ = ["enterpriseId"] ENTERPRISEID_FIELD_NUMBER: _ClassVar[int] From 8bedf987683e738b43749a76bc1a95dda785f0ad Mon Sep 17 00:00:00 2001 From: Sergey Kolupaev Date: Thu, 22 Jan 2026 08:34:30 -0800 Subject: [PATCH 23/24] KEPM: escalated approval status --- keepercommander/commands/pedm/pedm_admin.py | 11 +++++++---- keepercommander/pedm/admin_plugin.py | 5 ++++- keepercommander/pedm/pedm_shared.py | 10 +++++----- 3 files changed, 16 insertions(+), 10 deletions(-) diff --git a/keepercommander/commands/pedm/pedm_admin.py b/keepercommander/commands/pedm/pedm_admin.py index dcc3410a0..d63ec4f93 100644 --- a/keepercommander/commands/pedm/pedm_admin.py +++ b/keepercommander/commands/pedm/pedm_admin.py @@ -2099,7 +2099,7 @@ class PedmApprovalListCommand(base.ArgparseCommand): def __init__(self): parser = argparse.ArgumentParser(prog='list', description='List EPM approval requests', parents=[base.report_output_parser]) - parser.add_argument('--type', dest='type', action='store', choices=['approved', 'denied', 'pending', 'expired'], + parser.add_argument('--type', dest='type', action='store', choices=['approved', 'denied', 'pending', 'expired', 'escalated'], help='approval type filter') super().__init__(parser) @@ -2146,7 +2146,7 @@ def __init__(self): parser.add_argument('--deny', dest='deny', action='append', help='Request UIDs for denial') parser.add_argument('--remove', dest='remove', action='append', - help='Request UIDs for removal. UID, @approved, @denied, @expired, @pending') + help='Request UIDs for removal. 
UID, @approved, @denied, @expired, @escalated, @pending') super().__init__(parser) def execute(self, context: KeeperParams, **kwargs) -> None: @@ -2186,10 +2186,13 @@ def verify_uid(uids: Any) -> Optional[List[bytes]]: (utils.base64_url_decode(x.approval_uid) for x in plugin.storage.approval_status.get_all_entities() if x.approval_status == NotificationCenter_pb2.NAS_DENIED)) elif uid == '@pending': to_remove_set.update( - (utils.base64_url_decode(x.approval_uid) for x in plugin.storage.approval_status.get_all_entities() if x.approval_status == NotificationCenter_pb2.NAS_UNSPECIFIED and x.modified >= expire_ts)) + (utils.base64_url_decode(x.approval_uid) for x in plugin.storage.approval_status.get_all_entities() if x.approval_status == NotificationCenter_pb2.NAS_UNSPECIFIED)) elif uid == '@expired': to_remove_set.update( - (utils.base64_url_decode(x.approval_uid) for x in plugin.storage.approval_status.get_all_entities() if x.approval_status == NotificationCenter_pb2.NAS_UNSPECIFIED and x.modified < expire_ts)) + (utils.base64_url_decode(x.approval_uid) for x in plugin.storage.approval_status.get_all_entities() if x.approval_status == NotificationCenter_pb2.NAS_LOST_APPROVAL_RIGHTS)) + elif uid == '@escalated': + to_remove_set.update( + (utils.base64_url_decode(x.approval_uid) for x in plugin.storage.approval_status.get_all_entities() if x.approval_status == NotificationCenter_pb2.NAS_ESCALATED)) else: to_resolve.append(uid) if len(to_resolve) > 0: diff --git a/keepercommander/pedm/admin_plugin.py b/keepercommander/pedm/admin_plugin.py index 4dd21c57b..682afb743 100644 --- a/keepercommander/pedm/admin_plugin.py +++ b/keepercommander/pedm/admin_plugin.py @@ -311,7 +311,10 @@ def get_collections() -> Iterable[admin_storage.PedmStorageCollection]: collections: List[admin_types.PedmCollection] = [] for collection_dto in get_collections(): try: - collection_value = crypto.decrypt_aes_v2(collection_dto.data, self.agent_key).decode('utf-8') + if 
collection_dto.collection_type in (1000, 1001, 1002): + collection_value = collection_dto.data.decode('utf-8') + else: + collection_value = crypto.decrypt_aes_v2(collection_dto.data, self.agent_key).decode('utf-8') collection_data = json.loads(collection_value) collection = admin_types.PedmCollection( collection_uid=collection_dto.collection_uid, collection_type=collection_dto.collection_type, diff --git a/keepercommander/pedm/pedm_shared.py b/keepercommander/pedm/pedm_shared.py index 05e40a9f0..eed1dad3e 100644 --- a/keepercommander/pedm/pedm_shared.py +++ b/keepercommander/pedm/pedm_shared.py @@ -111,12 +111,12 @@ def approval_status_to_name(approval_status: int, created: datetime.datetime, ex return 'Approved' elif approval_status == NotificationCenter_pb2.NAS_DENIED: return 'Denied' + elif approval_status == NotificationCenter_pb2.NAS_ESCALATED: + return 'Escalated' + elif approval_status == NotificationCenter_pb2.NAS_LOST_APPROVAL_RIGHTS: + return 'Expired' elif approval_status == NotificationCenter_pb2.NAS_UNSPECIFIED: - status = 'Pending' - expire_time = created + datetime.timedelta(minutes=expire_in) - if expire_time < datetime.datetime.now(): - status = 'Expired' - return status + return 'Pending' else: return 'Unsupported' From e7c2a26df1caf316d5dbb8d02177abe3c033fecf Mon Sep 17 00:00:00 2001 From: Sergey Kolupaev Date: Thu, 22 Jan 2026 08:37:19 -0800 Subject: [PATCH 24/24] Release 17.2.5 --- keepercommander/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keepercommander/__init__.py b/keepercommander/__init__.py index a1bba2d35..73bac478d 100644 --- a/keepercommander/__init__.py +++ b/keepercommander/__init__.py @@ -10,4 +10,4 @@ # Contact: ops@keepersecurity.com # -__version__ = '17.2.4' +__version__ = '17.2.5'