diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..68e9ed0e0 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,115 @@ +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# Testing +.tox/ +.nox/ +.coverage +.pytest_cache/ +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +tests/ +unit-tests/ +test_*.py +*_test.py + +# Virtual environments +venv/ +env/ +ENV/ +env.bak/ +venv.bak/ +.venv/ + +# Development tools and configs +.git/ +.gitignore +.gitattributes +pylintrc +pytest.ini +.pylintrc +tox.ini +.flake8 +.bandit +mypy.ini +.mypy.ini + +# IDE and editor files +.vscode/ +.idea/ +*.swp +*.swo +*~ +.DS_Store +Thumbs.db + +# Documentation +README.md +*.md +docs/ +RECORD_ADD_DOCUMENTATION.md +record_types.md + +# Examples and sample data +examples/ +sample_data/ + +# Development dependencies +requirements-dev.txt +extra_dependencies.txt + +# Logs and temporary files +*.log +.log +tmp/ +temp/ +logs/ +**/logs/ +keepercommander/service/core/bin/ +keepercommander/service/core/logs/ + +# OS generated files +.DS_Store +.DS_Store? +._* +.Spotlight-V100 +.Trashes +ehthumbs.db +Thumbs.db + +# Docker related (to avoid recursive copies) +Dockerfile* +docker-compose*.yml +docker-compose*.yaml +.dockerignore + +# Other development artifacts +.editorconfig +.pre-commit-config.yaml +.github/ +keeper_file.spec +keeper_folder.spec +keeper-win-file.spec +desired-pylint-warnings diff --git a/Dockerfile b/Dockerfile index 0ffc62b34..f0ac1b30d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ FROM python:3.11-slim -ENV PYTHONUNBUFFERED 1 +ENV PYTHONUNBUFFERED=1 # Create a non-root user for security RUN groupadd --system --gid 1000 commander && \ diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh index 0280dcf2c..661ede461 100644 --- a/docker-entrypoint.sh +++ b/docker-entrypoint.sh @@ -6,11 +6,42 @@ log() { echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" } -# Function to parse --user, --password, and --server from arguments +# Function to run keeper service with intelligent lifecycle management +run_keeper_service() { + local config_arg="$1" + local command_args="$2" + + # Check if command contains service-create + if [[ "$command_args" =~ service-create ]]; then + log "Service command detected, checking service status..." + + # Get service status + local service_status=$(python3 keeper.py $config_arg service-status 2>/dev/null) + + if echo "$service_status" | grep -q "Stopped"; then + log "Service exists but is stopped, starting it..." + python3 keeper.py $config_arg service-start + elif echo "$service_status" | grep -q "Running"; then + log "Service is already running, no action needed." + else + log "Service not found, creating new service..." 
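+            # Assumption (illustrative, not an upstream contract): `service-status`
+            # prints a line containing "Running" or "Stopped"; any other output
+            # (e.g. no config yet, or a login error swallowed by 2>/dev/null)
+            # falls through to this branch and triggers a fresh service-create.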
+ log "Running: python3 keeper.py $config_arg $command_args" + python3 keeper.py $config_arg $command_args + fi + else + # Not a service command, run as normal + python3 keeper.py $config_arg $command_args + fi +} + +# Function to parse --user, --password, --server, --ksm-config, --ksm-token, and --record from arguments parse_credentials() { USER="" PASSWORD="" SERVER="" + KSM_CONFIG="" + KSM_TOKEN="" + RECORD="" while [[ $# -gt 0 ]]; do case $1 in --user) @@ -25,6 +56,18 @@ parse_credentials() { SERVER="$2" shift 2 ;; + --ksm-config) + KSM_CONFIG="$2" + shift 2 + ;; + --ksm-token) + KSM_TOKEN="$2" + shift 2 + ;; + --record) + RECORD="$2" + shift 2 + ;; *) shift ;; @@ -37,26 +80,120 @@ setup_device() { local user="$1" local password="$2" local server="$3" - log "Setting up device registration and persistent login..." - # Step 1: Register device - log "Running: python3 keeper.py this-device register --user $user --password [HIDDEN] --server $server" - if ! python3 keeper.py this-device register --user "$user" --password "$password" --server $server; then + log "Registering device..." + if ! python3 keeper.py --user "$user" --password "$password" --server $server this-device register; then log "ERROR: Device registration failed" exit 1 fi # Step 2: Enable persistent login - log "Running: python3 keeper.py this-device persistent-login on --user $user --password [HIDDEN] --server $server" - if ! python3 keeper.py this-device persistent-login on --user "$user" --password "$password" --server $server; then + log "Enabling persistent login..." + if ! python3 keeper.py --user "$user" --password "$password" --server $server this-device persistent-login on; then log "ERROR: Persistent login setup failed" exit 1 fi - + + # Step 3: Set timeout + log "Setting device logout timeout to 30 Days..." + if ! python3 keeper.py --user "$user" --password "$password" --server "$server" \ + this-device timeout 43200 \ + > /dev/null; then + log "ERROR: Timeout setup failed" + exit 1 + fi + log "Device Logout Timeout set successfully" log "Device setup completed successfully" } -# Function to remove --user, --password, and --server from arguments and return the rest +# Function to download config.json from KSM record and save it to /home/commander/.keeper/ +download_config_from_ksm() { + local ksm_config_path="$1" + local ksm_token="$2" + local record_uid="$3" + + log "Downloading config.json from KSM record: $record_uid" + + # Use the KSM helper script for download + local helper_args="download --record-uid $record_uid --config-file /home/commander/.keeper/config.json" + + if [[ -n "$ksm_config_path" ]]; then + helper_args="$helper_args --ksm-config $ksm_config_path" + elif [[ -n "$ksm_token" ]]; then + helper_args="$helper_args --ksm-token $ksm_token" + else + log "ERROR: Neither KSM config path nor KSM token provided" + exit 1 + fi + + if ! python3 docker_ksm_utility.py $helper_args; then + log "ERROR: Failed to download config from KSM" + exit 1 + fi + + log "Config.json downloaded successfully from KSM record" +} + +# Function to start config.json monitoring and upload changes +start_config_monitor() { + local ksm_config_path="$1" + local ksm_token="$2" + local record_uid="$3" + local config_file_path="/home/commander/.keeper/config.json" + + log "Starting config.json monitoring for changes..." 
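+    # Note: helper_args is built as one string and expanded unquoted below, so it
+    # relies on shell word splitting; that is safe here only because none of the
+    # paths involved contain whitespace. A bash-array sketch would avoid the caveat:
+    #   local helper_args=(monitor --record-uid "$record_uid" --config-file "$config_file_path")
+    #   python3 docker_ksm_utility.py "${helper_args[@]}"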
+ + # Use the KSM helper script for monitoring + local helper_args="monitor --record-uid $record_uid --config-file $config_file_path" + + if [[ -n "$ksm_config_path" ]]; then + helper_args="$helper_args --ksm-config $ksm_config_path" + elif [[ -n "$ksm_token" ]]; then + helper_args="$helper_args --ksm-token $ksm_token" + else + log "ERROR: Neither KSM config path nor KSM token provided" + return 1 + fi + + # Start the monitoring in the background + nohup python3 docker_ksm_utility.py $helper_args > /home/commander/.keeper/config_monitor.log 2>&1 & + local monitor_pid=$! + echo "$monitor_pid" > /home/commander/.keeper/config_monitor.pid + + log "Monitor logs available at: /home/commander/.keeper/config_monitor.log" +} + +# Function to stop config.json monitoring +stop_config_monitor() { + local pid_file="/home/commander/.keeper/config_monitor.pid" + + if [[ -f "$pid_file" ]]; then + local monitor_pid=$(cat "$pid_file") + if kill -0 "$monitor_pid" 2>/dev/null; then + log "Stopping config monitor with PID: $monitor_pid" + kill "$monitor_pid" 2>/dev/null + log "Config monitor stopped successfully" + else + log "Config monitor process (PID: $monitor_pid) not found" + fi + rm -f "$pid_file" + fi + + # Clean up any remaining helper processes + pkill -f "docker_ksm_utility.py.*monitor" 2>/dev/null || true +} + +# Function to handle cleanup on exit +cleanup_on_exit() { + log "Performing cleanup on exit..." + stop_config_monitor + log "Cleanup completed" +} + +# Set up exit trap +trap cleanup_on_exit EXIT INT TERM + +# Function to remove --user, --password, --server, --ksm-config, --ksm-token, and --record from arguments and return the rest filter_args() { local filtered_args=() @@ -71,6 +208,15 @@ filter_args() { --server) shift 2 # Skip --server and its value ;; + --ksm-config) + shift 2 # Skip --ksm-config and its value + ;; + --ksm-token) + shift 2 # Skip --ksm-token and its value + ;; + --record) + shift 2 # Skip --record and its value + ;; *) filtered_args+=("$1") shift @@ -83,51 +229,103 @@ filter_args() { parse_credentials "$@" -# Set default server if not specified -if [[ -z "$SERVER" ]]; then - SERVER="keepersecurity.com" - log "Using default server: $SERVER" -else - log "Using specified server: $SERVER" -fi - -# Ensure .keeper directory exists (permissions set in Dockerfile) -log "Ensuring .keeper directory exists..." -log "Current user: $(whoami), UID: $(id -u), GID: $(id -g)" mkdir -p /home/commander/.keeper -log "Directory permissions: $(ls -ld /home/commander/.keeper)" +# Check if KSM authentication is requested +if [[ (-n "$KSM_CONFIG" || -n "$KSM_TOKEN") && -n "$RECORD" ]]; then + log "KSM authentication detected, downloading config from record: $RECORD" + + # Validate KSM authentication parameters + if [[ -n "$KSM_CONFIG" && -n "$KSM_TOKEN" ]]; then + log "ERROR: Cannot specify both --ksm-config and --ksm-token" + exit 1 + fi + + if [[ -n "$KSM_CONFIG" && ! 
-f "$KSM_CONFIG" ]]; then + log "ERROR: KSM config file not found: $KSM_CONFIG" + exit 1 + fi + + # Set environment variable to suppress KSM config file permission warnings + export KSM_CONFIG_SKIP_MODE_WARNING=TRUE + + # Download config.json from KSM record + download_config_from_ksm "$KSM_CONFIG" "$KSM_TOKEN" "$RECORD" + + # Start monitoring for config.json changes to upload back to KSM + start_config_monitor "$KSM_CONFIG" "$KSM_TOKEN" "$RECORD" + + # Filter out KSM arguments from command args + COMMAND_ARGS=$(filter_args "$@") + + # Check if there are any command arguments + if [[ -z "$COMMAND_ARGS" ]]; then + log "No command arguments provided, keeping container alive..." + sleep infinity + else + # Run the service command with downloaded config file + run_keeper_service "--config /home/commander/.keeper/config.json" "$COMMAND_ARGS" + log "Keeping container alive..." + sleep infinity + fi # Check if config.json is mounted -CONFIG_FILE="/home/commander/.keeper/config.json" -if [[ -f "$CONFIG_FILE" ]]; then +elif [[ -f "/home/commander/.keeper/config.json" ]]; then + CONFIG_FILE="/home/commander/.keeper/config.json" log "Config file found at $CONFIG_FILE, using config-based authentication" # Filter out --user and --password from arguments, keep the rest COMMAND_ARGS=$(filter_args "$@") - # Run the service command with config file - log "Running: python3 keeper.py --config $CONFIG_FILE $COMMAND_ARGS" - exec python3 keeper.py --config "$CONFIG_FILE" $COMMAND_ARGS + # Check if there are any command arguments + if [[ -z "$COMMAND_ARGS" ]]; then + log "No command arguments provided, keeping container alive..." + sleep infinity + else + # Run the service command with config file + run_keeper_service "--config $CONFIG_FILE" "$COMMAND_ARGS" + log "Keeping container alive..." + sleep infinity + fi elif [[ -n "$USER" && -n "$PASSWORD" ]]; then log "No config file found, using user/password authentication" - + # Set default server if not specified + if [[ -z "$SERVER" ]]; then + SERVER="keepersecurity.com" + log "Using default server: $SERVER" + else + log "Using specified server: $SERVER" + fi + # Setup device registration first setup_device "$USER" "$PASSWORD" "$SERVER" # Filter out --user, --password, and --server from arguments, keep the rest COMMAND_ARGS=$(filter_args "$@") - # Run the service-create command without credentials (device is now registered) - log "Running: python3 keeper.py $COMMAND_ARGS" - exec python3 keeper.py $COMMAND_ARGS + # Check if there are any command arguments + if [[ -z "$COMMAND_ARGS" ]]; then + log "Keeping container alive..." + sleep infinity + else + # Run the service command without credentials (device is now registered) + run_keeper_service "" "$COMMAND_ARGS" + log "Keeping container alive..." + sleep infinity + fi else - log "No config file found and no user/password provided, running command directly" - log "Note: Command may require authentication parameters to be passed directly" + log "No config file found and no user/password provided" # Filter out --user, --password, and --server from arguments, keep the rest COMMAND_ARGS=$(filter_args "$@") - # Run the command directly without any authentication setup - log "Running: python3 keeper.py $COMMAND_ARGS" - exec python3 keeper.py $COMMAND_ARGS + # Check if there are any command arguments + if [[ -z "$COMMAND_ARGS" ]]; then + log "Keeping container alive..." 
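+        # sleep infinity (instead of the old `exec python3 keeper.py ...`) keeps
+        # this entrypoint shell alive, so the EXIT/INT/TERM trap above can run
+        # cleanup_on_exit and stop the config monitor when the container stops.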
+ sleep infinity + else + # Run the command directly without any authentication setup + run_keeper_service "" "$COMMAND_ARGS" + log "Keeping container alive..." + sleep infinity + fi fi \ No newline at end of file diff --git a/docker_ksm_utility.py b/docker_ksm_utility.py new file mode 100755 index 000000000..ec8a5853b --- /dev/null +++ b/docker_ksm_utility.py @@ -0,0 +1,396 @@ +#!/usr/bin/env python3 +""" +Docker KSM Utility - Centralized utility for KSM operations in Docker containers. +""" + +import sys +import os +import argparse +import time +import hashlib +from pathlib import Path + +def validate_file_path(file_path, base_dir=None): + """ + Validate file path to prevent directory traversal attacks. + + Args: + file_path (str): Path to validate + base_dir (str, optional): Base directory to restrict access to + + Returns: + tuple: (is_valid, resolved_path) + """ + try: + # Convert to Path object and resolve + path = Path(file_path).resolve() + + # Check for directory traversal attempts + if '..' in str(path) or str(path).startswith('/..'): + return False, None + + # If base_dir is specified, ensure path is within it + if base_dir: + base_path = Path(base_dir).resolve() + try: + path.relative_to(base_path) + except ValueError: + return False, None + + # Additional security checks + str_path = str(path) + dangerous_patterns = ['../', '..\\', '~/', '/etc/', '/proc/', '/sys/'] + if any(pattern in str_path for pattern in dangerous_patterns): + return False, None + + return True, str(path) + + except (OSError, ValueError): + return False, None + + +def check_ksm_dependency(): + """Check if keeper_secrets_manager_core is installed.""" + try: + import keeper_secrets_manager_core + return True + except ImportError: + print("ERROR: keeper_secrets_manager_core is not installed") + return False + +def download_config(ksm_config_path, ksm_token, record_uid, output_path): + """Download config.json from KSM record.""" + if not check_ksm_dependency(): + return False + + # Validate file paths + if ksm_config_path: + is_valid, validated_config_path = validate_file_path(ksm_config_path) + if not is_valid: + print("ERROR: Invalid KSM config file path") + return False + ksm_config_path = validated_config_path + + is_valid, validated_output_path = validate_file_path(output_path) + if not is_valid: + print("ERROR: Invalid output file path") + return False + output_path = validated_output_path + + from keeper_secrets_manager_core import SecretsManager + from keeper_secrets_manager_core.storage import FileKeyValueStorage + + try: + # Initialize SecretsManager + if ksm_config_path: + if not os.path.exists(ksm_config_path): + print("ERROR: KSM config file not found") + return False + secrets_manager = SecretsManager(config=FileKeyValueStorage(ksm_config_path)) + else: + secrets_manager = SecretsManager(token=ksm_token) + + # Get the record + secrets = secrets_manager.get_secrets([record_uid]) + if not secrets: + print("ERROR: Record not found") + return False + + secret = secrets[0] + + # Find config.json attachment + for file in secret.files: + if file.name.lower() == 'config.json': + print(f"Found config.json attachment: {file.name}") + # Ensure output directory exists + os.makedirs(os.path.dirname(output_path), exist_ok=True) + file.save_file(output_path, True) + print("Downloaded config.json successfully") + return True + + print("ERROR: config.json attachment not found in record") + return False + + except Exception as e: + print(f"ERROR: Failed to download config from KSM") + return False + +def 
_get_file_uid(file_obj): + """ + Extract file UID from file object. + + Args: + file_obj: File object from KSM + + Returns: + str: File UID or None if not found + """ + try: + # Try different ways to get the file UID + if hasattr(file_obj, 'f') and file_obj.f: + file_uid = file_obj.f.get('fileUid') + if file_uid: + return file_uid + + if hasattr(file_obj, 'fileUid'): + return file_obj.fileUid + + if hasattr(file_obj, 'uid'): + return file_obj.uid + + return None + + except Exception: + return None + +def _remove_existing_config_files(secrets_manager, secret, record_uid): + """ + Remove existing config.json files from KSM record. + + Args: + secrets_manager: KSM SecretsManager instance + secret: KSM secret object + record_uid: Record UID for error context + + Returns: + tuple: (success, updated_secret) + """ + try: + # Find existing config.json files + config_files = [f for f in secret.files if f.name.lower() == 'config.json'] + if len(config_files) == 0: + return True, secret + + files_to_remove = [] + for file_obj in config_files: + file_uid = _get_file_uid(file_obj) + if file_uid: + files_to_remove.append(file_uid) + print(f"Found config.json to remove UID: [REDACTED]") + else: + print(f"WARNING: Could not find UID for file: {file_obj.name}") + + if files_to_remove: + secrets_manager.save(secret, links_to_remove=files_to_remove) + print(f"Removed {len(files_to_remove)} config.json file(s)") + # Refresh the secret after removal + updated_secret = secrets_manager.get_secrets([record_uid])[0] + return True, updated_secret + + return True, secret + + except Exception: + print(f"WARNING: Failed to remove existing files") + return False, secret + +def _upload_new_config_file(secrets_manager, secret, config_file_path): + """ + Upload new config.json file to KSM record. 
+
+    Args:
+        secrets_manager: KSM SecretsManager instance
+        secret: KSM secret object
+        config_file_path: Path to local config file
+
+    Returns:
+        bool: success
+    """
+    from keeper_secrets_manager_core.core import KeeperFileUpload
+
+    try:
+        # Validate config file exists and is readable
+        if not os.path.exists(config_file_path):
+            print("ERROR: Config file not found")
+            return False
+
+        print("Uploading new config.json...")
+        my_file = KeeperFileUpload.from_file(config_file_path, 'config.json', 'config.json')
+        secrets_manager.upload_file(secret, file=my_file)
+        print("Successfully uploaded new config.json")
+        return True
+
+    except Exception:
+        print("ERROR: Failed to upload config file")
+        return False
+
+def upload_config(ksm_config_path, ksm_token, record_uid, config_file_path):
+    """Upload config.json to KSM record, removing existing ones first."""
+    if not check_ksm_dependency():
+        return False
+
+    # Validate file paths
+    if ksm_config_path:
+        is_valid, validated_config_path = validate_file_path(ksm_config_path)
+        if not is_valid:
+            print("ERROR: Invalid KSM config file path")
+            return False
+        ksm_config_path = validated_config_path
+
+    is_valid, validated_config_file_path = validate_file_path(config_file_path)
+    if not is_valid:
+        print("ERROR: Invalid config file path")
+        return False
+    config_file_path = validated_config_file_path
+
+    from keeper_secrets_manager_core import SecretsManager
+    from keeper_secrets_manager_core.storage import FileKeyValueStorage
+
+    try:
+        # Initialize SecretsManager
+        if ksm_config_path:
+            if not os.path.exists(ksm_config_path):
+                print("ERROR: KSM config file not found")
+                return False
+            secrets_manager = SecretsManager(config=FileKeyValueStorage(ksm_config_path))
+        else:
+            secrets_manager = SecretsManager(token=ksm_token)
+
+        # Get the record
+        secrets = secrets_manager.get_secrets([record_uid])
+        if not secrets:
+            print("ERROR: Record not found")
+            return False
+
+        secret = secrets[0]
+
+        # Remove existing config.json files
+        success, updated_secret = _remove_existing_config_files(secrets_manager, secret, record_uid)
+        if not success:
+            return False
+
+        # Upload new config.json file
+        success = _upload_new_config_file(secrets_manager, updated_secret, config_file_path)
+        return success
+
+    except Exception:
+        print("ERROR: Failed to upload config")
+        return False
+
+def _get_secure_file_hash(file_path):
+    """
+    Securely calculate file hash with proper error handling.
+ + Args: + file_path (str): Path to file + + Returns: + str: File hash or None if file doesn't exist/error + """ + try: + is_valid, validated_path = validate_file_path(file_path) + if not is_valid: + return None + + if not os.path.exists(validated_path): + return None + + # Use context manager for atomic file read + with open(validated_path, 'rb') as f: + content = f.read() + return hashlib.sha256(content).hexdigest() # Use SHA-256 instead of MD5 for security + + except (OSError, IOError): + return None + +def monitor_config(ksm_config_path, ksm_token, record_uid, config_file_path): + """Monitor config.json file for changes and upload when modified.""" + + # Validate file paths at startup + is_valid, validated_config_file_path = validate_file_path(config_file_path) + if not is_valid: + print("ERROR: Invalid config file path for monitoring") + return + config_file_path = validated_config_file_path + + if ksm_config_path: + is_valid, validated_ksm_config_path = validate_file_path(ksm_config_path) + if not is_valid: + print("ERROR: Invalid KSM config file path for monitoring") + return + ksm_config_path = validated_ksm_config_path + + print("Monitoring config file for changes...") + + last_hash = _get_secure_file_hash(config_file_path) + + while True: + try: + time.sleep(30) # Check every 30 seconds + + current_hash = _get_secure_file_hash(config_file_path) + + if current_hash is None: + if last_hash is not None: + print("Config file was removed, continuing to monitor...") + last_hash = None + continue + + if current_hash != last_hash: + print("Config file changed, uploading to KSM record...") + if upload_config(ksm_config_path, ksm_token, record_uid, config_file_path): + print("Config upload completed successfully") + last_hash = current_hash # Only update hash on successful upload + else: + print("Config upload failed, will retry on next change") + + except KeyboardInterrupt: + print("Monitoring stopped by user") + break + except Exception: + print(f"ERROR: Error in config monitor") + time.sleep(5) + +def main(): + parser = argparse.ArgumentParser(description="KSM Docker Utility - Secure file operations for KSM records") + parser.add_argument("command", choices=['download', 'upload', 'monitor'], help="Command to execute") + parser.add_argument("--ksm-config", help="KSM config file path") + parser.add_argument("--ksm-token", help="KSM access token") + parser.add_argument("--record-uid", required=True, help="KSM record UID") + parser.add_argument("--config-file", required=True, help="Local config.json file path") + + args = parser.parse_args() + + # Validate authentication parameters + if not args.ksm_config and not args.ksm_token: + print("Either --ksm-config or --ksm-token must be provided") + sys.exit(1) + + if args.ksm_config and args.ksm_token: + print("Cannot specify both --ksm-config and --ksm-token") + sys.exit(1) + + # Validate file paths early + if args.ksm_config: + is_valid, _ = validate_file_path(args.ksm_config) + if not is_valid: + print("Invalid KSM config file path") + sys.exit(1) + + is_valid, _ = validate_file_path(args.config_file) + if not is_valid: + print("Invalid config file path") + sys.exit(1) + + + success = False + + try: + if args.command == 'download': + success = download_config(args.ksm_config, args.ksm_token, args.record_uid, args.config_file) + elif args.command == 'upload': + success = upload_config(args.ksm_config, args.ksm_token, args.record_uid, args.config_file) + elif args.command == 'monitor': + # Monitor runs indefinitely + 
monitor_config(args.ksm_config, args.ksm_token, args.record_uid, args.config_file) + success = True + except KeyboardInterrupt: + print("\nOperation interrupted by user") + success = True + except Exception: + print(f"ERROR: Unexpected error occurred") + success = False + + sys.exit(0 if success else 1) + +if __name__ == "__main__": + main() diff --git a/examples/aws_lambda/lambda_handler.py b/examples/aws_lambda/lambda_handler.py index 42bd3d1e2..2ef0aa655 100644 --- a/examples/aws_lambda/lambda_handler.py +++ b/examples/aws_lambda/lambda_handler.py @@ -7,127 +7,92 @@ # Keeper Commander # Copyright 2024 Keeper Security Inc. # Contact: ops@keepersecurity.com -# -# -# Sample AWS Lambda handler script -# In this example, we generate a report that combines the outputs -# of the `security-audit-report` and `user-report` commands, -# and then send those results to a specified email address ("KEEPER_SENDTO") - -import json import os -import datetime -from typing import Optional - -from email.mime.application import MIMEApplication -from email.mime.multipart import MIMEMultipart -from email.mime.text import MIMEText -import boto3 +# Without mounted volumes, Lambda can only write to /tmp. +# The following environment variables are needed to make sure the Python import cache +# is written to /tmp and not the user folder. +os.environ['HOME'] = '/tmp' +os.environ['TMPDIR'] = '/tmp' +os.environ['TEMP'] = '/tmp' from keepercommander import api -from keepercommander.commands.enterprise import UserReportCommand -from keepercommander.commands.security_audit import SecurityAuditReportCommand -from keepercommander.params import KeeperParams - - -# This Lambda's entry point +from keepercommander.__main__ import get_params_from_config + +# By default, Keeper Commander will attempt to create a .keeper directory +# in the user folder to store the JSON configuration. +# In this case we will create a .keeper directory in /tmp +# to store the JSON configuration (using get_params_from_config()). 
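+# Note: Lambda's /tmp survives only warm invocations of the same execution
+# environment, so the config.json written there is recreated on cold starts
+# and a fresh login/device setup should be expected at that point.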
+keeper_tmp = '/tmp/.keeper' +os.makedirs(keeper_tmp, exist_ok=True) + +# ------------------------------------------------------ +# Keeper initialization function +# ------------------------------------------------------ +def get_params(): + # Change the default JSON configuration location to /tmp + params = get_params_from_config(keeper_tmp + '/config.json') + + # Set username and password for Keeper Commander login + #params.config = {'sso_master_password': True} # Force Master-Password login for SSO users + #params.server = os.environ.get('KEEPER_SERVER') # https://keepersecurity.com + params.user = os.environ.get('KEEPER_USER') + params.password = os.environ.get('KEEPER_PASSWORD') + + + return params + +# ------------------------------------------------------ +# Keeper JSON report function +# ------------------------------------------------------ +def get_keeper_report(params, kwargs): + from keepercommander.commands.aram import AuditReportCommand + from json import loads + + report_class = AuditReportCommand() + report = report_class.execute(params, **kwargs) + return loads(report) + +# ------------------------------------------------------ +# Keeper CLI function +# ------------------------------------------------------ +def run_keeper_cli(params, command): + from keepercommander import cli + + cli.do_command(params, command) + # No return statement as this function runs the CLI command + # without returning anything in Python + +# ------------------------------------------------------ +# Lambda handler +# ------------------------------------------------------ def lambda_handler(event, context): + # Initialize Keeper Commander params params = get_params() - api.login(params) - api.query_enterprise(params, True) - - # Log Commander-related issues (e.g., incorrect credentials) - # using AWS's built-in logging module and abort - if not params.session_token: - print('Not connected') - return 'Error: See Lambda log for details' - if not params.enterprise: - print('Not enterprise administrator') - return 'Error: See Lambda log for details' - - # Generate and send report - report = create_user_report(params) - response = email_result(report) - return response - - -# Create report (format: JSON) combining data from 2 existing Commander reports -def create_user_report(params): # type: (KeeperParams) -> Optional[str] - user_report_cmd = UserReportCommand() - user_report_data = user_report_cmd.execute(params, format='json') - data = json.loads(user_report_data) - users = {x['email']: x for x in data} - security_audit_report = SecurityAuditReportCommand() - security_audit_report_data = security_audit_report.execute(params, format='json') - if security_audit_report_data: - data = json.loads(security_audit_report_data) - for x in data: - if 'email' in x: - email = x['email'] - if email in users: - user = users[email] - for key in x: - if key not in user: - if key not in ('node_path', 'username'): - user[key] = x[key] - else: - users[email] = x - - return json.dumps(list(users.values()), indent=2) + # Keeper login and sync + api.login(params) + api.sync_down(params) + # Enterprise sync (for enterprise commands) + api.query_enterprise(params) -# Email report data (as JSON attachment) to recipient specified in this Lambda -# function's environment variables -def email_result(report): - sender = os.environ.get('KEEPER_SENDER') - sendto = os.environ.get('KEEPER_SENDTO') - region = 'us-east-1' - ses_client = boto3.client('ses', region_name=region) - - message = MIMEMultipart('mixed') - message['Subject'] 
= 'Keeper Commander User Security Report With CSV (attached)' - message['To'] = sendto - message['From'] = sender - now = datetime.datetime.now() - - body = MIMEText(f'User Report Output created and sent at {now}', 'plain') - message.attach(body) - - attachment = MIMEApplication(report) - attachment.add_header( - 'Content-Disposition', - 'attachment', - filename='user-report.json' + run_keeper_cli( + params, + 'device-approve -a' ) - message.attach(attachment) - - response = ses_client.send_raw_email( - Source=message['From'], - Destinations=[sendto], - RawMessage={'Data': message.as_string()} + + run_keeper_cli( + params, + 'action-report --target locked --apply-action delete --dry-run' ) - return response - - -# Get required Commander parameters from Lambda's environment variables (in "Configuration") -def get_params(): - user = os.environ.get('KEEPER_USER') - pw = os.environ.get('KEEPER_PASSWORD') - server = os.environ.get('KEEPER_SERVER') - private_key = os.environ.get('KEEPER_PRIVATE_KEY') - token = os.environ.get('KEEPER_DEVICE_TOKEN') - my_params = KeeperParams() - - # Force password-login (needed for SSO + Master Password accounts) - my_params.config = {'sso_master_password': True} - - my_params.user = user - my_params.password = pw - my_params.server = server - my_params.device_private_key = private_key - my_params.device_token = token - return my_params - + return get_keeper_report( + params, + { + 'report_type':'raw', + 'format':'json', + 'limit':100, + 'event_type':['login'] + } + ) diff --git a/keepercommander/__init__.py b/keepercommander/__init__.py index eaee1e953..53b77f9dc 100644 --- a/keepercommander/__init__.py +++ b/keepercommander/__init__.py @@ -10,4 +10,4 @@ # Contact: ops@keepersecurity.com # -__version__ = '17.1.10' +__version__ = '17.1.11' diff --git a/keepercommander/__main__.py b/keepercommander/__main__.py index 1dfdcccaa..81408e3ee 100644 --- a/keepercommander/__main__.py +++ b/keepercommander/__main__.py @@ -220,12 +220,41 @@ def main(from_package=False): sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) opts, flags = parser.parse_known_args(sys.argv[1:]) + # Store the original command arguments for proper reconstruction if opts.command: # Find where the command starts in the original args and take everything after it try: cmd_index = sys.argv[1:].index(opts.command) original_args_after_command = sys.argv[1:][cmd_index+1:] + + # Filter out arguments that were consumed by the main parser + filtered_args = [] + skip_next = False + for arg in original_args_after_command: + if skip_next: + skip_next = False + continue + + # Skip arguments that were handled by main parser + main_parser_args = ['--config', '--server', '--user', '--password', '--version', '--debug', + '--batch-mode', '--launched-with-shortcut', '--proxy', '--unmask-all', '--fail-on-throttle', + '-ks', '-ku', '-kp', '-lwsc'] + + is_main_parser_arg = False + for main_arg in main_parser_args: + if arg.startswith(main_arg + '=') or arg == main_arg: + is_main_parser_arg = True + if arg == main_arg: + skip_next = True # Skip the next argument too (the argument value) + break + + if is_main_parser_arg: + continue + + filtered_args.append(arg) + + original_args_after_command = filtered_args except ValueError: original_args_after_command = [] else: @@ -301,8 +330,8 @@ def main(from_package=False): params.batch_mode = True else: if opts.command: - # Use the original argument order instead of the parsed/split version - options = ' '.join(original_args_after_command) if 
original_args_after_command else '' + # Use the filtered original argument order to preserve proper flag/value pairing + options = ' '.join([shlex.quote(x) for x in original_args_after_command]) if original_args_after_command else '' command = ' '.join([opts.command or '', options]).strip() params.commands.append(command) params.commands.append('q') diff --git a/keepercommander/cli.py b/keepercommander/cli.py index 12fb63471..23939b76b 100644 --- a/keepercommander/cli.py +++ b/keepercommander/cli.py @@ -12,12 +12,14 @@ import datetime import logging import os +import re import shlex import subprocess import sys import threading import time from collections import OrderedDict +from pathlib import Path from typing import Union from prompt_toolkit import PromptSession @@ -299,6 +301,7 @@ def is_msp(params_local): print("\nToggle debug mode") print("\noptional arguments:") print(" -h, --help show this help message and exit") + print(" --file=PATH write DEBUG logs to PATH (does not enable terminal DEBUG)") return elif command_line.lower().startswith('q ') or command_line.lower().startswith('quit '): print("usage: quit|q [-h]") @@ -314,10 +317,24 @@ def is_msp(params_local): if command_line.lower() == 'c' or command_line.lower() == 'cls' or command_line.lower() == 'clear': print(chr(27) + "[2J") - elif command_line == 'debug': - is_debug = logging.getLogger().level <= logging.DEBUG - logging.getLogger().setLevel((logging.WARNING if params.batch_mode else logging.INFO) if is_debug else logging.DEBUG) - logging.info('Debug %s', 'OFF' if is_debug else 'ON') + elif command_line.startswith('debug'): + try: + tokens = shlex.split(command_line) + debug_manager.process_command(tokens, params.batch_mode) + except Exception as e: + logging.error(f"Error processing debug command: {e}") + + + # Toggle Rust verbose logging if available + try: + import keeper_pam_webrtc_rs + new_debug_state = debug_manager.is_console_debug_on(params.batch_mode) + keeper_pam_webrtc_rs.set_verbose_logging(new_debug_state) + logging.debug('Rust verbose logging %s', 'ON' if new_debug_state else 'OFF') + except ImportError: + pass # Rust library not available, skip + except Exception as e: + logging.debug(f'Failed to toggle Rust verbose logging: {e}') else: cmd, args = command_and_args_from_cmd(command_line) @@ -433,6 +450,182 @@ def force_quit(): prompt_session = None +class DebugManager: + """Debug manager for console and file logging.""" + + def __init__(self): + self.logger = logging.getLogger() + + def has_file_logging(self): + """Check if file logging is active.""" + return any(getattr(h, '_debug_file', False) for h in self.logger.handlers) + + def is_console_debug_on(self, batch_mode): + """Check if console debug is enabled.""" + for h in self.logger.handlers: + if isinstance(h, logging.StreamHandler) and not getattr(h, '_debug_file', False): + return h.level == logging.DEBUG + return self.logger.level == logging.DEBUG and not self.has_file_logging() + + def set_console_debug(self, enabled, batch_mode): + """Set console debug level.""" + level = logging.DEBUG if enabled else (logging.WARNING if batch_mode else logging.INFO) + for h in self.logger.handlers: + if isinstance(h, logging.StreamHandler) and not getattr(h, '_debug_file', False): + h.setLevel(level) + + def setup_file_logging(self, file_path): + """Setup debug file logging.""" + try: + validated_path = self._validate_log_file_path(file_path) + + log_dir = os.path.dirname(validated_path) + os.makedirs(log_dir, mode=0o750, exist_ok=True) + + for h in 
list(self.logger.handlers): + if getattr(h, '_debug_file', False): + self.logger.removeHandler(h) + try: + h.close() + except (OSError, IOError) as close_error: + logging.warning(f'Failed to close log handler: {close_error}') + except Exception as unexpected_error: + logging.error(f'Unexpected error closing log handler: {unexpected_error}') + + # Add new file handler + fh = logging.FileHandler(validated_path, mode='a', encoding='utf-8') + fh.setLevel(logging.DEBUG) + fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')) + fh.addFilter(lambda record: record.levelno != logging.INFO) # Filter out INFO + fh._debug_file = True + self.logger.addHandler(fh) + self.logger.setLevel(logging.DEBUG) + + logging.info(f'Debug file logging enabled: {validated_path}') + return True + except (ValueError, OSError, IOError) as e: + logging.error(f'Failed to setup file logging: {e}') + return False + except Exception as e: + logging.error(f'Unexpected error setting up file logging: {e}') + return False + + def _validate_log_file_path(self, file_path): + """Validate and sanitize log file path to prevent security issues.""" + if not file_path or not isinstance(file_path, str): + raise ValueError("File path must be a non-empty string") + + sanitized_path = ''.join(char for char in file_path if ord(char) >= 32 and char != '\x7f') + + if not sanitized_path: + raise ValueError("File path contains only invalid characters") + + try: + path_obj = Path(sanitized_path) + + resolved_path = path_obj.resolve() + + resolved_str = str(resolved_path).lower() + forbidden_paths = ['/etc/', '/bin/', '/sbin/', '/usr/bin/', '/usr/sbin/', + '/boot/', '/dev/', '/proc/', '/sys/', '/root/'] + + for forbidden in forbidden_paths: + if resolved_str.startswith(forbidden): + raise ValueError(f"Access to system directory '{forbidden}' is not allowed") + + filename = path_obj.name + if not filename or filename in ('.', '..'): + raise ValueError("Invalid filename") + + suspicious_patterns = ['..', '~/', '$', '`', ';', '|', '&', '<', '>', '*', '?'] + for pattern in suspicious_patterns: + if pattern in filename: + raise ValueError(f"Filename contains suspicious pattern: '{pattern}'") + + valid_extensions = ['.log', '.txt', '.out'] + if not any(filename.lower().endswith(ext) for ext in valid_extensions): + logging.warning(f"Log file '{filename}' does not have a standard log extension") + + return str(resolved_path) + + except (OSError, RuntimeError) as e: + raise ValueError(f"Invalid file path: {e}") + except Exception as e: + raise ValueError(f"Path validation failed: {e}") + + def _looks_like_filename(self, token): + """Check if a token looks like a filename with proper validation.""" + if not token or not isinstance(token, str): + return False + + token = token.strip() + + if len(token) < 1: + return False + + has_extension = re.search(r'\.[a-zA-Z0-9]{1,10}$', token) + + has_path_separator = '/' in token or '\\' in token + + looks_like_name = len(token) > 2 and re.match(r'^[a-zA-Z0-9._-]+$', token) + + return bool(has_extension or has_path_separator or looks_like_name) + + def toggle_console_debug(self, batch_mode): + """Toggle console debug on/off.""" + current = self.is_console_debug_on(batch_mode) + new_state = not current + + self.set_console_debug(new_state, batch_mode) + + if not self.has_file_logging(): + level = logging.DEBUG if new_state else (logging.WARNING if batch_mode else logging.INFO) + self.logger.setLevel(level) + + logging.info(f'Debug {"ON" if new_state else "OFF"}') + return new_state 
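+    # Usage sketch (interactive shell, illustrative):
+    #   My Vault> debug                                   # toggle console DEBUG
+    #   My Vault> debug --file /tmp/commander-debug.log   # add a DEBUG file handler
+    # Console verbosity is set per StreamHandler, so file logging can capture
+    # DEBUG while the terminal stays at INFO/WARNING.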
+ + def process_command(self, tokens, batch_mode): + """Process debug command.""" + # Look for --file argument + file_path = None + file_flag_present = False + + for i, token in enumerate(tokens[1:], 1): + if token == '--file': + file_flag_present = True + if i + 1 < len(tokens): + next_token = tokens[i + 1] + if not next_token.startswith('-'): + file_path = next_token + break + elif token.startswith('--file='): + file_flag_present = True + file_path = token.split('=', 1)[1] + # Handle empty value after equals sign + if not file_path.strip(): + file_path = None + break + + if file_flag_present and not file_path: + print("Please specify the file path for logging to file: debug --file ") + return False + elif file_path: + return self.setup_file_logging(file_path) + else: + # No --file flag present, check for potential filename arguments + if len(tokens) > 1: + for token in tokens[1:]: + # Check if token looks like a filename + if not token.startswith('-') and self._looks_like_filename(token): + print(f"Please specify the --file flag for logging to file: debug --file {token}") + return False + + self.toggle_console_debug(batch_mode) + return True + +debug_manager = DebugManager() + def read_command_with_continuation(prompt_session, params): """Read command with support for line continuation using backslash.""" diff --git a/keepercommander/commands/base.py b/keepercommander/commands/base.py index 668699ff6..c81aca177 100644 --- a/keepercommander/commands/base.py +++ b/keepercommander/commands/base.py @@ -130,16 +130,21 @@ def register_commands(commands, aliases, command_info): service_commands(commands) service_command_info(aliases, command_info) - if sys.version_info.major == 3 and sys.version_info.minor >= 8: - from . import discoveryrotation - discoveryrotation.register_commands(commands) - discoveryrotation.register_command_info(aliases, command_info) + toggle_pam_legacy_commands(legacy=False) -def register_pam_legacy_commands(): - from . import discoveryrotation_v1 - discoveryrotation_v1.register_commands(commands) - discoveryrotation_v1.register_command_info(aliases, command_info) +def toggle_pam_legacy_commands(legacy: bool): + if sys.version_info.major > 3 or (sys.version_info.major == 3 and sys.version_info.minor >= 8): + from . import discoveryrotation + from . 
import discoveryrotation_v1 + if legacy is True: + discoveryrotation_v1.register_commands(commands) + discoveryrotation_v1.register_command_info(aliases, command_info) + else: + discoveryrotation.register_commands(commands) + discoveryrotation.register_command_info(aliases, command_info) + else: + logging.debug('pam commands require Python 3.8 or newer') def register_enterprise_commands(commands, aliases, command_info): diff --git a/keepercommander/commands/discoveryrotation.py b/keepercommander/commands/discoveryrotation.py index dd32c1794..3bfd59d8f 100644 --- a/keepercommander/commands/discoveryrotation.py +++ b/keepercommander/commands/discoveryrotation.py @@ -23,7 +23,7 @@ from keeper_secrets_manager_core.utils import url_safe_str_to_bytes from .base import (Command, GroupCommand, user_choice, dump_report_data, report_output_parser, field_to_title, - FolderMixin, RecordMixin, register_pam_legacy_commands) + FolderMixin, RecordMixin, toggle_pam_legacy_commands) from .folder import FolderMoveCommand from .ksm import KSMCommand from .pam import gateway_helper, router_helper @@ -102,7 +102,7 @@ def __init__(self): self.register_command('action', GatewayActionCommand(), 'Execute action on the Gateway', 'a') self.register_command('tunnel', PAMTunnelCommand(), 'Manage Tunnels', 't') self.register_command('split', PAMSplitCommand(), 'Split credentials from legacy PAM Machine', 's') - self.register_command('legacy', PAMLegacyCommand(), 'Switch to legacy PAM commands') + self.register_command('legacy', PAMLegacyCommand(), 'Toggle PAM Legacy commands: ON/OFF') self.register_command('connection', PAMConnectionCommand(), 'Manage Connections', 'n') self.register_command('rbi', PAMRbiCommand(), 'Manage Remote Browser Isolation', 'b') self.register_command('project', PAMProjectCommand(), 'PAM Project Import/Export', 'p') @@ -231,13 +231,26 @@ def __init__(self): class PAMLegacyCommand(Command): - parser = argparse.ArgumentParser(prog='pam legacy', description="Switch to using obsolete PAM commands") + parser = argparse.ArgumentParser(prog='pam legacy', description="Toggle PAM Legacy mode: ON/OFF - PAM Legacy commands are obsolete") + parser.add_argument('--status', '-s', required=False, dest='status', action='store_true', help='Show the current status - Legacy mode: ON/OFF') def get_parser(self): return PAMLegacyCommand.parser def execute(self, params, **kwargs): - register_pam_legacy_commands() + from .base import commands + status = kwargs.get('status') or False + pamc = commands.get("pam") + # Legacy mode is missing: connection, split, rbi, project (tunnel - commented out) + conn = pamc.subcommands.get("connection") if pamc and pamc.subcommands else None + legacy = False if conn and isinstance(conn, PAMConnectionCommand) else True + if status: + if legacy: + print("PAM Legacy mode: ON") + else: + print ("PAM Legacy mode: OFF") + return + toggle_pam_legacy_commands(not legacy) class PAMCmdListJobs(Command): @@ -542,13 +555,14 @@ def config_iam_aad_user(_dag, target_record, target_iam_aad_config_uid): [target_record.record_uid, target_record.title, not disabled, record_config_uid, record_resource_uid, schedule, complexity]) - # 6. Construct Request object + # 6. 
Construct Request object for IAM: empty resourceUid and noop=False
     rq = router_pb2.RouterRecordRotationRequest()
     if current_record_rotation:
         rq.revision = current_record_rotation.get('revision', 0)
     rq.recordUid = utils.base64_url_decode(target_record.record_uid)
     rq.configurationUid = utils.base64_url_decode(record_config_uid)
-    rq.resourceUid = utils.base64_url_decode(record_resource_uid) if record_resource_uid else b''
+    rq.resourceUid = b''  # a non-empty resourceUid sets it as General rotation
+    rq.noop = False  # True sets it as NOOP
     rq.schedule = json.dumps(record_schedule_data) if record_schedule_data else ''
     rq.pwdComplexity = pwd_complexity_rule_list_encrypted
     rq.disabled = disabled
@@ -991,6 +1005,9 @@ def add_folders(sub_folder):  # type: (BaseFolderNode) -> None
     r_requests = []  # type: List[router_pb2.RouterRecordRotationRequest]
+    # Note: --folder, -fd FOLDER_NAME sets up General rotation;
+    # use --schedule-only, -so to preserve individual setups (General, IAM, NOOP);
+    # use --iam-aad-config, -iac IAM_AAD_CONFIG_UID to convert to IAM User
     for _record in pam_records:
         tmp_dag = TunnelDAG(params, encrypted_session_token, encrypted_transmission_key, _record.record_uid)
         if _record.record_type in ['pamMachine', 'pamDatabase', 'pamDirectory', 'pamRemoteBrowser']:
@@ -1006,6 +1023,7 @@ def add_folders(sub_folder):  # type: (BaseFolderNode) -> None
                            ' --resource is used to configure users found on a resource.'
                            ' --iam-aad-config-uid is used to configure AWS IAM or Azure AD users')
+        # NB! --folder=UID without --iam-aad-config, or --schedule-only converts to General rotation
         if iam_aad_config_uid:
             config_iam_aad_user(tmp_dag, _record, iam_aad_config_uid)
         else:
@@ -1588,7 +1606,7 @@ def print_root_rotation_setting(params, is_verbose=False, format_type='table'):
 common_parser = argparse.ArgumentParser(add_help=False)
 common_parser.add_argument('--environment', '-env', dest='config_type', action='store',
-                           choices=['local', 'aws', 'azure'], help='PAM Configuration Type', )
+                           choices=['local', 'aws', 'azure', 'domain', 'oci'], help='PAM Configuration Type', )
 common_parser.add_argument('--title', '-t', dest='title', action='store', help='Title of the PAM Configuration')
 common_parser.add_argument('--gateway', '-g', dest='gateway_uid', action='store', help='Gateway UID or Name')
 common_parser.add_argument('--shared-folder', '-sf', dest='shared_folder_uid', action='store',
@@ -1611,7 +1629,22 @@ def print_root_rotation_setting(params, is_verbose=False, format_type='table'):
 azure_group.add_argument('--subscription_id', dest='subscription_id', action='store', help='Subscription Id')
 azure_group.add_argument('--tenant-id', dest='tenant_id', action='store', help='Tenant Id')
-azure_group.add_argument('--resource-group', dest='resource_group', action='append', help='Resource Group')
+azure_group.add_argument('--resource-group', dest='resource_groups', action='append', help='Resource Group')
+domain_group = common_parser.add_argument_group('domain', 'Domain configuration')
+domain_group.add_argument('--domain-id', dest='domain_id', action='store', help='Domain ID')
+domain_group.add_argument('--domain-hostname', dest='domain_hostname', action='store', help='Domain hostname')
+domain_group.add_argument('--domain-port', dest='domain_port', action='store', help='Domain port')
+domain_group.add_argument('--domain-use-ssl', dest='domain_use_ssl', choices=['true', 'false'], help='Domain use SSL flag')
+domain_group.add_argument('--domain-scan-dc-cidr', dest='domain_scan_dc_cidr', choices=['true', 'false'],
help='Domain scan DC CIDR flag')
+domain_group.add_argument('--domain-network-cidr', dest='domain_network_cidr', action='store', help='Domain Network CIDR')
+domain_group.add_argument('--domain-admin', dest='domain_administrative_credential', action='store', help='Domain administrative credential')
+oci_group = common_parser.add_argument_group('oci', 'OCI configuration')
+oci_group.add_argument('--oci-id', dest='oci_id', action='store', help='OCI ID')
+oci_group.add_argument('--oci-admin-id', dest='oci_admin_id', action='store', help='OCI Admin ID')
+oci_group.add_argument('--oci-admin-public-key', dest='oci_admin_public_key', action='store', help='OCI admin public key')
+oci_group.add_argument('--oci-admin-private-key', dest='oci_admin_private_key', action='store', help='OCI admin private key')
+oci_group.add_argument('--oci-tenancy', dest='oci_tenancy', action='store', help='OCI tenancy')
+oci_group.add_argument('--oci-region', dest='oci_region', action='store', help='OCI region')

 class PamConfigurationEditMixin(RecordEditMixin):
@@ -1643,7 +1676,7 @@ def parse_pam_configuration(self, params, record, **kwargs):
             record.fields.append(field)
         if len(field.value) == 0:
-            field.value.append(dict())
+            field.value.append({})
         value = field.value[0]
         gateway_uid = None  # type: Optional[str]
@@ -1709,6 +1742,20 @@ def parse_pam_configuration(self, params, record, **kwargs):
             value['resourceRef'] = list(record_uids)
+    @staticmethod
+    def resolve_single_record(params, record_name, rec_type=''):  # type: (KeeperParams, str, str) -> Optional[vault.KeeperRecord]
+        rec = RecordMixin.resolve_single_record(params, record_name)
+        if not rec:
+            recs = []
+            for record_uid in params.record_cache:
+                vrec = vault.KeeperRecord.load(params, record_uid)
+                if vrec and vrec.title == record_name and (not rec_type or rec_type == vrec.record_type):
+                    recs.append(vrec)
+                    if len(recs) > 1:
+                        break
+            # resolve only an unambiguous title match; multiple matches leave rec unset
+            if len(recs) == 1:
+                rec = recs[0]
+        return rec
+
     def parse_properties(self, params, record, **kwargs):    # type: (KeeperParams, vault.TypedRecord, ...)
-> None extra_properties = [] self.parse_pam_configuration(params, record, **kwargs) @@ -1720,7 +1767,7 @@ def parse_properties(self, params, record, **kwargs): # type: (KeeperParams, va if schedule: extra_properties.append(f'schedule.defaultRotationSchedule={schedule}') else: - extra_properties.append(f'schedule.defaultRotationSchedule=On-Demand') + extra_properties.append('schedule.defaultRotationSchedule=On-Demand') if record.record_type == 'pamNetworkConfiguration': network_id = kwargs.get('network_id') @@ -1759,10 +1806,68 @@ def parse_properties(self, params, record, **kwargs): # type: (KeeperParams, va tenant_id = kwargs.get('tenant_id') if tenant_id: extra_properties.append(f'secret.tenantId={tenant_id}') - resource_group = kwargs.get('resource_group') - if isinstance(resource_group, list) and len(resource_group) > 0: - rg = '\n'.join(resource_group) + resource_groups = kwargs.get('resource_groups') + if isinstance(resource_groups, list) and len(resource_groups) > 0: + rg = '\n'.join(resource_groups) extra_properties.append(f'multiline.resourceGroups={rg}') + elif record.record_type == 'pamDomainConfiguration': + domain_id = kwargs.get('domain_id') + if domain_id: + extra_properties.append(f'text.pamDomainId={domain_id}') + host = str(kwargs.get('domain_hostname') or '').strip() + port = str(kwargs.get('domain_port') or '').strip() + if host or port: + val = json.dumps({"hostName": host, "port": port}) + extra_properties.append(f"f.pamHostname=$JSON:{val}") + domain_use_ssl = utils.value_to_boolean(kwargs.get('domain_use_ssl')) + if domain_use_ssl is not None: + val = 'true' if domain_use_ssl else 'false' + extra_properties.append(f'checkbox.useSSL={val}') + domain_scan_dc_cidr = utils.value_to_boolean(kwargs.get('domain_scan_dc_cidr')) + if domain_scan_dc_cidr is not None: + val = 'true' if domain_scan_dc_cidr else 'false' + extra_properties.append(f'checkbox.scanDCCIDR={val}') + domain_network_cidr = kwargs.get('domain_network_cidr') + if domain_network_cidr: + extra_properties.append(f'text.networkCIDR={domain_network_cidr}') + domain_administrative_credential = kwargs.get('domain_administrative_credential') + dac = str(domain_administrative_credential or '') + if dac: + # pam import will link it later (once admin pamUser is created) + if kwargs.get('force_domain_admin', False) is True: + if bool(re.search('^[A-Za-z0-9-_]{22}$', dac)) is not True: + logging.warning(f'Invalid Domain Admin User UID: "{dac}" (skipped)') + dac = '' + else: + adm_rec = PamConfigurationEditMixin.resolve_single_record(params, dac, 'pamUser') + if adm_rec and isinstance(adm_rec, vault.TypedRecord) and adm_rec.record_type == 'pamUser': + dac = adm_rec.record_uid + else: + logging.warning(f'Domain Admin User UID: "{dac}" not found (skipped).') + dac = '' + if dac: + prf = record.get_typed_field('pamResources') + prf.value = prf.value or [{}] + prf.value[0]["adminCredentialRef"] = dac + elif record.record_type == 'pamOciConfiguration': + oci_id = kwargs.get('oci_id') + if oci_id: + extra_properties.append(f'text.pamOciId={oci_id}') + oci_admin_id = kwargs.get('oci_admin_id') + if oci_admin_id: + extra_properties.append(f'secret.adminOcid={oci_admin_id}') + oci_admin_public_key = kwargs.get('oci_admin_public_key') + if oci_admin_public_key: + extra_properties.append(f'secret.adminPublicKey={oci_admin_public_key}') + oci_admin_private_key = kwargs.get('oci_admin_private_key') + if oci_admin_private_key: + extra_properties.append(f'secret.adminPrivateKey={oci_admin_private_key}') + oci_tenancy = 
kwargs.get('oci_tenancy') + if oci_tenancy: + extra_properties.append(f'text.tenancyOci={oci_tenancy}') + oci_region = kwargs.get('oci_region') + if oci_region: + extra_properties.append(f'text.regionOci={oci_region}') if extra_properties: self.assign_typed_fields(record, [RecordEditMixin.parse_field(x) for x in extra_properties]) @@ -1815,9 +1920,13 @@ def execute(self, params, **kwargs): record_type = 'pamAzureConfiguration' elif config_type == 'local': record_type = 'pamNetworkConfiguration' + elif config_type == 'domain': + record_type = 'pamDomainConfiguration' + elif config_type == 'oci': + record_type = 'pamOciConfiguration' else: raise CommandError('pam-config-new', f'--environment {config_type} is not supported' - f' supported options are aws, azure, or local') + ' - supported options: local, aws, azure, domain, oci') title = kwargs.get('title') if not title: @@ -1848,19 +1957,26 @@ def execute(self, params, **kwargs): gateway_uid = None shared_folder_uid = None + admin_cred_ref = None value = field.get_default_value(dict) if value: gateway_uid = value.get('controllerUid') shared_folder_uid = value.get('folderUid') + if record.record_type == 'pamDomainConfiguration' and not kwargs.get('force_domain_admin', False) is True: + # pamUser must exist or "403 Insufficient PAM access to perform this operation" + admin_cred_ref = value.get('adminCredentialRef') if not shared_folder_uid: raise CommandError('pam-config-new', '--shared-folder parameter is required to create a PAM configuration') + gw_name = kwargs.get('gateway_uid') or '' + if not gateway_uid: + logging.warning(f'Gateway "{gw_name}" not found.') self.verify_required(record) pam_configuration_create_record_v6(params, record, shared_folder_uid) - encrypted_session_token, encrypted_transmission_key, transmission_key = get_keeper_tokens(params) + encrypted_session_token, encrypted_transmission_key, _ = get_keeper_tokens(params) # Add DAG for configuration tmp_dag = TunnelDAG(params, encrypted_session_token, encrypted_transmission_key, record_uid=record.record_uid, is_config=True) @@ -1872,6 +1988,8 @@ def execute(self, params, **kwargs): kwargs.get('typescriptrecording'), kwargs.get('remotebrowserisolation') ) + if admin_cred_ref: + tmp_dag.link_user_to_config_with_options(admin_cred_ref, is_admin='on') tmp_dag.print_tunneling_config(record.record_uid, None) # Moving v6 record into the folder @@ -1942,14 +2060,16 @@ def execute(self, params, **kwargs): config_type = kwargs.get('config_type') if config_type: - if not config_type: - raise CommandError('pam-config-new', '--environment parameter is required') if config_type == 'aws': record_type = 'pamAwsConfiguration' elif config_type == 'azure': record_type = 'pamAzureConfiguration' elif config_type == 'local': record_type = 'pamNetworkConfiguration' + elif config_type == 'domain': + record_type = 'pamDomainConfiguration' + elif config_type == 'oci': + record_type = 'pamOciConfiguration' else: record_type = configuration.record_type @@ -1969,16 +2089,19 @@ def execute(self, params, **kwargs): orig_gateway_uid = '' orig_shared_folder_uid = '' + orig_admin_cred_ref = '' # only if pamDomainConfiguration value = field.get_default_value(dict) if value: orig_gateway_uid = value.get('controllerUid') or '' orig_shared_folder_uid = value.get('folderUid') or '' + orig_admin_cred_ref = value.get('adminCredentialRef') or '' self.parse_properties(params, configuration, **kwargs) self.verify_required(configuration) record_management.update_record(params, configuration) + admin_cred_ref = '' 
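+        # admin_cred_ref stays empty except for pamDomainConfiguration edits;
+        # comparing it against orig_admin_cred_ref below decides whether the DAG
+        # links must be rewritten (is_admin dropped from the old ref, set on the new one).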
value = field.get_default_value(dict) if value: gateway_uid = value.get('controllerUid') or '' @@ -1990,6 +2113,9 @@ def execute(self, params, **kwargs): shared_folder_uid = value.get('folderUid') or '' if shared_folder_uid != orig_shared_folder_uid: FolderMoveCommand().execute(params, src=configuration.record_uid, dst=shared_folder_uid) + if configuration.type_name == 'pamDomainConfiguration' and not kwargs.get('force_domain_admin', False) is True: + # pamUser must exist or "403 Insufficient PAM access to perform this operation" + admin_cred_ref = value.get('adminCredentialRef') or '' # check if there are any permission changes _connections = kwargs.get('connections', None) @@ -2000,11 +2126,16 @@ def execute(self, params, **kwargs): _typescript_recording = kwargs.get('typescriptrecording', None) if (_connections is not None or _tunneling is not None or _rotation is not None or _rbi is not None or - _recording is not None or _typescript_recording is not None): - encrypted_session_token, encrypted_transmission_key, transmission_key = get_keeper_tokens(params) + _recording is not None or _typescript_recording is not None or orig_admin_cred_ref != admin_cred_ref): + encrypted_session_token, encrypted_transmission_key, _ = get_keeper_tokens(params) tmp_dag = TunnelDAG(params, encrypted_session_token, encrypted_transmission_key, configuration.record_uid, is_config=True) tmp_dag.edit_tunneling_config(_connections, _tunneling, _rotation, _recording, _typescript_recording, _rbi) + if orig_admin_cred_ref != admin_cred_ref: + if orig_admin_cred_ref: # just drop is_admin from old Domain + tmp_dag.link_user_to_config_with_options(orig_admin_cred_ref, is_admin='default') + if admin_cred_ref: # set is_admin=true for new Domain Admin + tmp_dag.link_user_to_config_with_options(admin_cred_ref, is_admin='on') tmp_dag.print_tunneling_config(configuration.record_uid, None) for w in self.warnings: logging.warning(w) diff --git a/keepercommander/commands/discoveryrotation_v1.py b/keepercommander/commands/discoveryrotation_v1.py index 90306c180..11b19b8b9 100644 --- a/keepercommander/commands/discoveryrotation_v1.py +++ b/keepercommander/commands/discoveryrotation_v1.py @@ -22,6 +22,7 @@ from keeper_secrets_manager_core.utils import url_safe_str_to_bytes from .base import Command, GroupCommand, user_choice, dump_report_data, report_output_parser, field_to_title, FolderMixin +from .discoveryrotation import PAMLegacyCommand from .folder import FolderMoveCommand from .ksm import KSMCommand from .pam import gateway_helper, router_helper @@ -49,7 +50,7 @@ def register_commands(commands): def register_command_info(_, command_info): - command_info['pam'] = 'Manage PAM Components.' + command_info['pam'] = 'Manage PAM Components (Legacy).' 
class PAMControllerCommand(GroupCommand): @@ -60,6 +61,7 @@ def __init__(self): self.register_command('config', PAMConfigurationsCommand(), 'Manage PAM Configurations', 'c') self.register_command('rotation', PAMRotationCommand(), 'Manage Rotations', 'r') self.register_command('action', GatewayActionCommand(), 'Execute action on the Gateway', 'a') + self.register_command('legacy', PAMLegacyCommand(), 'Toggle PAM Legacy commands: ON/OFF') # self.register_command('tunnel', PAMTunnelCommand(), 'Manage Tunnels', 't') diff --git a/keepercommander/commands/enterprise_create_user.py b/keepercommander/commands/enterprise_create_user.py index 5482f32ba..2c6e614c6 100644 --- a/keepercommander/commands/enterprise_create_user.py +++ b/keepercommander/commands/enterprise_create_user.py @@ -138,7 +138,11 @@ def execute(self, params, **kwargs): if folder_name: folder_uid = FolderMixin.resolve_folder(params, folder_name) - keeper_url = urlunparse(('https', params.server, '/vault', None, None, f'email/{email}')) + # Extract hostname from params.server in case it contains full URL with protocol + from urllib.parse import urlparse + parsed = urlparse(params.server) + server_netloc = parsed.netloc if parsed.netloc else parsed.path # parsed.path for plain hostname + keeper_url = urlunparse(('https', server_netloc, '/vault', None, None, f'email/{email}')) record = vault.TypedRecord() login_facade.assign_record(record) login_facade.title = f'Keeper Account: {email}' diff --git a/keepercommander/commands/pam_import/edit.py b/keepercommander/commands/pam_import/edit.py index 7d7368704..e0f10490d 100644 --- a/keepercommander/commands/pam_import/edit.py +++ b/keepercommander/commands/pam_import/edit.py @@ -498,7 +498,7 @@ def process_pam_config(self, params, project: dict) -> dict: if pce.az_client_secret: args["client_secret"] = pce.az_client_secret if pce.az_subscription_id: args["subscription_id"] = pce.az_subscription_id if pce.az_tenant_id: args["tenant_id"] = pce.az_tenant_id - if pce.az_resource_groups: args["resource_group"] = pce.az_resource_groups + if pce.az_resource_groups: args["resource_groups"] = pce.az_resource_groups if pce.default_rotation_schedule: args["default_schedule"] = pce.default_rotation_schedule @@ -1681,16 +1681,16 @@ def _initialize(self): self.attachments = None # PamAttachmentsObject # common settings (local, aws, az) - self.pam_resources = {} # {"controllerUid": "", "controllerUid": ""} - "resourceRef": unused/legacy + self.pam_resources = {} # {"folderUid": "", "controllerUid": ""} - "resourceRef": unused/legacy # Local environment - self.network_id: str = "" # text:networkId prefix for naming resources during discovery - self.network_cidr: str = "" # text:networkCIDR network CIDR used for discovery + self.network_id: str = "" # required, text:networkId prefix for naming resources during discovery + self.network_cidr: str = "" # required, text:networkCIDR network CIDR used for discovery # AWS environment - self.aws_id: str = "" # required, text:awsId - self.aws_access_key_id: str = "" # secret:accessKeyId - self.aws_secret_access_key: str = "" # secret:accessSecretKey - self.aws_region_names: List[str] = [] # multiline:regionNames + self.aws_id: str = "" # required, text:awsId + self.aws_access_key_id: str = "" # required, secret:accessKeyId + self.aws_secret_access_key: str = "" # required, secret:accessSecretKey + self.aws_region_names: List[str] = [] # optional, multiline:regionNames # Azure environment self.az_entra_id: str = "" # required, text:azureId self.az_client_id: str = 
"" # required, secret:clientId @@ -1698,6 +1698,21 @@ def _initialize(self): self.az_subscription_id: str = "" # required, secret:subscriptionId self.az_tenant_id: str = "" # required, secret:tenantId self.az_resource_groups: List[str] = [] # optional, multiline:resourceGroups + # Domain environment: pamDomainConfiguration + self.dom_domain_id: str = '' # required, text:pamDomainId + self.dom_hostname: str = '' # required, pamHostname: + self.dom_port: str = '' # required, pamHostname: + self.dom_use_ssl: bool = False # required, checkbox:useSSL + self.dom_scan_dc_cidr: bool = False # optional, checkbox:scanDCCIDR + self.dom_network_cidr: str = '' # optional, multiline:networkCIDR + # Oracle Cloud Infrastructure (OCI) environment: pamOciConfiguration + # NB! OCI settings subject to change: + self.oci_oci_id: str = '' # required, text:pamOciId + self.oci_admin_oci_id: str = '' # required, secret:adminOcid + self.oci_admin_public_key: str = '' # required, secret:adminPublicKey + self.oci_admin_private_key: str = '' # required, secret:adminPrivateKey + self.oci_tenancy_oci: str = '' # required, text:tenancyOci + self.oci_region_oci: str = '' # required, text:regionOci def __init__(self, environment_type:str, settings:dict, controller_uid:str, folder_uid:str) -> None: self._initialize() diff --git a/keepercommander/commands/record_edit.py b/keepercommander/commands/record_edit.py index dcdf7e38c..27113a7c8 100644 --- a/keepercommander/commands/record_edit.py +++ b/keepercommander/commands/record_edit.py @@ -821,7 +821,11 @@ def execute(self, params, **kwargs): rq.accessExpireOn = utils.current_milli_time() + int(expiration_period.total_seconds() * 1000) rq.isSelfDestruct = True api.communicate_rest(params, rq, 'vault/external_share_add', rs_type=APIRequest_pb2.Device) - url = urlunparse(('https', params.server, '/vault/share', None, None, utils.base64_url_encode(client_key))) + # Extract hostname from params.server in case it contains full URL with protocol + from urllib.parse import urlparse + parsed = urlparse(params.server) + server_netloc = parsed.netloc if parsed.netloc else parsed.path # parsed.path for plain hostname + url = urlunparse(('https', server_netloc, '/vault/share', None, None, utils.base64_url_encode(client_key))) return url else: BreachWatch.scan_and_update_security_data(params, record.record_uid, params.breach_watch) diff --git a/keepercommander/commands/register.py b/keepercommander/commands/register.py index dbdcd1160..1f0137a54 100644 --- a/keepercommander/commands/register.py +++ b/keepercommander/commands/register.py @@ -2394,7 +2394,11 @@ def execute(self, params, **kwargs): query = 'editable=true' api.communicate_rest(params, rq, 'vault/external_share_add', rs_type=APIRequest_pb2.Device) - url = urlunparse(('https', params.server, '/vault/share/', None, query, utils.base64_url_encode(client_key))) + # Extract hostname from params.server in case it contains full URL with protocol + from urllib.parse import urlparse + parsed = urlparse(params.server) + server_netloc = parsed.netloc if parsed.netloc else parsed.path # parsed.path for plain hostname + url = urlunparse(('https', server_netloc, '/vault/share/', None, query, utils.base64_url_encode(client_key))) urls[record_uid] = str(url) if params.batch_mode: diff --git a/keepercommander/commands/tunnel/port_forward/TunnelGraph.py b/keepercommander/commands/tunnel/port_forward/TunnelGraph.py index ca83355e7..d166d2eef 100644 --- a/keepercommander/commands/tunnel/port_forward/TunnelGraph.py +++ 
b/keepercommander/commands/tunnel/port_forward/TunnelGraph.py @@ -121,7 +121,7 @@ def edit_tunneling_config(self, connections=None, tunneling=None, # When no value in allowedSettings: client will substitute with default # rotation defaults to True, everything else defaults to False - # switching to 3-state on/off/default: on/true, off/false + # switching to 3-state on/off/default: on/true, off/false, # None = Keep existing, 'default' = Reset to default (remove from dict) if connections is not None: connections = self._convert_allowed_setting(connections) @@ -228,6 +228,55 @@ def link_user_to_config(self, user_uid): config_vertex = self.linking_dag.add_vertex(uid=self.record.record_uid) self.link_user(user_uid, config_vertex, belongs_to=True, is_iam_user=True) + def link_user_to_config_with_options(self, user_uid, is_admin=None, belongs_to=None, is_iam_user=None): + config_vertex = self.linking_dag.get_vertex(self.record.record_uid) + if config_vertex is None: + config_vertex = self.linking_dag.add_vertex(uid=self.record.record_uid) + + # self.link_user(user_uid, config_vertex, is_admin, belongs_to, is_iam_user) + source_vertex = config_vertex + user_vertex = self.linking_dag.get_vertex(user_uid) + if user_vertex is None: + user_vertex = self.linking_dag.add_vertex(uid=user_uid, vertex_type=RefType.PAM_USER) + + # switching to 3-state on/off/default: on/true, off/false, + # None = Keep existing, 'default' = Reset to default (remove from dict) + states = {'on': True, 'off': False, 'default': '', 'none': None} + + content = { + "belongs_to": states.get(str(belongs_to).lower()), + "is_admin": states.get(str(is_admin).lower()), + "is_iam_user": states.get(str(is_iam_user).lower()) + } + if user_vertex.vertex_type != RefType.PAM_USER: + user_vertex.vertex_type = RefType.PAM_USER + + dirty = False + if source_vertex.has(user_vertex, EdgeType.ACL): + acl_edge = user_vertex.get_edge(source_vertex, EdgeType.ACL) + existing_content = acl_edge.content_as_dict or {} + old_content = existing_content.copy() + for key in list(existing_content.keys()): + if content.get(key) is not None: + if content[key] == '': + existing_content.pop(key) + elif content[key] in (True, False): + existing_content[key] = content[key] + content = {k: v for k, v in content.items() if v not in (None, '')} + for k, v in content.items(): + existing_content.setdefault(k, v) + if existing_content != old_content: + dirty = True + + if dirty: + user_vertex.belongs_to(source_vertex, EdgeType.ACL, content=existing_content) + # user_vertex.add_data(content=existing_content, needs_encryption=False) + self.linking_dag.save() + else: + content = {k: v for k, v in content.items() if v not in (None, '')} + user_vertex.belongs_to(source_vertex, EdgeType.ACL, content=content) + self.linking_dag.save() + def unlink_user_from_resource(self, user_uid, resource_uid) -> bool: resource_vertex = self.linking_dag.get_vertex(resource_uid) if resource_vertex is None or not self.resource_belongs_to_config(resource_uid): @@ -381,7 +430,7 @@ def set_resource_allowed(self, resource_uid, tunneling=None, connections=None, r # When no value in allowedSettings: client will substitute with default # rotation defaults to True, everything else defaults to False - # switching to 3-state on/off/default: on/true, off/false + # switching to 3-state on/off/default: on/true, off/false, # None = Keep existing, 'default' = Reset to default (remove from dict) if connections is not None: connections = self._convert_allowed_setting(connections) diff --git 
a/keepercommander/commands/tunnel/port_forward/tunnel_helpers.py b/keepercommander/commands/tunnel/port_forward/tunnel_helpers.py index 769e325f1..35825314b 100644 --- a/keepercommander/commands/tunnel/port_forward/tunnel_helpers.py +++ b/keepercommander/commands/tunnel/port_forward/tunnel_helpers.py @@ -61,10 +61,6 @@ # ICE candidate buffering - store until SDP answer is received -# Global state to track Rust logger initialization -RUST_LOGGER_INITIALIZED = False -_RUST_LOGGER_INITIAL_SETTINGS = {'verbose': False, 'level': None} - # Global conversation key management for multiple concurrent tunnels import threading _CONVERSATION_KEYS_LOCK = threading.Lock() @@ -296,133 +292,21 @@ def get_conversation_status(): } -def initialize_rust_logger(logger_name="keeper-pam-webrtc-rs", verbose=False, level=logging.INFO): - """Initialize the Rust logger to use Python's logging system. - - IMPORTANT: Due to Rust tracing limitations, the logger can only be initialized ONCE per process. - This is typically called by the 'pam tunnel loglevel' command with the --trace flag. - Later calls with different settings will be ignored and a warning will be shown. - - Args: - logger_name: Name for the logger module - verbose: If True, sets logging level to TRACE and enables lifecycle logs (use 'pam tunnel loglevel --trace') - level: Python logging level (will be converted to Rust tracing level) - - Returns: - bool: True if initialization was successful, False otherwise - """ - global RUST_LOGGER_INITIALIZED, _RUST_LOGGER_INITIAL_SETTINGS - - # If already initialized, check if settings match - if RUST_LOGGER_INITIALIZED: - initial_verbose = _RUST_LOGGER_INITIAL_SETTINGS['verbose'] - initial_level = _RUST_LOGGER_INITIAL_SETTINGS['level'] - - if initial_verbose == verbose and initial_level == level: - logging.debug("Rust logger already initialized with same settings.") - return True - else: - # Settings differ - show helpful warning - if initial_verbose != verbose: - if verbose and not initial_verbose: - logging.warning(f"Cannot enable trace logging - Rust logger already initialized in normal mode. 
" - f"Restart Commander or use RUST_LOG=trace environment variable for trace logging.") - elif not verbose and initial_verbose: - logging.info( - f"Rust logger is in trace mode from 'pam tunnel loglevel --trace' - cannot reduce verbosity.") - return True - - try: - import keeper_pam_webrtc_rs - - if keeper_pam_webrtc_rs is None: - logging.error("Cannot initialize Rust logger: keeper_pam_webrtc_rs module is not available") - return False - - # If no level is specified, use the effective level from the current logger - if level is None: - level = logging.getLogger().getEffectiveLevel() - - # Initialize the Rust logger (can only be done once per process) - keeper_pam_webrtc_rs.initialize_logger(logger_name, verbose=verbose, level=level) - - # Store the initial settings for future reference - RUST_LOGGER_INITIALIZED = True - _RUST_LOGGER_INITIAL_SETTINGS['verbose'] = verbose - _RUST_LOGGER_INITIAL_SETTINGS['level'] = level - - if not verbose: - # Configure WebRTC loggers to ERROR level in Python's logging system - webrtc_loggers = [ - "webrtc_ice.agent.agent_internal", - "webrtc_ice.agent.agent_gather", - "webrtc_sctp.association", - "webrtc.peer_connection.peer_connection_internal", - "webrtc.mux", - "webrtc_ice", - "webrtc_sctp", - "webrtc", - "webrtc_lifecycle" - ] - - # Set all WebRTC loggers to ERROR level - for logger_name in webrtc_loggers: - tmp_logger = logging.getLogger(logger_name) - tmp_logger.setLevel(logging.ERROR) - - trace_note = " (trace mode)" if verbose else "" - logging.info(f"Rust WebRTC logger initialized for '{logger_name}' with level {level}{trace_note}") - - # Provide helpful information about the limitation - if verbose: - logging.info("Trace mode enabled for Rust WebRTC logging. This setting persists for the entire process.") - else: - logging.debug( - "Tip: Use 'pam tunnel loglevel --trace' or set RUST_LOG=trace environment variable for detailed WebRTC logging.") - - return True - except ImportError: - logging.error("Cannot initialize Rust logger: keeper_pam_webrtc_rs module is not available") - return False - except Exception as e: - # Check if it's because the logger was already initialized by another part of the system - if "Logger already initialized" in str(e): - logging.debug(f"Rust logger was already initialized elsewhere: {e}") - RUST_LOGGER_INITIALIZED = True - # We don't know the actual settings used, so store the current request - _RUST_LOGGER_INITIAL_SETTINGS['verbose'] = verbose - _RUST_LOGGER_INITIAL_SETTINGS['level'] = level - return True - else: - logging.error(f"Failed to initialize Rust logger: {e}") - return False - - -def get_rust_logger_state(): - """Get the current state of the Rust logger. 
- - Returns: - dict: Dictionary containing 'initialized', 'verbose', and 'level' keys - 'verbose' indicates if trace logging was enabled via --trace flag - """ - return { - 'initialized': RUST_LOGGER_INITIALIZED, - 'verbose': _RUST_LOGGER_INITIAL_SETTINGS.get('verbose', False), - 'level': _RUST_LOGGER_INITIAL_SETTINGS.get('level', None) - } - - # Tunnel helper functions def get_or_create_tube_registry(params): """Get or create the tube registry instance, storing it on params for reuse""" try: - from keeper_pam_webrtc_rs import PyTubeRegistry - - # Initialize logger if not already done (fallback) - if not RUST_LOGGER_INITIALIZED: - debug_level = hasattr(params, 'debug') and params.debug - log_level = logging.DEBUG if debug_level else logging.INFO - initialize_rust_logger(logger_name="keeper-pam-webrtc-rs", verbose=False, level=log_level) + from keeper_pam_webrtc_rs import PyTubeRegistry, initialize_logger + + # Initialize logger (Rust lib handles re-initialization gracefully) + # Match current debug state (not just initial --debug flag) + current_is_debug = logging.getLogger().level <= logging.DEBUG + log_level = logging.getLogger().getEffectiveLevel() + initialize_logger( + logger_name="keeper-pam-webrtc-rs", + verbose=current_is_debug, # Use current state, matches debug toggles + level=log_level + ) # Reuse existing registry or create new one if not hasattr(params, 'tube_registry') or params.tube_registry is None: @@ -797,14 +681,14 @@ async def connect_websocket_with_fallback(ws_endpoint, headers, ssl_context, tub if ssl_context: try: async with websockets_connect(ws_endpoint, ssl_context=ssl_context, **connect_kwargs) as websocket: - logging.info("WebSocket connection established with ssl_context parameter") + logging.debug("WebSocket connection established with ssl_context parameter") await handle_websocket_messages(websocket, tube_registry, timeout) return except TypeError as e: if "ssl_context" in str(e): - logging.info("ssl_context parameter not supported, trying with ssl parameter") + logging.debug("ssl_context parameter not supported, trying with ssl parameter") async with websockets_connect(ws_endpoint, ssl=ssl_context, **connect_kwargs) as websocket: - logging.info("WebSocket connection established with ssl parameter") + logging.debug("WebSocket connection established with ssl parameter") await handle_websocket_messages(websocket, tube_registry, timeout) return else: @@ -845,7 +729,7 @@ async def handle_websocket_responses(params, tube_registry, timeout=60, gateway_ connect_ws_endpoint = get_router_ws_url(params) ws_endpoint = connect_ws_endpoint + "/api/user/client" - logging.info(f"Connecting to WebSocket: {ws_endpoint}") + logging.debug(f"Connecting to WebSocket: {ws_endpoint}") # Prepare headers using the same pattern as HTTP encrypted_session_token, encrypted_transmission_key, _ = get_keeper_tokens(params) @@ -1038,6 +922,7 @@ def route_message_to_rust(response_item, tube_registry): # Gateway sends candidates in consistent format, pass them directly to Rust for candidate in candidates_list: + logging.debug(f"Forwarding candidate to Rust: {candidate[:100]}...") # Log first 100 chars tube_registry.add_ice_candidate(tube_id, candidate) print( @@ -1290,6 +1175,23 @@ def signal_from_rust(self, response: dict): # Send the candidate immediately (but still in array format for gateway consistency) self._send_ice_candidate_immediately(data, tube_id) return + elif signal_kind == 'ice_restart_request': + logging.info(f"Received ICE restart request for tube {tube_id}") + + try: + # 
Execute ICE restart through your tube registry + restart_sdp = self.tube_registry.restart_ice(tube_id) + + if restart_sdp: + logging.info(f"ICE restart successful for tube {tube_id}") + self._send_restart_offer(restart_sdp, tube_id) + else: + logging.error(f"ICE restart failed for tube {tube_id}") + + except Exception as e: + logging.error(f"ICE restart error for tube {tube_id}: {e}") + + return # Local event, no gateway response needed # Unknown signal type else: @@ -1339,7 +1241,56 @@ def _send_ice_candidate_immediately(self, candidate_data, tube_id=None): except Exception as e: logging.error(f"Failed to send ICE candidate via HTTP: {e}") print(f"{bcolors.WARNING}Failed to send ICE candidate: {e}{bcolors.ENDC}") - + + def _send_restart_offer(self, restart_sdp, tube_id): + """Send ICE restart offer via HTTP POST to /send_controller_message with encryption + + Similar to _send_ice_candidate_immediately but sends an offer instead of candidates. + """ + try: + # Format as offer payload for gateway + offer_payload = { + "type": "offer", + "sdp": restart_sdp, + "ice_restart": True # Flag to indicate this is an ICE restart offer + } + string_data = json.dumps(offer_payload) + bytes_data = string_to_bytes(string_data) + encrypted_data = tunnel_encrypt(self.symmetric_key, bytes_data) + + logging.info(f"Sending ICE restart offer to gateway for tube {tube_id}") + print(f"{bcolors.OKBLUE}Connection state: {bcolors.ENDC}sending ICE restart offer...") + + # Send ICE restart offer via HTTP POST with streamResponse=True + # Pass session cookies for router affinity + router_response = router_send_action_to_gateway( + params=self.params, + destination_gateway_uid_str=self.gateway_uid, + gateway_action=GatewayActionWebRTCSession( + conversation_id=self.conversation_id, + inputs={ + "recordUid": self.record_uid, + 'kind': 'ice_restart_offer', # New kind for ICE restart + 'base64Nonce': self.base64_nonce, + 'conversationType': 'tunnel', + "data": encrypted_data, + "trickleICE": True, + } + ), + message_type=pam_pb2.CMT_CONNECT, + is_streaming=True, # Response will come via WebSocket + gateway_timeout=GATEWAY_TIMEOUT, + destination_gateway_cookies=self._get_gateway_cookies_for_tube(tube_id) + # Pass cookies for session affinity + ) + + logging.info(f"ICE restart offer sent via HTTP POST for tube {tube_id} - response expected via WebSocket") + print(f"{bcolors.OKGREEN}ICE restart offer sent successfully{bcolors.ENDC}") + + except Exception as e: + logging.error(f"Failed to send ICE restart offer for tube {tube_id}: {e}") + print(f"{bcolors.FAIL}Failed to send ICE restart offer: {e}{bcolors.ENDC}") + def _get_gateway_cookies_for_tube(self, tube_id): """Get gateway cookies for a specific tube_id, fall back to instance cookies""" if tube_id: @@ -1356,7 +1307,7 @@ def cleanup(self): logging.debug("TunnelSignalHandler cleaned up") def start_rust_tunnel(params, record_uid, gateway_uid, host, port, - seed, target_host, target_port, socks): + seed, target_host, target_port, socks, trickle_ice=True): """ Start a tunnel using Rust WebRTC with trickle ICE via HTTP POST and WebSocket responses. @@ -1414,7 +1365,7 @@ def start_rust_tunnel(params, record_uid, gateway_uid, host, port, "status": "connecting" } """ - print(f"{bcolors.HIGHINTENSITYWHITE}Establishing tunnel with trickle ICE between Commander and Gateway. Please wait..." + logging.info(f"{bcolors.HIGHINTENSITYWHITE}Establishing tunnel with trickle ICE between Commander and Gateway. Please wait..." 
f"{bcolors.ENDC}") try: @@ -1482,7 +1433,7 @@ def start_rust_tunnel(params, record_uid, gateway_uid, host, port, register_tunnel_session(temp_tube_id, tunnel_session) # Create the tube to get the WebRTC offer with trickle ICE - logging.info("Creating WebRTC offer with trickle ICE gathering") + logging.debug("Creating WebRTC offer with trickle ICE gathering") # Create signal handler for Rust events signal_handler = TunnelSignalHandler( @@ -1493,20 +1444,20 @@ def start_rust_tunnel(params, record_uid, gateway_uid, host, port, base64_nonce=base64_nonce, conversation_id=conversation_id, tube_registry=tube_registry, - tube_id=temp_tube_id, # Use temp ID initially - trickle_ice=True, + tube_id=temp_tube_id, # Use temp ID initially + trickle_ice=trickle_ice, ) signal_handler.gateway_cookies = gateway_cookies # Store signal handler reference so we can send buffered candidates later tunnel_session.signal_handler = signal_handler - print(f"{bcolors.OKBLUE}Creating WebRTC offer and setting up local listener...{bcolors.ENDC}") + logging.debug(f"{bcolors.OKBLUE}Creating WebRTC offer and setting up local listener...{bcolors.ENDC}") offer = tube_registry.create_tube( conversation_id=conversation_id, settings=webrtc_settings, - trickle_ice=True, # Use trickle ICE for real-time candidate exchange + trickle_ice=trickle_ice, # Use trickle ICE for real-time candidate exchange callback_token=webrtc_settings["callback_token"], ksm_config="", krelay_server="krelay." + params.server, @@ -1550,7 +1501,7 @@ def start_rust_tunnel(params, record_uid, gateway_uid, host, port, register_tunnel_session(commander_tube_id, tunnel_session) logging.debug(f"Registered encryption key for conversation: {conversation_id}") - logging.info(f"Expecting WebSocket responses for conversation ID: {conversation_id}") + logging.debug(f"Expecting WebSocket responses for conversation ID: {conversation_id}") # Start or reuse WebSocket listener with cookies for session affinity websocket_thread = start_websocket_listener(params, tube_registry, timeout=300, gateway_uid=gateway_uid, gateway_cookies=gateway_cookies) @@ -1566,7 +1517,7 @@ def start_rust_tunnel(params, record_uid, gateway_uid, host, port, logging.error(f"Failed to store tunnel session for tube: {commander_tube_id}") # Send offer to gateway via HTTP POST with streamResponse=true - print(f"{bcolors.OKBLUE}Sending offer for {conversation_id} to gateway...{bcolors.ENDC}") + logging.debug(f"{bcolors.OKBLUE}Sending offer for {conversation_id} to gateway...{bcolors.ENDC}") # Prepare the offer data data = {"offer": offer.get("offer")} @@ -1600,7 +1551,7 @@ def start_rust_tunnel(params, record_uid, gateway_uid, host, port, ) # With streamResponse=true, HTTP response should be empty - print(f"{bcolors.OKGREEN}Offer sent to gateway{bcolors.ENDC}") + logging.debug(f"{bcolors.OKGREEN}Offer sent to gateway{bcolors.ENDC}") # Mark offer as sent in both signal handler and session signal_handler.offer_sent = True @@ -1626,7 +1577,7 @@ def start_rust_tunnel(params, record_uid, gateway_uid, host, port, record_uid=record_uid ) - print(f"{bcolors.OKBLUE}Connection state: {bcolors.ENDC}gathering candidates...") + logging.debug(f"{bcolors.OKBLUE}Connection state: {bcolors.ENDC}gathering candidates...") return { "success": True, diff --git a/keepercommander/commands/tunnel_and_connections.py b/keepercommander/commands/tunnel_and_connections.py index a1b627718..ab49f4122 100644 --- a/keepercommander/commands/tunnel_and_connections.py +++ b/keepercommander/commands/tunnel_and_connections.py @@ -8,22 +8,7 
@@ # Copyright 2023 Keeper Security Inc. # Contact: sm@keepersecurity.com # -# RUST WEBRTC LOGGING: -# - Enhanced logging with initialize_rust_logger() function -# - Use 'pam tunnel loglevel --trace' to configure session-wide trace logging -# - IMPORTANT: Rust logger can only be initialized ONCE per process -# - Loglevel command must be run before any tunnel operations -# - If loglevel is not set, tunnel start will use default logging -# -# USAGE EXAMPLES: -# - For detailed logging: -# pam tunnel loglevel --trace (configure session) -# pam tunnel start RECORD_UID (start with trace logging) -# pam tunnel list (list with trace logging) -# - For normal logging: -# pam tunnel start RECORD_UID (start with default logging) -# - Alternative: RUST_LOG=trace pam tunnel start RECORD_UID (environment variable) -# + import argparse import logging import os @@ -33,9 +18,8 @@ from .base import Command, GroupCommand, dump_report_data, RecordMixin from .tunnel.port_forward.TunnelGraph import TunnelDAG from .tunnel.port_forward.tunnel_helpers import find_open_port, get_config_uid, get_keeper_tokens, \ - get_or_create_tube_registry, get_gateway_uid_from_record, initialize_rust_logger, RUST_LOGGER_INITIALIZED, \ - get_rust_logger_state, resolve_record, resolve_pam_config, resolve_folder, remove_field, start_rust_tunnel, \ - get_tunnel_session, CloseConnectionReasons, create_rust_webrtc_settings + get_or_create_tube_registry, get_gateway_uid_from_record, resolve_record, resolve_pam_config, resolve_folder, \ + remove_field, start_rust_tunnel, get_tunnel_session, CloseConnectionReasons, create_rust_webrtc_settings from .. import api, vault, record_management from ..display import bcolors from ..error import CommandError @@ -48,7 +32,6 @@ class PAMTunnelCommand(GroupCommand): def __init__(self): super(PAMTunnelCommand, self).__init__() - self.register_command('loglevel', PAMTunnelLogLevelCommand(), 'Set logging level for tunnel session', 'g') self.register_command('start', PAMTunnelStartCommand(), 'Start Tunnel', 's') self.register_command('list', PAMTunnelListCommand(), 'List all Tunnels', 'l') self.register_command('stop', PAMTunnelStopCommand(), 'Stop Tunnel to the server', 'x') @@ -76,54 +59,6 @@ def __init__(self): # Individual Commands -class PAMTunnelLogLevelCommand(Command): - pam_cmd_parser = argparse.ArgumentParser(prog='pam tunnel loglevel', - description='Set logging level for tunnel session. ' - 'Run this before starting tunnels to configure logging.') - pam_cmd_parser.add_argument('--trace', '-t', required=False, dest='trace', action='store_true', - help='Enable detailed WebRTC trace logging for the entire session. 
' - 'This setting cannot be changed once tunnels are started.') - - def get_parser(self): - return PAMTunnelLogLevelCommand.pam_cmd_parser - - def execute(self, params, **kwargs): - trace_logging = kwargs.get('trace', False) - - # Check if logger is already initialized - if RUST_LOGGER_INITIALIZED: - current_settings = get_rust_logger_state() - if current_settings['verbose'] == trace_logging: - if trace_logging: - print(f"{bcolors.OKGREEN}Tunnel session is already configured with trace logging enabled.{bcolors.ENDC}") - else: - print(f"{bcolors.OKGREEN}Tunnel session is already configured with normal logging.{bcolors.ENDC}") - else: - if trace_logging: - print(f"{bcolors.FAIL}Cannot enable trace logging - tunnel session already configured with normal logging.{bcolors.ENDC}") - print(f"{bcolors.WARNING}Restart Commander to change logging configuration.{bcolors.ENDC}") - else: - print(f"{bcolors.WARNING}Tunnel session is already configured with trace logging enabled.{bcolors.ENDC}") - print(f"{bcolors.OKBLUE}To disable trace logging, restart Commander.{bcolors.ENDC}") - return - - # Initialize the Rust logger for the session - debug_level = hasattr(params, 'debug') and params.debug - log_level = logging.DEBUG if debug_level else logging.INFO - - if initialize_rust_logger(logger_name="keeper-pam-webrtc-rs", verbose=trace_logging, level=log_level): - if trace_logging: - print(f"{bcolors.OKGREEN}Tunnel session configured with trace logging enabled.{bcolors.ENDC}") - print(f"{bcolors.OKBLUE}Detailed WebRTC logs will be shown for all tunnel operations.{bcolors.ENDC}") - print(f"{bcolors.OKBLUE}Now you can run: pam tunnel start RECORD_UID{bcolors.ENDC}") - else: - print(f"{bcolors.OKGREEN}Tunnel session configured with normal logging.{bcolors.ENDC}") - print(f"{bcolors.OKBLUE}Use 'pam tunnel loglevel --trace' for detailed logging.{bcolors.ENDC}") - print(f"{bcolors.OKBLUE}Now you can run: pam tunnel start RECORD_UID{bcolors.ENDC}") - else: - print(f"{bcolors.FAIL}Failed to configure tunnel session logging.{bcolors.ENDC}") - - class PAMTunnelListCommand(Command): pam_cmd_parser = argparse.ArgumentParser(prog='pam tunnel list') @@ -131,14 +66,8 @@ def get_parser(self): return PAMTunnelListCommand.pam_cmd_parser def execute(self, params, **kwargs): - # Rust logger should already be initialized by the loglevel command - # If not initialized, use default settings - if not RUST_LOGGER_INITIALIZED: - debug_level = hasattr(params, 'debug') and params.debug - log_level = logging.DEBUG if debug_level else logging.INFO - initialize_rust_logger(logger_name="keeper-pam-webrtc-rs", verbose=False, level=log_level) - # Try to get active tunnels from Rust PyTubeRegistry + # Logger initialization is handled by get_or_create_tube_registry() tube_registry = get_or_create_tube_registry(params) if tube_registry: if not tube_registry.has_active_tubes(): @@ -413,6 +342,9 @@ class PAMTunnelStartCommand(Command): type=int, default=0, help='The port number on which the server will be listening for incoming connections. ' 'If not set, random open port on the machine will be used.') + pam_cmd_parser.add_argument('--no-trickle-ice', '-nti', required=False, dest='no_trickle_ice', action='store_true', + help='Disable trickle ICE for WebRTC connections. 
By default, trickle ICE is enabled ' + 'for real-time candidate exchange.') def get_parser(self): return PAMTunnelStartCommand.pam_cmd_parser @@ -435,22 +367,18 @@ def execute(self, params, **kwargs): return # Check for Rust WebRTC library availability + # Logger initialization is handled by get_or_create_tube_registry() tube_registry = get_or_create_tube_registry(params) if not tube_registry: print(f"{bcolors.FAIL}This command requires the Rust WebRTC library (keeper_pam_webrtc_rs).{bcolors.ENDC}") print(f"{bcolors.OKBLUE}Please ensure the keeper_pam_webrtc_rs module is installed and available.{bcolors.ENDC}") return - # Initialize Rust logger with defaults if not already set by loglevel command - if not RUST_LOGGER_INITIALIZED: - debug_level = hasattr(params, 'debug') and params.debug - log_level = logging.DEBUG if debug_level else logging.INFO - initialize_rust_logger(logger_name="keeper-pam-webrtc-rs", verbose=False, level=log_level) - record_uid = kwargs.get('uid') host = kwargs.get('host') port = kwargs.get('port') - + no_trickle_ice = kwargs.get('no_trickle_ice', False) + if port is not None and port > 0: try: port = find_open_port(tried_ports=[], preferred_port=port, host=host) @@ -513,9 +441,13 @@ def execute(self, params, **kwargs): print(f"{bcolors.FAIL}Gateway not found for record {record_uid}.{bcolors.ENDC}") return - # Use Rust WebRTC implementation with trickle ICE - print(f"{bcolors.OKBLUE}Using trickle ICE with HTTP POST sending and WebSocket receiving{bcolors.ENDC}") - result = start_rust_tunnel(params, record_uid, gateway_uid, host, port, seed, target_host, target_port, socks) + # Use Rust WebRTC implementation with configurable trickle ICE + trickle_ice = not no_trickle_ice + if trickle_ice: + print(f"{bcolors.OKBLUE}Using trickle ICE with HTTP POST sending and WebSocket receiving{bcolors.ENDC}") + else: + print(f"{bcolors.OKBLUE}Using standard ICE (trickle ICE disabled){bcolors.ENDC}") + result = start_rust_tunnel(params, record_uid, gateway_uid, host, port, seed, target_host, target_port, socks, trickle_ice) if result and result.get("success"): # The helper will show endpoint table when local socket is actually listening @@ -565,18 +497,13 @@ def execute(self, params, **kwargs): raise CommandError('pam tunnel diagnose', '"record" parameter is required.') # Check for Rust WebRTC library availability + # Logger initialization is handled by get_or_create_tube_registry() tube_registry = get_or_create_tube_registry(params) if not tube_registry: print(f"{bcolors.FAIL}This command requires the Rust WebRTC library (keeper_pam_webrtc_rs).{bcolors.ENDC}") print(f"{bcolors.OKBLUE}Please ensure the keeper_pam_webrtc_rs module is installed and available.{bcolors.ENDC}") return 1 - # Initialize Rust logger if not already done - if not RUST_LOGGER_INITIALIZED: - debug_level = hasattr(params, 'debug') and params.debug - log_level = logging.DEBUG if debug_level else logging.INFO - initialize_rust_logger(logger_name="keeper-pam-webrtc-rs", verbose=verbose, level=log_level) - # Resolve and validate the record api.sync_down(params) record = RecordMixin.resolve_single_record(params, record_name) @@ -705,6 +632,8 @@ class PAMConnectionEditCommand(Command): parser.add_argument('--connections-override-port', '-cop', required=False, dest='connections_override_port', action='store', help='Port to use for connections. 
If not provided, ' 'the port from the record will be used.') + parser.add_argument('--key-events', '-k', dest='key_events', choices=choices, + help='Toggle Key Events settings') parser.add_argument('--silent', '-s', required=False, dest='silent', action='store_true', help='Silent mode - don\'t print PAM User, PAM Config etc.') @@ -792,6 +721,8 @@ def execute(self, params, **kwargs): else: if not pam_settings.value: pam_settings.value.append({"connection": {}, "portForward": {}}) + if not pam_settings.value[0]: + pam_settings.value[0] = {"connection": {}, "portForward": {}} if _connections: if connection_override_port: pam_settings.value[0]["connection"]["port"] = connection_override_port @@ -805,6 +736,34 @@ def execute(self, params, **kwargs): elif protocol or connection_override_port: logging.warning(f'Connection override port and protocol can be set only when connections are enabled ' f'with {bcolors.OKGREEN}--connections=on{bcolors.ENDC} option') + + # pam_settings.value already initialized above + key_events = kwargs.get('key_events') # on/off/default + if key_events: + psv = pam_settings.value[0] if pam_settings and pam_settings.value else {} + vcon = psv.get('connection', {}) if isinstance(psv, dict) else {} + rik = vcon.get('recordingIncludeKeys') if isinstance(vcon, dict) else None + if key_events == 'default': + if rik is not None: + pam_settings.value[0]["connection"].pop('recordingIncludeKeys', None) + dirty = True + else: + logging.debug(f'recordingIncludeKeys is already set to "default" on record={record_uid}') + elif key_events == 'on': + if value_to_boolean(key_events) != value_to_boolean(rik): + pam_settings.value[0]["connection"]["recordingIncludeKeys"] = True + dirty = True + else: + logging.debug(f'recordingIncludeKeys is already enabled on record={record_uid}') + elif key_events == 'off': + if value_to_boolean(key_events) != value_to_boolean(rik): + pam_settings.value[0]["connection"]["recordingIncludeKeys"] = False + dirty = True + else: + logging.debug(f'recordingIncludeKeys is already disabled on record={record_uid}') + else: + logging.debug(f'Unexpected value for --key-events {key_events} (ignored)') + if dirty: record_management.update_record(params, record) api.sync_down(params) diff --git a/keepercommander/commands/utils.py b/keepercommander/commands/utils.py index 7d2e7ddd3..803ea1ecb 100644 --- a/keepercommander/commands/utils.py +++ b/keepercommander/commands/utils.py @@ -498,6 +498,8 @@ def print_device_info(params: KeeperParams): print('{:>32}: {}'.format('Is SSO User', params.settings['sso_user'] if 'sso_user' in params.settings else False)) + print('{:>32}: {}'.format('Config file', params.config_filename)) + print("\nAvailable sub-commands: ", bcolors.OKBLUE + (", ".join(this_device_available_command_verbs)) + bcolors.ENDC) diff --git a/keepercommander/keeper_dag/vertex.py b/keepercommander/keeper_dag/vertex.py index e50eac572..4c057e5c1 100644 --- a/keepercommander/keeper_dag/vertex.py +++ b/keepercommander/keeper_dag/vertex.py @@ -520,14 +520,14 @@ def belongs_to(self, vertex: DAGVertex, edge_type: EdgeType, content: Optional[A :return: """ - self.debug(f"connect {self.uid} to {vertex.uid} with edge type {edge_type.value}", level=1) + self.dag.logger.debug(f"connect {self.uid} to {vertex.uid} with edge type {edge_type.value}") # drop the 'level' kwarg: standard logging.Logger.debug() does not accept it if vertex is None: raise ValueError("Vertex is blank.") if self.uid == self.dag.uid and not (edge_type == EdgeType.DATA or edge_type == EdgeType.DELETION): if from_load is False: raise DAGIllegalEdgeException(f"Cannot 
create edge to self for edge type {edge_type}.") - self.dag.debug(f"vertex {self.uid} , the root vertex, " + self.dag.logger.debug(f"vertex {self.uid} , the root vertex, " f"attempted to create '{edge_type.value}' edge to self, skipping.") return @@ -538,7 +538,7 @@ def belongs_to(self, vertex: DAGVertex, edge_type: EdgeType, content: Optional[A if self.uid == vertex.uid and not (edge_type == EdgeType.DATA or edge_type == EdgeType.DELETION): if from_load is False: raise DAGIllegalEdgeException(f"Cannot create edge to self for edge type {edge_type}.") - self.dag.debug(f"vertex {self.uid} attempted to make '{edge_type.value}' to self, skipping.") + self.dag.logger.debug(f"vertex {self.uid} attempted to make '{edge_type.value}' to self, skipping.") return # Figure out what version of the edge we are. diff --git a/keepercommander/resources/service_config.ini b/keepercommander/resources/service_config.ini index 336c55ca5..e20d6479b 100644 --- a/keepercommander/resources/service_config.ini +++ b/keepercommander/resources/service_config.ini @@ -3,6 +3,9 @@ port_prompt = Enter Port No: ngrok_prompt = Enable Ngrok Tunneling? (y/n): ngrok_token_prompt = Enter Ngrok Auth Token: ngrok_custom_domain_prompt = Enter Ngrok Custom Domain: +cloudflare_prompt = Enable Cloudflare Tunneling? (y/n): +cloudflare_token_prompt = Enter Cloudflare tunnel token: +cloudflare_custom_domain_prompt = Enter Cloudflare custom domain: run_mode_prompt = Select run mode (foreground/background): queue_enabled_prompt = Enable Request Queue? (y/n): tls_certificate = Enable TLS Certificate? (y/n): @@ -20,6 +23,10 @@ config_format_prompt = Select configuration format (json/yaml): [Validation_Messages] invalid_port = Invalid port: invalid_ngrok_token = Invalid Ngrok token: +invalid_cloudflare_token = Invalid Cloudflare token: +invalid_cloudflare_domain = Invalid Cloudflare domain: +cloudflare_token_required = Cloudflare tunnel token is required when using Cloudflare tunnel. +cloudflare_domain_required = Cloudflare custom domain is required when using Cloudflare tunnel. 
invalid_run_mode = Invalid run mode: invalid_certificate = Invalid Certificate: invalid_rate_limit = Invalid rate limit: diff --git a/keepercommander/service/README.md b/keepercommander/service/README.md index 20a721f94..2c50c3328 100644 --- a/keepercommander/service/README.md +++ b/keepercommander/service/README.md @@ -39,7 +39,10 @@ You'll be prompted to configure: - Ngrok tunneling (y/n) - Ngrok auth token - Ngrok custom domain -- Enable TLS Certificate (y/n) +- Cloudflare tunneling (y/n) - *if ngrok is disabled* + - Cloudflare tunnel token (required) + - Cloudflare custom domain (required) +- Enable TLS Certificate (y/n) - *if both ngrok and cloudflare are disabled* - TLS Certificate path - TLS Certificate password - Enable Request Queue (y/n) @@ -65,6 +68,12 @@ Configure the service streamlined with Ngrok: ```bash My Vault> service-create -p -f -c 'tree,record-add,audit-report' -ng -cd -rm -q -aip -dip +``` + +Configure the service streamlined with Cloudflare: + +```bash + My Vault> service-create -p -f -c 'tree,record-add,audit-report' -cf -cfd -rm -q -aip -dip ``` Parameters: @@ -72,6 +81,8 @@ Parameters: - `-c, --commands`: Comma-separated list of allowed commands - `-ng, --ngrok`: Ngrok authentication token for public URL access - `-cd, --ngrok_custom_domain`: Ngrok custom domain name +- `-cf, --cloudflare`: Cloudflare tunnel token (required when using cloudflare) +- `-cfd, --cloudflare_custom_domain`: Cloudflare custom domain name (required when using cloudflare) - `-f, --fileformat`: File format (json/yaml) - `-crtf, --certfile`: Certificate file path - `-crtp, --certpassword`: Certificate password @@ -200,11 +211,18 @@ result_retention: 3600 # Result retention (1 hour) - **Example**: Setting `"20/minute"` effectively provides ~20 requests per minute across all endpoints #### Error Responses -- **503 Service Unavailable**: Queue is full + +**Client Errors (4xx):** +- **400 Bad Request**: Invalid request format, missing required fields, or malformed JSON +- **401 Unauthorized**: Missing, invalid, or expired API key; no active session +- **403 Forbidden**: IP not allowed, access denied, or command not in allowed list - **404 Not Found**: Request ID not found -- **500 Internal Server Error**: Command execution failed - **429 Too Many Requests**: Rate limit exceeded +**Server Errors (5xx):** +- **500 Internal Server Error**: Command execution failed or unexpected server error +- **503 Service Unavailable**: Queue is full or service temporarily unavailable + ### File Input Parameters (FILEDATA) Commands requiring file input can use the `FILEDATA` placeholder with JSON content sent in the `filedata` field. 
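The snippet below is an illustration only: the endpoint path (`/api/v1/executecommand`), the `api-key` header name, and the payload values are assumptions for the sketch, not values confirmed by this README; substitute the routes and key from your own deployment.

```bash
# Hypothetical request (endpoint path, header name, and payload values assumed).
# The FILEDATA placeholder inside "command" is expected to be replaced server-side
# with a temporary file built from the "filedata" JSON field.
curl -k -X POST "https://localhost:9007/api/v1/executecommand" \
  -H "Content-Type: application/json" \
  -H "api-key: <your_api_key>" \
  -d '{
        "command": "record-add --title=Demo --notes=FILEDATA",
        "filedata": {"note": "contents substituted for FILEDATA"}
      }'
```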
@@ -242,6 +260,11 @@ The service configuration is stored as an attachment to a vault record in JSON/Y - Ngrok authentication token - Ngrok custom domain - Generated public URL +- **Cloudflare Configuration** (optional): + - Cloudflare tunneling enabled/disabled + - Cloudflare tunnel token + - Cloudflare custom domain + - Generated public URL - **TLS Certificate Configuration** (optional): - TLS certificate enabled/disabled - Certificate file path @@ -288,6 +311,13 @@ When ngrok tunneling is enabled, additional logs are maintained: - **Includes**: Tunnel establishment, reconnection attempts, and ngrok-specific error messages - **Auto-created**: Created automatically when ngrok tunneling is configured and service starts +### Cloudflare Logging +When Cloudflare tunneling is enabled, additional logs are maintained: +- **Location**: `keepercommander/service/core/logs/cloudflare_tunnel_subprocess.log` +- **Content**: Cloudflare tunnel startup, connection events, public URL generation, and tunnel errors +- **Includes**: Tunnel establishment, connection timeout detection, and firewall blocking diagnostics +- **Auto-created**: Created automatically when Cloudflare tunneling is configured and service starts + ### General Logging Configuration - **Configuration file**: `~/.keeper/logging_config.yaml` (auto-generated) - **Default level**: `INFO` @@ -333,7 +363,7 @@ docker images ### Authentication Methods -The Docker container supports two authentication methods: +The Docker container supports four authentication methods: #### Method 1: Using Credentials (Recommended for new deployments) Pass credentials directly via command line arguments. The container will automatically: @@ -344,6 +374,16 @@ Pass credentials directly via command line arguments. The container will automat #### Method 2: Using Config File (For existing configurations) Mount an existing Keeper configuration file to the container. +#### Method 3: Using KSM Token +Use a KSM one-time access token to download the `config.json` configuration from a Keeper record. The container will: +- Download the `config.json` attachment from the specified record using the provided KSM token +- Use the downloaded config for authentication + +#### Method 4: Using KSM Config File +Use Keeper Secrets Manager (KSM) config file to download the `config.json` configuration from a Keeper record. The container will: +- Download the `config.json` attachment from the specified record using the mounted KSM config file +- Use the downloaded config for authentication + ### Run Docker Container #### With User/Password Authentication @@ -401,8 +441,16 @@ Before using config file authentication, you must first create a properly config this-device persistent-login on ``` -4. **Copy config file:** - Once configured, locate the `config.json` file in `.keeper` directory in your host machine and copy the contents of the `config.json` file to your desired path (e.g., `/path/to/local/config.json`) for Docker mounting. +4. **Set timeout:** + ```bash + this-device timeout 43200 + ``` + +5. **Copy config file:** + Once configured, locate the `config.json` file in the `.keeper` directory on your host machine and copy the contents of the `config.json` file to your desired path (e.g., `/path/to/local/config.json`) for Docker mounting. + +6. **Remove the original config file:** + After copying, delete the `config.json` file from the `.keeper` directory on your host machine to prevent duplicate configurations with the same device token/clone code. 
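A minimal sketch of steps 5 and 6 (the destination path is illustrative):

```bash
# Copy the generated config out of ~/.keeper, then delete the original so the
# same device token/clone code is not used by two configurations at once.
cp ~/.keeper/config.json /path/to/local/config.json
rm ~/.keeper/config.json
```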
Mount your existing Keeper config file: ```bash @@ -412,24 +460,135 @@ docker run -d -p <host_port>:<container_port> \ -v /path/to/local/config.json:/home/commander/.keeper/config.json \ keeper-commander \ service-create -p <port> -c '<command_list>' -f <json/yaml> ``` -#### Interactive Keeper Shell Mode +#### With KSM Token Authentication +**Prerequisites:** + +Before using KSM Token authentication, you must: + +1. **Create a KSM Application** in your Keeper vault +2. **Store the generated access token securely** +3. **Create a Keeper record** containing your `config.json` as an attachment +4. **Share the record** with your KSM application + +**Setup Steps:** + +1. **Login to Keeper on your host machine:** + ```bash + keeper shell + # Then login with your credentials + login user@example.com + ``` + +2. **Register device:** + ```bash + this-device register + ``` + +3. **Enable persistent login:** + ```bash + this-device persistent-login on + ``` + +4. **Set timeout:** + ```bash + this-device timeout 43200 + ``` + +5. **Upload config file:** + Once configured, locate the `config.json` file in the `.keeper` directory on your host machine. Upload this file as an attachment to a record within a shared folder in your vault. + +6. **Remove the original config file:** + After uploading, delete the `config.json` file from the `.keeper` directory on your host machine to prevent duplicate configurations with the same device token/clone code. + +7. **Create KSM Access Token:** + - Go to Vault → Secrets Manager → My Applications + - Create new application and provide access to your shared folder + - Grant "Can Edit" permission and generate the access token + - Store the generated access token securely + +**Run Container:** +```bash +docker run -d -p <host_port>:<container_port> \ + keeper-commander \ + service-create -p <port> -c '<command_list>' -f <json/yaml> \ + --ksm-token <ksm_token> \ + --record <record_uid> +``` -Run Keeper Commander in interactive mode for manual configuration and testing: **Example:** ```bash -docker run -it keeper-commander shell +docker run -d -p 9007:9007 \ + keeper-commander \ + service-create -p 9007 -c 'ls,tree' -f json \ + --ksm-token US:odncsdcindsijiojijj32i3ij2iknm23 \ + --record ABC123-DEF456-GHI789 ``` -This will start the container with an interactive terminal session, allowing you to: -- Configure the service interactively using the `service-create` command -- Test commands manually before setting up the service -- Access the full Keeper Commander CLI interface -- Debug configuration issues +#### With KSM Config File Authentication + +**Prerequisites:** + +Before using KSM config file authentication, you must: + +1. **Create a KSM Application** in your Keeper vault +2. **Generate a KSM config file** (`ksm-config.json`) +3. **Create a Keeper record** containing your service `config.json` as an attachment +4. **Share the record** with your KSM application + +**Setup Steps:** + +1. **Login to Keeper on your host machine:** + ```bash + keeper shell + # Then login with your credentials + login user@example.com + ``` + +2. **Register device:** + ```bash + this-device register + ``` -**Example interactive session:** +3. **Enable persistent login:** + ```bash + this-device persistent-login on + ``` + +4. **Set timeout:** + ```bash + this-device timeout 43200 + ``` + +5. **Upload config file:** + Once configured, locate the `config.json` file in the `.keeper` directory on your host machine. Upload this file as an attachment to a record within a shared folder in your vault. + +6. **Remove the original config file:** + After uploading, delete the `config.json` file from the `.keeper` directory on your host machine to prevent duplicate configurations with the same device token/clone code. + +7. 
**Create KSM Configuration File:** + - Go to Vault → Secrets Manager → My Applications + - Select your application, go to `Devices`, and click on `Add Device`. + - Use `Configuration File` method and download the JSON file. + - Rename the downloaded file to `ksm-config.json` to avoid any conflict with `.keeper/config.json`. + +**Run Container:** ```bash -docker run -it keeper-commander shell -# Inside the container: -My Vault> login user@example.com +docker run -d -p <host_port>:<container_port> \ + -v /path/to/local/ksm-config.json:/home/commander/ksm-config.json \ + keeper-commander \ + service-create -p <port> -c '<command_list>' -f <json/yaml> \ + --ksm-config /home/commander/ksm-config.json \ + --record <record_uid> +``` + +**Example:** +```bash +docker run -d -p 9007:9007 \ + -v /path/to/local/ksm-config.json:/home/commander/ksm-config.json \ + keeper-commander \ + service-create -p 9007 -c 'ls,tree' -f json \ + --ksm-config /home/commander/ksm-config.json \ + --record ABC123-DEF456-GHI789 ``` ### Verify Deployment @@ -458,17 +617,10 @@ My Vault> login user@example.com ### Container Architecture - **Base Image**: `python:3.11-slim` -- **User**: Non-root user `commander` (UID: 1000, GID: 1000) - **Working Directory**: `/commander` - **Config Directory**: `/home/commander/.keeper/` - **Entrypoint**: `docker-entrypoint.sh` with automatic authentication setup -### Security Features - -- **Non-root execution**: Container runs as user `commander` for enhanced security -- **Persistent login**: Maintains authentication across container restarts -- **Flexible authentication**: Supports both credential and config file authentication - ### Execute Command Endpoint ```bash diff --git a/keepercommander/service/api/command.py b/keepercommander/service/api/command.py index a9e1e0efb..2cb6c832a 100644 --- a/keepercommander/service/api/command.py +++ b/keepercommander/service/api/command.py @@ -57,7 +57,7 @@ def execute_command_direct(**kwargs) -> Tuple[Union[Response, bytes], int]: except Exception as e: logger.error(f"Error executing command: {e}") - return jsonify({"success": False, "error": f"Error: {str(e)}"}), 500 + return jsonify({"status": "error", "error": f"Error: {str(e)}"}), 500 finally: # Clean up temporary files RequestValidator.cleanup_temp_files(temp_files) @@ -98,7 +98,7 @@ def execute_command(**kwargs) -> Tuple[Response, int]: # Clean up temp files if queue is full RequestValidator.cleanup_temp_files(temp_files) return jsonify({ - "success": False, + "status": "error", "error": "Error: Request queue is full. Please try again later." 
}), 503 # 503 Service Unavailable @@ -106,7 +106,7 @@ def execute_command(**kwargs) -> Tuple[Response, int]: logger.error(f"Error submitting request: {e}") # Clean up temp files on error RequestValidator.cleanup_temp_files(temp_files) - return jsonify({"success": False, "error": f"{str(e)}"}), 500 + return jsonify({"status": "error", "error": f"{str(e)}"}), 500 @bp.route("/status/", methods=["GET"]) @unified_api_decorator() @@ -116,7 +116,7 @@ def get_request_status(request_id: str, **kwargs) -> Tuple[Response, int]: status_info = queue_manager.get_request_status(request_id) if status_info is None: return jsonify({ - "success": False, + "status": "error", "error": "Error: Request not found" }), 404 @@ -128,7 +128,7 @@ except Exception as e: logger.error(f"Error getting request status: {e}") - return jsonify({"success": False, "error": str(e)}), 500 + return jsonify({"status": "error", "error": str(e)}), 500 @bp.route("/result/", methods=["GET"]) @unified_api_decorator() @@ -141,12 +141,12 @@ def get_request_result(request_id: str, **kwargs) -> Tuple[Union[Response, bytes status_info = queue_manager.get_request_status(request_id) if status_info is None: return jsonify({ - "success": False, + "status": "error", "error": "Error: Request not found" }), 404 else: return jsonify({ - "success": False, "error": "Error: Request not completed yet", "status": status_info["status"] }), 202 # 202 Accepted (still processing); note: adding "status": "error" here would duplicate the "status" key @@ -156,7 +156,7 @@ def get_request_result(request_id: str, **kwargs) -> Tuple[Union[Response, bytes except Exception as e: logger.error(f"Error getting request result: {e}") - return jsonify({"success": False, "error": str(e)}), 500 + return jsonify({"status": "error", "error": str(e)}), 500 @bp.route("/queue/status", methods=["GET"]) @unified_api_decorator() @@ -171,6 +171,6 @@ def get_queue_status(**kwargs) -> Tuple[Response, int]: except Exception as e: logger.error(f"Error getting queue status: {e}") - return jsonify({"success": False, "error": str(e)}), 500 + return jsonify({"status": "error", "error": str(e)}), 500 return bp diff --git a/keepercommander/service/commands/create_service.py b/keepercommander/service/commands/create_service.py index c39962294..612ccdb94 100644 --- a/keepercommander/service/commands/create_service.py +++ b/keepercommander/service/commands/create_service.py @@ -26,6 +26,8 @@ class StreamlineArgs: commands: Optional[str] ngrok: Optional[str] ngrok_custom_domain: Optional[str] + cloudflare: Optional[str] + cloudflare_custom_domain: Optional[str] certfile: Optional[str] certpassword : Optional[str] fileformat : Optional[str] @@ -63,6 +65,8 @@ def get_parser(self): parser.add_argument('-c', '--commands', type=str, help='command list for policy') parser.add_argument('-ng', '--ngrok', type=str, help='ngrok auth token to generate public URL (optional)') parser.add_argument('-cd', '--ngrok_custom_domain', type=str, help='ngrok custom domain name(optional)') + parser.add_argument('-cf', '--cloudflare', type=str, help='cloudflare tunnel token to generate public URL (required when using cloudflare)') + parser.add_argument('-cfd', '--cloudflare_custom_domain', type=str, help='cloudflare custom domain name (required when using cloudflare)') parser.add_argument('-crtf', '--certfile', type=str, help='certificate file path') parser.add_argument('-crtp', '--certpassword', type=str, help='certificate password') parser.add_argument('-f', '--fileformat', type=str, 
help='file format') @@ -82,7 +86,7 @@ def execute(self, params: KeeperParams, **kwargs) -> None: config_data = self.service_config.create_default_config() - filtered_kwargs = {k: v for k, v in kwargs.items() if k in ['port', 'allowedip', 'deniedip', 'commands', 'ngrok', 'ngrok_custom_domain', 'certfile', 'certpassword', 'fileformat', 'run_mode', 'queue_enabled']} + filtered_kwargs = {k: v for k, v in kwargs.items() if k in ['port', 'allowedip', 'deniedip', 'commands', 'ngrok', 'ngrok_custom_domain', 'cloudflare', 'cloudflare_custom_domain', 'certfile', 'certpassword', 'fileformat', 'run_mode', 'queue_enabled']} args = StreamlineArgs(**filtered_kwargs) self._handle_configuration(config_data, params, args) self._create_and_save_record(config_data, params, args) diff --git a/keepercommander/service/commands/security_config_handler.py b/keepercommander/service/commands/security_config_handler.py index 5d237d58a..389351829 100644 --- a/keepercommander/service/commands/security_config_handler.py +++ b/keepercommander/service/commands/security_config_handler.py @@ -32,7 +32,7 @@ def configure_security(self, config_data: Dict[str, Any]) -> None: config_data["is_advanced_security_enabled"] = ( self.service_config._get_yes_no_input(self.messages['advanced_security_prompt']) ) - config_data["ip_allowed_list"] = '0.0.0.0/0' + config_data["ip_allowed_list"] = '0.0.0.0/0,::/0' if config_data["is_advanced_security_enabled"] == "y": self._configure_advanced_security(config_data) diff --git a/keepercommander/service/commands/service_config_handlers.py b/keepercommander/service/commands/service_config_handlers.py index 166613c2e..51a6dea43 100644 --- a/keepercommander/service/commands/service_config_handlers.py +++ b/keepercommander/service/commands/service_config_handlers.py @@ -27,10 +27,27 @@ def __init__(self, service_config: ServiceConfig): self.messages = self.config['Messages'] self.validation_messages = self.config['Validation_Messages'] + def _get_validated_input(self, prompt_key: str, validation_func, error_key: str, required: bool = False): + """Get and validate user input with consistent error handling.""" + while True: + try: + value = input(self.messages.get(prompt_key, '')).strip() + if required and not value: + print(self.validation_messages.get(f'{error_key}_required', 'This field is required.')) + continue + return validation_func(value) if value else "" + except ValidationError as e: + print(f"{self.validation_messages.get(error_key, 'Invalid input')} {str(e)}") + except (KeyboardInterrupt, EOFError): + print("\nInput cancelled by user.") + raise + except Exception as e: + print(f"Unexpected error: {str(e)}") + @debug_decorator def handle_streamlined_config(self, config_data: Dict[str, Any], args, params: KeeperParams) -> None: if args.allowedip is None: - args.allowedip = '0.0.0.0/0' + args.allowedip = '0.0.0.0/0,::/0' run_mode = args.run_mode if args.run_mode is not None else "foreground" if args.run_mode is not None and run_mode not in ['foreground', 'background']: @@ -43,18 +60,58 @@ def handle_streamlined_config(self, config_data: Dict[str, Any], args, params: K if args.queue_enabled is not None and queue_enabled not in ['y', 'n']: raise ValidationError(f"Invalid queue setting '{queue_enabled}'. 
Must be 'y' or 'n'.") + # Apply logical tunneling flow for streamlined config + ngrok_enabled = "y" if args.ngrok else "n" + cloudflare_enabled = "y" if args.cloudflare else "n" + + # Implement the same logic as interactive mode + if ngrok_enabled == "y": + # ngrok enabled → disable cloudflare and TLS + cloudflare_enabled = "n" + cloudflare_token = "" + cloudflare_domain = "" + tls_enabled = "n" + certfile = "" + certpassword = "" + logger.debug("Ngrok enabled - disabling cloudflare and TLS") + elif cloudflare_enabled == "y": + # cloudflare enabled → disable TLS, but validate required fields + if not args.cloudflare: + raise ValidationError("Cloudflare tunnel token is required when using Cloudflare tunnel.") + if not args.cloudflare_custom_domain: + raise ValidationError("Cloudflare custom domain is required when using Cloudflare tunnel.") + + tls_enabled = "n" + certfile = "" + certpassword = "" + cloudflare_token = self.service_config.validator.validate_cloudflare_token(args.cloudflare) + cloudflare_domain = self.service_config.validator.validate_domain(args.cloudflare_custom_domain) + logger.debug("Cloudflare enabled - disabling TLS") + else: + # Both ngrok and cloudflare disabled → allow TLS + tls_enabled = "y" if args.certfile and args.certpassword else "n" + certfile = args.certfile if args.certfile else "" + certpassword = args.certpassword if args.certpassword else "" + cloudflare_token = "" + cloudflare_domain = "" + logger.debug("No tunnels enabled - TLS configuration allowed") + config_data.update({ "port": self.service_config.validator.validate_port(args.port), "ip_allowed_list": self.service_config.validator.validate_ip_list(args.allowedip), "ip_denied_list": self.service_config.validator.validate_ip_list(args.deniedip), - "ngrok": "y" if args.ngrok else "n", + "ngrok": ngrok_enabled, "ngrok_auth_token": ( self.service_config.validator.validate_ngrok_token(args.ngrok) - if args.ngrok else "" + if ngrok_enabled == "y" else "" ), - "ngrok_custom_domain": args.ngrok_custom_domain, - "certfile": args.certfile, - "certpassword": args.certpassword, + "ngrok_custom_domain": args.ngrok_custom_domain if ngrok_enabled == "y" else "", + "cloudflare": cloudflare_enabled, + "cloudflare_tunnel_token": cloudflare_token, + "cloudflare_custom_domain": cloudflare_domain, + "tls_certificate": tls_enabled, + "certfile": certfile, + "certpassword": certpassword, "fileformat": args.fileformat, # Keep original logic - can be None "run_mode": run_mode, "queue_enabled": queue_enabled @@ -63,8 +120,7 @@ def handle_streamlined_config(self, config_data: Dict[str, Any], args, params: K @debug_decorator def handle_interactive_config(self, config_data: Dict[str, Any], params: KeeperParams) -> None: self._configure_port(config_data) - self._configure_ngrok(config_data) - self._configure_tls(config_data) + self._configure_tunneling_and_tls(config_data) # New consolidated method self._configure_queue(config_data) config_data["fileformat"] = None @@ -79,6 +135,36 @@ def _configure_port(self, config_data: Dict[str, Any]) -> None: except ValidationError as e: print(f"{self.validation_messages['invalid_port']} {str(e)}") + def _configure_tunneling_and_tls(self, config_data: Dict[str, Any]) -> None: + """ + Configure tunneling and TLS with logical flow: + 1. If ngrok = yes → Skip cloudflare and TLS (ngrok provides public access with SSL) + 2. If ngrok = no → Ask for cloudflare + 3. 
If ngrok = no AND cloudflare = no → Ask for TLS (local HTTPS) + """ + # First, always ask for ngrok + self._configure_ngrok(config_data) + + if config_data["ngrok"] == "y": + # ngrok provides public access with SSL, so skip cloudflare and TLS + config_data["cloudflare"] = "n" + config_data["cloudflare_tunnel_token"] = "" + config_data["cloudflare_custom_domain"] = "" + config_data["tls_certificate"] = "n" + config_data["certfile"] = "" + config_data["certpassword"] = "" + else: + # ngrok = no, so ask for cloudflare + self._configure_cloudflare(config_data) + + if config_data["cloudflare"] == "y": + # cloudflare provides public access with SSL, so skip TLS + config_data["tls_certificate"] = "n" + config_data["certfile"] = "" + config_data["certpassword"] = "" + else: + # Both ngrok and cloudflare = no, so ask for TLS for local HTTPS + self._configure_tls(config_data) def _configure_ngrok(self, config_data: Dict[str, Any]) -> None: config_data["ngrok"] = self.service_config._get_yes_no_input(self.messages['ngrok_prompt']) @@ -95,6 +181,29 @@ def _configure_ngrok(self, config_data: Dict[str, Any]) -> None: print(f"{self.validation_messages['invalid_ngrok_token']} {str(e)}") else: config_data["ngrok_auth_token"] = "" + + def _configure_cloudflare(self, config_data: Dict[str, Any]) -> None: + config_data["cloudflare"] = self.service_config._get_yes_no_input( + self.messages.get('cloudflare_prompt', 'Do you want to use Cloudflare tunnel? (y/n): ') + ) + + if config_data["cloudflare"] == "y": + config_data["cloudflare_tunnel_token"] = self._get_validated_input( + prompt_key='cloudflare_token_prompt', + validation_func=self.service_config.validator.validate_cloudflare_token, + error_key='invalid_cloudflare_token', + required=True + ) + + config_data["cloudflare_custom_domain"] = self._get_validated_input( + prompt_key='cloudflare_custom_domain_prompt', + validation_func=self.service_config.validator.validate_domain, + error_key='invalid_cloudflare_domain', + required=True + ) + else: + config_data["cloudflare_tunnel_token"] = "" + config_data["cloudflare_custom_domain"] = "" def _configure_tls(self, config_data: Dict[str, Any]) -> None: config_data["tls_certificate"] = self.service_config._get_yes_no_input(self.messages['tls_certificate']) @@ -117,7 +226,7 @@ def _configure_tls(self, config_data: Dict[str, Any]) -> None: def _configure_queue(self, config_data: Dict[str, Any]) -> None: """Configure queue enabled setting with user prompt.""" config_data["queue_enabled"] = self.service_config._get_yes_no_input(self.messages['queue_enabled_prompt']) - logger.debug(f"Queue enabled set to: {config_data['queue_enabled']}") + logger.debug("Queue configuration completed") def _configure_run_mode(self, config_data: Dict[str, Any]) -> None: """Configure run mode with user prompt.""" diff --git a/keepercommander/service/config/cloudflare_config.py b/keepercommander/service/config/cloudflare_config.py new file mode 100644 index 000000000..05efcdca6 --- /dev/null +++ b/keepercommander/service/config/cloudflare_config.py @@ -0,0 +1,205 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' </ -_) -_) '_ \ -_) '_| +# |_|\_\___\___| .__/\___|_| +# |_| + @staticmethod + def _validate_cloudflare_config(config_data: Dict[str, Any], service_config: ServiceConfig) -> None: + """Validate Cloudflare configuration parameters.""" + required_keys = ["port", "cloudflare_tunnel_token", "cloudflare_custom_domain", "run_mode"] + + for key in required_keys: + if key not in config_data: + raise ValidationError(f"Missing required configuration key: {key}") + + service_config.validator.validate_port(config_data["port"]) +
service_config.validator.validate_cloudflare_token(config_data["cloudflare_tunnel_token"]) + service_config.validator.validate_domain(config_data["cloudflare_custom_domain"]) + + if config_data["run_mode"] not in ["foreground", "background"]: + raise ValidationError(f"Invalid run_mode: {config_data['run_mode']}") + + logger.debug("Cloudflare configuration validation successful") + + @staticmethod + def _check_cloudflare_tunnel_health(max_wait_seconds=None): + """ + Check if cloudflare tunnel started successfully by examining the log file. + Returns (success: bool, error_message: str) + """ + max_wait_seconds = max_wait_seconds or CloudflareConfigurator._DEFAULT_HEALTH_CHECK_TIMEOUT + log_file = CloudflareConfigurator._get_cloudflare_log_path() + + for _ in range(max_wait_seconds): + if os.path.exists(log_file): + success, error = CloudflareConfigurator._analyze_tunnel_log(log_file) + if success is not None: # Definitive result + return success, error + time.sleep(CloudflareConfigurator._HEALTH_CHECK_RETRY_INTERVAL) + + return False, CloudflareConfigurator._SUCCESS_TIMEOUT_MESSAGE + + @staticmethod + def _get_cloudflare_log_path() -> str: + """Get the path to the Cloudflare tunnel log file.""" + service_core_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "core") + return os.path.join(service_core_dir, "logs", "cloudflare_tunnel_subprocess.log") + + @staticmethod + def _analyze_tunnel_log(log_file: str) -> tuple[Optional[bool], str]: + """ + Analyze tunnel log content for success/failure indicators. + Returns (success: Optional[bool], error_message: str) + - True: Success detected + - False: Failure detected + - None: No definitive result yet + """ + try: + content = CloudflareConfigurator._read_log_file(log_file) + return CloudflareConfigurator._check_tunnel_patterns(content) + except (IOError, OSError, UnicodeDecodeError) as e: + logger.debug(f"Error reading cloudflare log file: {type(e).__name__}") + return None, "" + except Exception as e: + logger.error(f"Unexpected error reading cloudflare log: {type(e).__name__}") + raise + + @staticmethod + def _read_log_file(log_file: str) -> str: + """Read and return the content of the log file.""" + with open(log_file, 'r', encoding='utf-8') as f: + return f.read() + + @staticmethod + def _check_tunnel_patterns(content: str) -> tuple[Optional[bool], str]: + """Check log content for success/failure patterns.""" + if any(pattern in content for pattern in CloudflareConfigurator._SUCCESS_PATTERNS): + return True, "" + + if any(pattern in content for pattern in CloudflareConfigurator._FAILURE_PATTERNS): + return False, CloudflareConfigurator._NETWORK_FAILURE_MESSAGE + + if "ERR" in content and "Failed" in content: + return False, CloudflareConfigurator._STARTUP_ERROR_MESSAGE + + return None, "" + + @staticmethod + @debug_decorator + def configure_cloudflare(config_data: Dict[str, Any], service_config: ServiceConfig) -> Optional[int]: + """Configure Cloudflare tunnel if enabled. 
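Verifies the tunnel's health from the cloudflared log before returning; if startup fails, the spawned process is cleaned up and an exception is raised.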
Returns tunnel PID if started in background mode, None otherwise.""" + if config_data.get("cloudflare") != 'y': + return None + + logger.debug("Configuring Cloudflare tunnel") + + try: + CloudflareConfigurator._validate_cloudflare_config(config_data, service_config) + + result = generate_cloudflare_url( + config_data["port"], + config_data["cloudflare_tunnel_token"], + config_data["cloudflare_custom_domain"], + config_data["run_mode"], + ) + + if isinstance(result, tuple): + config_data["cloudflare_public_url"], cloudflare_pid = result + + # Check if tunnel started successfully + logger.debug("Checking Cloudflare tunnel health...") + tunnel_success, error_message = CloudflareConfigurator._check_cloudflare_tunnel_health() + + if not tunnel_success: + CloudflareConfigurator._cleanup_failed_process(cloudflare_pid) + raise Exception(f"Commander Service failed to start: {error_message}") + + if config_data["cloudflare_public_url"]: + print(f'Generated Cloudflare tunnel URL: {config_data["cloudflare_public_url"]}') + else: + print('Cloudflare tunnel started, URL will be available in logs') + return cloudflare_pid + else: + config_data["cloudflare_public_url"] = result + print(f'Generated Cloudflare tunnel URL: {result}') + return None + + except ValidationError as e: + logger.error(f"Invalid Cloudflare configuration: {e}") + raise + except Exception as e: + logger.error(f"Failed to configure Cloudflare tunnel: {e}") + raise + + @staticmethod + def _cleanup_failed_process(cloudflare_pid: Optional[int]) -> None: + """Clean up failed Cloudflare process.""" + if not cloudflare_pid: + return + + logger.debug(f"Cleaning up failed Cloudflare process {cloudflare_pid}") + + try: + if psutil: + try: + process = psutil.Process(cloudflare_pid) + process.terminate() + logger.debug(f"Terminated failed cloudflare process {cloudflare_pid}") + except (psutil.NoSuchProcess, psutil.AccessDenied) as e: + logger.debug(f"Process cleanup - process not found or access denied: {type(e).__name__}") + except psutil.ZombieProcess: + logger.debug(f"Process {cloudflare_pid} is already a zombie process") + except (OSError, IOError) as e: + logger.warning(f"OS error during process cleanup: {type(e).__name__}") + else: + logger.warning("Cannot terminate cloudflare process: psutil not available") + except (KeyboardInterrupt, SystemExit): + logger.info("Process cleanup interrupted by user or system") + raise + except Exception as e: + logger.error(f"Unexpected error during process cleanup: {type(e).__name__}") + diff --git a/keepercommander/service/config/config_validation.py b/keepercommander/service/config/config_validation.py index c30e65f0f..3e627fcad 100644 --- a/keepercommander/service/config/config_validation.py +++ b/keepercommander/service/config/config_validation.py @@ -103,6 +103,56 @@ def validate_ngrok_token(token: str) -> str: logger.debug("Ngrok token validation successful") return token + @staticmethod + def validate_cloudflare_token(token: str) -> str: + """Validate Cloudflare tunnel token""" + logger.debug("Validating Cloudflare tunnel token") + + # Allow empty token for temporary tunnels + if not token or not token.strip(): + logger.debug("Empty Cloudflare token - will use temporary tunnel") + return "" + + # Cloudflare tunnel tokens are typically long base64-encoded strings + # They usually start with "ey" (base64 for '{"') or are alphanumeric with dashes/underscores + if not re.match(r'^[0-9a-zA-Z_\-=+/]{32,}$', token): + msg = "Invalid Cloudflare tunnel token format. 
Must be a valid tunnel token from Cloudflare dashboard." + raise ValidationError(msg) + + logger.debug("Cloudflare token validation successful") + return token + + @staticmethod + def validate_domain(domain: str) -> str: + """Validate domain name format""" + logger.debug(f"Validating domain: {domain}") + + if not domain or not domain.strip(): + raise ValidationError("Domain cannot be empty") + + domain = domain.strip().lower() + + if len(domain) > 253: + raise ValidationError("Domain name too long (max 253 characters)") + + domain_pattern = r'^[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?(\.[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?)*$' + + if not re.match(domain_pattern, domain): + raise ValidationError("Invalid domain format. Domain must contain only letters, numbers, hyphens, and dots.") + + labels = domain.split('.') + for label in labels: + if len(label) > 63: + raise ValidationError(f"Domain label '{label}' too long (max 63 characters)") + if label.startswith('-') or label.endswith('-'): + raise ValidationError(f"Domain label '{label}' cannot start or end with hyphen") + + if '.' not in domain: + raise ValidationError("Please provide a valid domain name") + + logger.debug("Domain validation successful") + return domain + @staticmethod def validate_rate_limit(rate_limit: str) -> str: """Validate rate limiting format""" diff --git a/keepercommander/service/config/models.py b/keepercommander/service/config/models.py index 05b2f7e61..f508a001b 100644 --- a/keepercommander/service/config/models.py +++ b/keepercommander/service/config/models.py @@ -20,10 +20,10 @@ class ServiceConfigData: ngrok: str ngrok_auth_token: str ngrok_custom_domain: str + ngrok_public_url: str tls_certificate: str certfile: str certpassword: str - ngrok_public_url: str is_advanced_security_enabled: str rate_limiting: str ip_allowed_list: str @@ -33,4 +33,8 @@ class ServiceConfigData: fileformat: str run_mode: str queue_enabled: str - records: List[Dict[str, Any]] \ No newline at end of file + records: List[Dict[str, Any]] + cloudflare: str = "n" + cloudflare_tunnel_token: str = "" + cloudflare_custom_domain: str = "" + cloudflare_public_url: str = "" \ No newline at end of file diff --git a/keepercommander/service/config/service_config.py b/keepercommander/service/config/service_config.py index b0014917e..922f346c4 100644 --- a/keepercommander/service/config/service_config.py +++ b/keepercommander/service/config/service_config.py @@ -89,6 +89,10 @@ def create_default_config(self) -> Dict[str, Any]: ngrok_auth_token="", ngrok_custom_domain="", ngrok_public_url="", + cloudflare="n", + cloudflare_tunnel_token="", + cloudflare_custom_domain="", + cloudflare_public_url="", tls_certificate="n", certfile="", certpassword="", @@ -154,17 +158,6 @@ def update_service_config(self, updates: Dict[str, str]) -> None: except Exception as e: logging.info(f"Error updating service configuration: {e}") - - # decrypt the ecnrypted config file , and update configuration and encrypt agin - def read_decrypted_config_file(self, config_path: str, file_format: str, updates: Dict[str, str]): - """ decrypt the ecnrypted config file , and update configuration and encrypt agin """ - config_dir = utils.get_default_path() - decrypted_content = ConfigFormatHandler.decrypt_config_file(config_path.read_bytes(), config_dir) - config_data = json.loads(decrypted_content) if file_format == "json" else yaml.safe_load(decrypted_content) or {} - config_data.update(updates) - encrypted_content = ConfigFormatHandler.encrypted_content(config_data, 
config_path, config_dir) - with open(config_path, "wb") as f: - f.write(encrypted_content) # Read plain text config file and update configuration def read_plain_text_config_file(self, config_path: str, file_format: str, updates: Dict[str, str]) -> None: @@ -221,6 +214,23 @@ def load_config(self) -> Dict[str, Any]: if 'queue_enabled' not in config: config['queue_enabled'] = 'y' # Default to enabled for existing configs logger.debug("Added default queue_enabled=y for backwards compatibility") + + # Add backwards compatibility for missing Cloudflare fields + if 'cloudflare' not in config: + config['cloudflare'] = 'n' # Default to disabled for existing configs + logger.debug("Added default cloudflare=n for backwards compatibility") + + if 'cloudflare_tunnel_token' not in config: + config['cloudflare_tunnel_token'] = '' + logger.debug("Added default cloudflare_tunnel_token for backwards compatibility") + + if 'cloudflare_custom_domain' not in config: + config['cloudflare_custom_domain'] = '' + logger.debug("Added default cloudflare_custom_domain for backwards compatibility") + + if 'cloudflare_public_url' not in config: + config['cloudflare_public_url'] = '' + logger.debug("Added default cloudflare_public_url for backwards compatibility") self._validate_config_structure(config) return config @@ -237,6 +247,11 @@ def _validate_config_structure(self, config: Dict[str, Any]) -> None: logger.debug("Validating ngrok configuration") self.validator.validate_ngrok_token(config_data.ngrok_auth_token) + if config_data.cloudflare == 'y': + logger.debug("Validating cloudflare configuration") + self.validator.validate_cloudflare_token(config_data.cloudflare_tunnel_token) + self.validator.validate_domain(config_data.cloudflare_custom_domain) + if config_data.is_advanced_security_enabled == 'y': logger.debug("Validating advanced security settings") self.validator.validate_rate_limit(config_data.rate_limiting) diff --git a/keepercommander/service/core/globals.py b/keepercommander/service/core/globals.py index 6434727d7..6c2c84b0e 100644 --- a/keepercommander/service/core/globals.py +++ b/keepercommander/service/core/globals.py @@ -11,6 +11,7 @@ from typing import Optional from ...params import KeeperParams +from ... 
import utils _current_params: Optional[KeeperParams] = None @@ -19,4 +20,14 @@ def init_globals(params: KeeperParams) -> None: _current_params = params def get_current_params() -> Optional[KeeperParams]: - return _current_params \ No newline at end of file + return _current_params + +def ensure_params_loaded() -> KeeperParams: + """Load params from config if not already loaded.""" + params = get_current_params() + if not params: + from ...__main__ import get_params_from_config + config_path = utils.get_default_path() / "config.json" + params = get_params_from_config(config_path) + init_globals(params) + return params \ No newline at end of file diff --git a/keepercommander/service/core/process_info.py b/keepercommander/service/core/process_info.py index 7459257fe..3c495a48f 100644 --- a/keepercommander/service/core/process_info.py +++ b/keepercommander/service/core/process_info.py @@ -23,6 +23,7 @@ class ProcessInfo: terminal: Optional[str] is_running: bool ngrok_pid: Optional[int] = None + cloudflare_pid: Optional[int] = None _env_file = utils.get_default_path() / ".service.env" @@ -31,7 +32,7 @@ def _str_to_bool(cls, value: str) -> bool: return value.lower() in ('true', '1', 'yes', 'on') @classmethod - def save(cls, pid, is_running: bool, ngrok_pid: Optional[int] = None) -> None: + def save(cls, pid, is_running: bool, ngrok_pid: Optional[int] = None, cloudflare_pid: Optional[int] = None) -> None: """Save current process information to .env file.""" env_path = str(cls._env_file) @@ -49,6 +50,9 @@ def save(cls, pid, is_running: bool, ngrok_pid: Optional[int] = None) -> None: if ngrok_pid is not None: process_info['KEEPER_SERVICE_NGROK_PID'] = str(ngrok_pid) + if cloudflare_pid is not None: + process_info['KEEPER_SERVICE_CLOUDFLARE_PID'] = str(cloudflare_pid) + try: for key, value in process_info.items(): set_key(env_path, key, value, quote_mode='never') @@ -78,18 +82,22 @@ def load(cls) -> 'ProcessInfo': ngrok_pid_str = os.getenv('KEEPER_SERVICE_NGROK_PID') ngrok_pid = int(ngrok_pid_str) if ngrok_pid_str else None + cloudflare_pid_str = os.getenv('KEEPER_SERVICE_CLOUDFLARE_PID') + cloudflare_pid = int(cloudflare_pid_str) if cloudflare_pid_str else None + logger.debug("Process information loaded successfully from .env") return ProcessInfo( pid=pid, terminal=terminal, is_running=is_running, - ngrok_pid=ngrok_pid + ngrok_pid=ngrok_pid, + cloudflare_pid=cloudflare_pid ) except Exception as e: logger.error(f"Failed to load process information: {e}") pass - return ProcessInfo(pid=None, terminal=None, is_running=False, ngrok_pid=None) + return ProcessInfo(pid=None, terminal=None, is_running=False, ngrok_pid=None, cloudflare_pid=None) @classmethod def clear(cls) -> None: diff --git a/keepercommander/service/core/service_app.py b/keepercommander/service/core/service_app.py index 1f97b4ddf..051df174a 100644 --- a/keepercommander/service/core/service_app.py +++ b/keepercommander/service/core/service_app.py @@ -19,6 +19,16 @@ if __name__ == '__main__': service_config = ServiceConfig() config_data = service_config.load_config() + + try: + from ...service.core.globals import ensure_params_loaded + print("Pre-loading Keeper parameters for background mode...") + ensure_params_loaded() + print("Keeper parameters loaded successfully") + except Exception as e: + print(f"Warning: Failed to pre-load parameters during startup: {e}") + print("Parameters will be loaded on first API call if needed") + ssl_context = None if not (port := config_data.get("port")): diff --git 
a/keepercommander/service/core/service_manager.py b/keepercommander/service/core/service_manager.py index b12b5e760..d598207f3 100644 --- a/keepercommander/service/core/service_manager.py +++ b/keepercommander/service/core/service_manager.py @@ -20,7 +20,7 @@ from .process_info import ProcessInfo from .terminal_handler import TerminalHandler from .signal_handler import SignalHandler import sys, os, subprocess class ServiceManager: """Manages the lifecycle of the service including start, stop, and status operations.""" @@ -60,29 +60,9 @@ def start_service(cls) -> None: """Start the service if not already running.""" process_info = ProcessInfo.load() - if process_info.pid: - try: - process = psutil.Process(process_info.pid) - - # Check if process is actually running - if process.is_running(): - try: - cmdline = process.cmdline() - if (len(cmdline) >= 2 and - cmdline[0] == sys.executable and - any("service_app.py" in str(arg) for arg in cmdline)): - print(f"Error: Commander Service is already running (PID: {process_info.pid})") - return - except (psutil.AccessDenied, psutil.ZombieProcess): - pass - - # Clear the stored process info if process exists but isn't our service - ProcessInfo.clear() - except psutil.NoSuchProcess: - ProcessInfo.clear() - except Exception as e: - logger.error(f"Error checking process: {str(e)}") - ProcessInfo.clear() + if process_info.pid and process_info.is_running: + print(f"Error: Commander Service is already running (PID: {process_info.pid})") + return SignalHandler.setup_signal_handlers(cls._handle_shutdown) @@ -95,7 +75,7 @@ def start_service(cls) -> None: return from ..config.ngrok_config import NgrokConfigurator - + from ..config.cloudflare_config import CloudflareConfigurator is_running = True queue_enabled = config_data.get("queue_enabled", "y") @@ -108,7 +88,26 @@ def start_service(cls) -> None: print(f"Commander Service starting on {protocol}://localhost:{port}/api/{api_version}/") ngrok_pid = NgrokConfigurator.configure_ngrok(config_data, service_config) - + cloudflare_pid = None + + try: + cloudflare_pid = CloudflareConfigurator.configure_cloudflare(config_data, service_config) + except Exception as e: + if ngrok_pid and psutil: + try: + process = psutil.Process(ngrok_pid) + process.terminate() + logger.debug(f"Terminated ngrok process {ngrok_pid}") + except (psutil.NoSuchProcess, psutil.AccessDenied, OSError) as ngrok_error: + logger.debug(f"Error terminating ngrok process: {type(ngrok_error).__name__}") + elif ngrok_pid: + logger.warning("Cannot terminate ngrok process: psutil not available") + + ProcessInfo.clear() + + logger.info(f"\n{str(e)}") + return + # Custom logging filter to replace SSL handshake errors with user-friendly message class SSLHandshakeFilter(logging.Filter): def filter(self, record): @@ -162,26 +161,111 @@ def filter(self, record): logger.debug(f"Service subprocess logs available at: {log_file}") print(f"Commander Service started with PID: {cls.pid}") - + ProcessInfo.save(cls.pid, is_running, ngrok_pid) + except Exception as e: logger.error(f"Failed to start service subprocess: {e}") raise else: + cleanup_done = False + + def cleanup_cloudflare_on_foreground_exit(): + """Clean up Cloudflare tunnel when foreground service exits.""" + nonlocal cleanup_done + if cleanup_done: + return + cleanup_done = True + + try: + # Try to load PID from ProcessInfo first (more reliable) + try: + process_info = ProcessInfo.load() + saved_cloudflare_pid = process_info.cloudflare_pid + except (KeyboardInterrupt, SystemExit):
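+ # Re-raise so interrupt and shutdown signals are never swallowed by cleanup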
+ raise + except Exception as e: + logger.debug(f"Could not load process info: {e}") + saved_cloudflare_pid = None + + # Kill Cloudflare tunnel if running + cf_pid = saved_cloudflare_pid or cloudflare_pid + if cf_pid: + print(f"Stopping Cloudflare tunnel (PID: {cf_pid})...") + try: + if ServiceManager.kill_process_by_pid(cf_pid): + print("Cloudflare tunnel stopped") + else: + try: + if ServiceManager.kill_cloudflare_processes(): + print("Cloudflare tunnel stopped") + except (KeyboardInterrupt, SystemExit): + raise + except Exception as e: + logger.debug(f"Fallback cloudflare cleanup failed: {e}") + except (KeyboardInterrupt, SystemExit): + raise + except Exception as e: + logger.debug(f"Primary cloudflare cleanup failed: {e}") + try: + if ServiceManager.kill_cloudflare_processes(): + print("Cloudflare tunnel stopped") + except (KeyboardInterrupt, SystemExit): + raise + except Exception as e: + logger.debug(f"Fallback cloudflare cleanup also failed: {e}") + else: + try: + if ServiceManager.kill_cloudflare_processes(): + print("Cloudflare tunnel stopped") + except (KeyboardInterrupt, SystemExit): + raise + except Exception as e: + logger.debug(f"Fallback cloudflare cleanup failed: {e}") + + # Clear process info + try: + ProcessInfo.clear() + except (KeyboardInterrupt, SystemExit): + raise + except Exception as e: + logger.debug(f"Could not clear process info: {e}") + + except (KeyboardInterrupt, SystemExit): + logger.info("Cloudflare cleanup interrupted by user or system") + raise + except Exception as e: + print(f"Unexpected error during Cloudflare cleanup: {e}") + logger.error(f"Unexpected error during Cloudflare cleanup: {e}") + + def foreground_signal_handler(signum, frame): + """Handle interrupt signals in foreground mode.""" + cleanup_cloudflare_on_foreground_exit() + sys.exit(0) + + # Set up signal handlers for foreground mode + import signal + signal.signal(signal.SIGINT, foreground_signal_handler) # Ctrl+C + signal.signal(signal.SIGTERM, foreground_signal_handler) # Termination + from ...service.app import create_app cls._flask_app = create_app() cls._is_running = True + ProcessInfo.save(os.getpid(), is_running, ngrok_pid) ssl_context = ServiceManager.get_ssl_context(config_data) - cls._flask_app.run( - host='0.0.0.0', - port=port, - ssl_context=ssl_context - ) - + try: + cls._flask_app.run( + host='0.0.0.0', + port=port, + ssl_context=ssl_context + ) + finally: + cleanup_cloudflare_on_foreground_exit() + # Save the process ID for future reference - ProcessInfo.save(cls.pid, is_running, ngrok_pid) + ProcessInfo.save(cls.pid, is_running, ngrok_pid, cloudflare_pid) except FileNotFoundError: logging.info("Error: Service configuration file not found. 
Please use 'service-create' command to create a service_config file.") @@ -196,7 +280,7 @@ def stop_service(cls) -> None: """Stop the service if running.""" process_info = ProcessInfo.load() - logger.debug(f"Loaded process info - Service PID: {process_info.pid}, Ngrok PID: {process_info.ngrok_pid}") + logger.debug(f"Loaded process info - Service PID: {process_info.pid}, Ngrok PID: {process_info.ngrok_pid}, Cloudflare PID: {process_info.cloudflare_pid}") if not process_info.pid: print("Error: No running service found to stop") @@ -264,6 +348,41 @@ def stop_service(cls) -> None: if not ngrok_stopped: logger.debug("No ngrok processes found to stop") + # Stop Cloudflare tunnel process if it exists + cloudflare_stopped = False + if process_info.cloudflare_pid: + try: + logger.debug(f"Attempting to stop Cloudflare tunnel process (PID: {process_info.cloudflare_pid})") + + # Check if Cloudflare process is actually running first + try: + cloudflare_process = psutil.Process(process_info.cloudflare_pid) + logger.debug(f"Cloudflare tunnel process {process_info.cloudflare_pid} is running: {cloudflare_process.name()}") + except psutil.NoSuchProcess: + logger.debug(f"Cloudflare tunnel process {process_info.cloudflare_pid} is not running") + + logger.debug(f"Calling kill_process_by_pid for Cloudflare PID {process_info.cloudflare_pid}") + if ServiceManager.kill_process_by_pid(process_info.cloudflare_pid): + logger.debug(f"Cloudflare tunnel process stopped (PID: {process_info.cloudflare_pid})") + print("Cloudflare tunnel stopped") + cloudflare_stopped = True + else: + logger.warning(f"Failed to stop Cloudflare tunnel process (PID: {process_info.cloudflare_pid})") + except Exception as e: + logger.warning(f"Error stopping Cloudflare tunnel process: {str(e)}") + else: + logger.debug("No Cloudflare tunnel PID found in process info") + + # Fallback: Try to kill any remaining cloudflared processes + if not cloudflare_stopped: + logger.debug("Attempting fallback Cloudflare tunnel cleanup...") + if ServiceManager.kill_cloudflare_processes(): + print("Cloudflare tunnel stopped (via cleanup)") + cloudflare_stopped = True + + if not cloudflare_stopped: + logger.debug("No Cloudflare tunnel processes found to stop") + # Stop the main service process if ServiceManager.kill_process_by_pid(process_info.pid): logger.debug(f"Commander Service stopped (PID: {process_info.pid})") @@ -308,6 +427,14 @@ def get_status() -> str: except psutil.NoSuchProcess: status += f"\nNgrok tunnel is Stopped (was PID: {process_info.ngrok_pid})" + # Check Cloudflare tunnel status if available + if process_info.cloudflare_pid: + try: + psutil.Process(process_info.cloudflare_pid) + status += f"\nCloudflare tunnel is Running (PID: {process_info.cloudflare_pid})" + except psutil.NoSuchProcess: + status += f"\nCloudflare tunnel is Stopped (was PID: {process_info.cloudflare_pid})" + logger.debug(f"Service status check: {status}") return status except psutil.NoSuchProcess: @@ -396,3 +523,40 @@ def kill_ngrok_processes(): except Exception as e: logger.error(f"Exception while killing ngrok processes: {str(e)}") return False + + @staticmethod + def kill_cloudflare_processes(): + """Kill all cloudflared processes as a fallback method.""" + killed_count = 0 + try: + logger.debug("Looking for cloudflared processes to kill...") + + # Find all cloudflared processes + for proc in psutil.process_iter(['pid', 'name', 'cmdline']): + try: + if proc.info['name'] and 'cloudflared' in proc.info['name'].lower(): + pid = proc.info['pid'] + logger.debug(f"Found 
cloudflared process by name: PID {pid}") + if ServiceManager.kill_process_by_pid(pid): + killed_count += 1 + elif proc.info['cmdline']: + # Check if cloudflared is in the command line + cmdline_str = ' '.join(proc.info['cmdline']) + if 'cloudflared' in cmdline_str.lower() and ('tunnel' in cmdline_str.lower() or 'http' in cmdline_str.lower()): + pid = proc.info['pid'] + logger.debug(f"Found cloudflared process by cmdline: PID {pid}") + if ServiceManager.kill_process_by_pid(pid): + killed_count += 1 + except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess): + continue + + if killed_count > 0: + logger.info(f"Killed {killed_count} cloudflared processes") + return True + else: + logger.debug("No cloudflared processes found to kill") + return False + + except Exception as e: + logger.error(f"Exception while killing cloudflared processes: {str(e)}") + return False diff --git a/keepercommander/service/decorators/auth.py b/keepercommander/service/decorators/auth.py index 0f5e18c29..aec0e14f1 100644 --- a/keepercommander/service/decorators/auth.py +++ b/keepercommander/service/decorators/auth.py @@ -23,28 +23,28 @@ def wrapper(*args, **kwargs): api_key = request.headers.get('api-key') if not api_key: return { - 'status': 'fail', - 'message': 'Please provide a valid api key.' + 'status': 'error', + 'error': 'Please provide a valid api key' }, 401 stored_key = ConfigReader.read_config('api-key', api_key) if not stored_key or api_key.strip() != stored_key.strip(): return { - 'status': 'fail', - 'message': 'Please provide a valid api key.' + 'status': 'error', + 'error': 'Please provide a valid api key' }, 401 expiration_timestamp = ConfigReader.read_config('expiration_timestamp', api_key) if not expiration_timestamp: return { - 'status': 'fail', - 'message': 'Invalid or expired API key.' + 'status': 'error', + 'error': 'Invalid or expired API key' }, 401 if datetime.now() > datetime.fromisoformat(expiration_timestamp): return { - 'status': 'fail', - 'message': 'API key has expired.' 
+ 'status': 'error', + 'error': 'API key has expired' }, 401 kwargs['api-key'] = True @@ -66,16 +66,29 @@ def wrapper(*args, **kwargs): api_key = request.headers.get('api-key') policy = ConfigReader.read_config('command_list', api_key) command_content = request_data.get("command") + + if command_content is None: + return { + 'status': 'error', + 'error': 'Missing required field "command"' + }, 400 + + if not isinstance(command_content, str): + return { + 'status': 'error', + 'error': 'Command must be a string' + }, 400 + if len(command_content) > 4096: return { - 'status': 'fail', - 'message': 'Command length exceeded' + 'status': 'error', + 'error': 'Command length exceeded' }, 400 command = command_content.split(" ") if not policy or not policy.strip(): return { - 'status': 'fail', - 'message': 'Not permitted to perform this function' + 'status': 'error', + 'error': 'Not permitted to perform this function' }, 403 allowed_commands = split_to_list(policy, ',') @@ -84,15 +97,35 @@ def wrapper(*args, **kwargs): if not any(command[0] == cmd.strip() for cmd in allowed_commands): return { - 'status': 'fail', - 'message': 'Not permitted to perform this function' + 'status': 'error', + 'error': 'Not permitted to perform this function' }, 403 - if Verifycommand.is_append_command(command): - logger.debug(f"Command : {command[0]}") + # Validate append-notes command + append_error = Verifycommand.validate_append_command(command) + if append_error: + logger.debug(f"Command validation failed: {command[0]} - {append_error}") + return { + 'status': 'error', + 'error': append_error + }, 400 + + # Validate mkdir command + mkdir_error = Verifycommand.validate_mkdir_command(command) + if mkdir_error: + logger.debug(f"Command validation failed: {command[0]} - {mkdir_error}") + return { + 'status': 'error', + 'error': mkdir_error + }, 400 + + # Validate transform-folder command + transform_folder_error = Verifycommand.validate_transform_folder_command(command) + if transform_folder_error: + logger.debug(f"Command validation failed: {command[0]} - {transform_folder_error}") return { - 'status': 'fail', - 'message': 'Invalid command' + 'status': 'error', + 'error': transform_folder_error }, 400 return fn(*args, **kwargs) diff --git a/keepercommander/service/decorators/security.py b/keepercommander/service/decorators/security.py index 3deb007bd..15ce97dbb 100644 --- a/keepercommander/service/decorators/security.py +++ b/keepercommander/service/decorators/security.py @@ -104,8 +104,8 @@ def wrapper(*args, **kwargs): if is_allowed_ip(client_ip, allowed_ips_str, denied_ips_str): return fn(*args, **kwargs) - return jsonify({"error": "IP is not allowed to call API service"}), 403 + return jsonify({"status": "error", "error": "IP is not allowed to call API service"}), 403 except Exception as e: logger.error(f"Security check error: {e}") - return jsonify({"error": "Access denied"}), 403 + return jsonify({"status": "error", "error": "Access denied"}), 403 return wrapper \ No newline at end of file diff --git a/keepercommander/service/util/command_util.py b/keepercommander/service/util/command_util.py index 73bd87da9..b7818ec49 100644 --- a/keepercommander/service/util/command_util.py +++ b/keepercommander/service/util/command_util.py @@ -13,6 +13,7 @@ from pathlib import Path import sys import json +import logging from typing import Any, Tuple, Optional from .config_reader import ConfigReader from .exceptions import CommandExecutionError @@ -42,9 +43,20 @@ def validate_session() -> Optional[Tuple[dict, int]]: 
@staticmethod @debug_decorator - def capture_output(params: Any, command: str) -> Tuple[Any, str]: + def capture_output_and_logs(params: Any, command: str) -> Tuple[Any, str, str]: + """Capture both stdout/stderr and logging output from command execution.""" captured_stdout = io.StringIO() captured_stderr = io.StringIO() + captured_logs = io.StringIO() + + # Create a temporary log handler to capture command logs + temp_handler = logging.StreamHandler(captured_logs) + temp_handler.setFormatter(logging.Formatter('%(message)s')) + + # Get the root logger to capture all logging output + root_logger = logging.getLogger() + original_level = root_logger.level + original_handlers = root_logger.handlers[:] original_stdout = sys.stdout original_stderr = sys.stderr @@ -53,33 +65,43 @@ def capture_output(params: Any, command: str) -> Tuple[Any, str]: sys.stderr = captured_stderr try: + # Add our temporary handler and set appropriate level + root_logger.addHandler(temp_handler) + root_logger.setLevel(logging.INFO) + return_value = cli.do_command(params, command) + stdout_content = captured_stdout.getvalue() stderr_content = captured_stderr.getvalue() + log_content = captured_logs.getvalue() - # Combine both stderr and stdout to capture all command output + # Combine stdout and stderr stdout_clean = stdout_content.strip() stderr_clean = stderr_content.strip() + log_clean = log_content.strip() if stderr_clean and stdout_clean: combined_output = stderr_clean + '\n' + stdout_clean else: combined_output = stderr_clean or stdout_clean - return return_value, combined_output + return return_value, combined_output, log_clean except Exception as e: # If there's an exception, capture any error output stderr_content = captured_stderr.getvalue() + log_content = captured_logs.getvalue() error_output = stderr_content.strip() if error_output: - # Re-raise with the captured error message raise CommandExecutionError(f"Command failed: {error_output}") raise finally: sys.stdout = original_stdout sys.stderr = original_stderr - + # Restore original logging configuration + root_logger.removeHandler(temp_handler) + root_logger.setLevel(original_level) + temp_handler.close() @staticmethod @@ -103,41 +125,24 @@ def execute(cls, command: str) -> Tuple[Any, int]: if validation_error: return validation_error - service_config = ServiceConfig() - config_data = service_config.load_config() - try: - params = get_current_params() - except Exception as e: - logger.debug(f"Failed to get params from globals: {e}") - params = None - if config_data.get("run_mode") == "background": - try: - if not params: - config_path = utils.get_default_path() / "config.json" - params = get_params_from_config(config_path) - from ..core.globals import init_globals - init_globals(params) - except FileNotFoundError: - logger.error(f"Config file not found at {config_path}") - raise - except Exception as e: - logger.error(f"Failed to load params from config file: {e}") - raise + from ..core.globals import ensure_params_loaded try: + params = ensure_params_loaded() command = html.unescape(command) - return_value, printed_output = CommandExecutor.capture_output(params, command) + return_value, printed_output, log_output = CommandExecutor.capture_output_and_logs(params, command) response = return_value if return_value else printed_output # Debug logging with sanitization sanitized_output = sanitize_debug_data(printed_output) - logger.debug(f"After capture_output - return_value: '{return_value}', printed_output: '{sanitized_output}'") + sanitized_logs = 
sanitize_debug_data(log_output) + logger.debug(f"After capture_output - return_value: '{return_value}', printed_output: '{sanitized_output}', log_output: '{sanitized_logs}'") sanitized_response = sanitize_debug_data(str(response)) logger.debug(f"Final response: '{sanitized_response}', response type: {type(response)}") cli.do_command(params, 'sync-down') - # Always let the parser handle the response (including empty responses) - response = parse_keeper_response(command, response) + # Always let the parser handle the response (including empty responses and logs) + response = parse_keeper_response(command, response, log_output) response = CommandExecutor.encrypt_response(response) logger.debug("Command executed successfully") @@ -146,7 +151,7 @@ # Return the actual command error instead of generic "server busy" logger.error(f"Command execution error: {e}") error_response = { - "success": False, + "status": "error", "error": str(e) } return error_response, 400 @@ -154,7 +159,7 @@ # Log unexpected errors and return a proper error response logger.error(f"Unexpected error during command execution: {e}") error_response = { - "success": False, + "status": "error", "error": f"Unexpected error: {str(e)}" } return error_response, 500 \ No newline at end of file diff --git a/keepercommander/service/util/parse_keeper_response.py b/keepercommander/service/util/parse_keeper_response.py index 9c7f7daa4..d1d7215c0 100644 --- a/keepercommander/service/util/parse_keeper_response.py +++ b/keepercommander/service/util/parse_keeper_response.py @@ -16,55 +16,126 @@ class KeeperResponseParser: @staticmethod def _clean_ansi_codes(text: str) -> str: """Remove ANSI escape codes from text.""" + if not text: + return text ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])') return ansi_escape.sub('', text) @staticmethod - def parse_response(command: str, response: Any) -> Dict[str, Any]: + def _format_multiline_message(text: str) -> Any: + """Format multi-line text for better JSON readability; returns a list of lines for multi-line input.""" + if not text: + return text + + # Check if the text has multiple lines + if '\n' in text: + # Split into lines and return as array for better structure + lines = [line.strip() for line in text.split('\n') if line.strip()] + return lines + + return text + + @staticmethod + def _preprocess_response(response: Any, log_output: str = None) -> tuple[str, bool]: + """Preprocess response by cleaning ANSI codes and determining source. + + Returns: + tuple: (cleaned_response_str, is_from_log_output) + """ + response_str = str(response).strip() if response else "" + log_str = log_output.strip() if log_output else "" + + # Fall back to the captured log output when the command wrote nothing to stdout/stderr + if not response_str and log_str: + response_str = log_str + is_from_log = True + elif response_str: + is_from_log = False + else: + return "", False + + # Clean ANSI codes once + cleaned_response = KeeperResponseParser._clean_ansi_codes(response_str) + return cleaned_response, is_from_log + + @staticmethod + def _find_parser_method(command: str) -> str: + """Find the appropriate parser method for a command.
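Patterns are checked in priority order: explicit JSON format flags first, then substring patterns, then exact command prefixes, with logging-based parsing as the fallback.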
+ + Returns: + str: Method name to call for parsing + """ + # Check for JSON format first (highest priority) + if '--format=json' in command or '--format json' in command: + return '_parse_json_format_command' + + # Check for other substring matches + substring_patterns = { + 'pam project import': '_parse_pam_project_import_command', + 'enterprise-push': '_parse_enterprise_push_command', + 'search record': '_parse_search_record_command', + 'search folder': '_parse_search_folder_command', + } + + for pattern, method_name in substring_patterns.items(): + if pattern in command: + return method_name + + # Check for exact command start matches + exact_patterns = { + 'generate': '_parse_generate_command', + 'ls': '_parse_ls_command', + 'tree': '_parse_tree_command', + 'whoami': '_parse_whoami_command', + 'this-device': '_parse_this_device_command', + 'mkdir': '_parse_mkdir_command', + 'record-add': '_parse_record_add_command', + 'get': '_parse_get_command', + 'download': '_parse_get_command', + } + + for pattern, method_name in exact_patterns.items(): + if command.startswith(pattern): + return method_name + + # Default to logging-based parsing + return '_parse_logging_based_command' + + @staticmethod + def parse_response(command: str, response: Any, log_output: str = None) -> Dict[str, Any]: """ Main parser that routes to specific command parsers based on the command type. Args: command (str): The executed command response (Any): Response from the keeper commander + log_output (str, optional): Captured log output from command execution Returns: Dict[str, Any]: Structured JSON response """ - if not response: + # Preprocess response once + response_str, is_from_log = KeeperResponseParser._preprocess_response(response, log_output) + + # Handle completely empty responses + if not response_str: return KeeperResponseParser._handle_empty_response(command) - response_str = str(response).strip() - # Clean ANSI codes from all responses - response_str = KeeperResponseParser._clean_ansi_codes(response_str) - - if command.startswith('generate'): - return KeeperResponseParser._parse_generate_command(command, response_str) - elif '--format=json' in command: - return KeeperResponseParser._parse_json_format_command(command, response_str) - elif "pam project import" in command: - return KeeperResponseParser._parse_pam_project_import_command(command, response_str) - elif "enterprise-push" in command: - return KeeperResponseParser._parse_enterprise_push_command(command, response_str) - elif command.startswith("ls"): - return KeeperResponseParser._parse_ls_command(response_str) - elif command.startswith("tree"): - return KeeperResponseParser._parse_tree_command(response_str) - elif command.startswith("whoami"): - return KeeperResponseParser._parse_whoami_command(response_str) - elif command.startswith("mkdir"): - return KeeperResponseParser._parse_mkdir_command(response_str) - elif command.startswith("record-add"): - return KeeperResponseParser._parse_record_add_command(response_str) - elif "search record" in command: - return KeeperResponseParser._parse_search_record_command(response_str) - elif "search folder" in command: - return KeeperResponseParser._parse_search_folder_command(response_str) - elif command.startswith("get") or command.startswith("download"): - return KeeperResponseParser._parse_get_command(response_str) - else: - # Check if this is a logging-based command (commands that use logging.info for output) + # If from log output, use logging-based parsing directly + if is_from_log: return 
KeeperResponseParser._parse_logging_based_command(command, response_str) + + # Find and call the appropriate parser method + parser_method_name = KeeperResponseParser._find_parser_method(command) + parser_method = getattr(KeeperResponseParser, parser_method_name) + + # Call the parser method with the arguments it expects + if parser_method_name in ['_parse_generate_command', '_parse_json_format_command', + '_parse_pam_project_import_command', '_parse_enterprise_push_command', + '_parse_logging_based_command']: + return parser_method(command, response_str) + else: + return parser_method(response_str) @staticmethod def _parse_ls_command(response: str) -> Dict[str, Any]: @@ -141,27 +212,131 @@ def _parse_tree_command(response: str) -> Dict[str, Any]: result = { "status": "success", "command": "tree", - "data": [] + "data": { + "tree": [], + "share_permissions_key": None + } } + lines = response.strip().split("\n") current_path = [] - for line in response.strip().split("\n"): + + # Check if this has share permissions key + if "Share Permissions Key:" in response: + # Extract share permissions key + key_lines = [] + tree_start_idx = 0 + + for i, line in enumerate(lines): + if "Share Permissions Key:" in line: + # Find the key section + j = i + while j < len(lines) and not (" ├── " in lines[j] or " └── " in lines[j] or lines[j].strip() == "My Vault"): + if lines[j].strip() and not lines[j].startswith("="): + if "=" in lines[j] and lines[j].strip() != "Share Permissions Key:": + key_lines.append(lines[j].strip()) + j += 1 + tree_start_idx = j + break + + if key_lines: + result["data"]["share_permissions_key"] = key_lines + + # Use only the tree part + lines = lines[tree_start_idx:] + + for line in lines: if not line.strip(): continue + + # Skip the root "My Vault" line + if line.strip() == "My Vault": + continue + + # Skip share permission lines + if any(perm in line for perm in ["Share Permissions Key:", "=======", "RO =", "MU =", "MR =", "CE =", "CS ="]): + continue - level = (len(line) - len(line.lstrip())) // 2 - name = line.strip() + # Calculate level based on tree characters + cleaned_line = line + level = 0 + + # Count tree depth by looking for tree characters + while True: + if cleaned_line.startswith(" │ "): + level += 1 + cleaned_line = cleaned_line[4:] + elif cleaned_line.startswith(" ├── "): + cleaned_line = cleaned_line[5:] + break + elif cleaned_line.startswith(" └── "): + cleaned_line = cleaned_line[5:] + break + elif cleaned_line.startswith(" "): + level += 1 + cleaned_line = cleaned_line[4:] + else: + break + # Parse the cleaned line + name = cleaned_line.strip() + if not name: + continue + + is_record = "[Record]" in name + is_shared = "[SHARED]" in name + + # Extract UID if present (for -v flag) + uid = None + uid_match = re.search(r'\(([^)]+)\)', name) + if uid_match and not any(x in uid_match.group(1) for x in ["default:", "user:"]): + uid = uid_match.group(1) + + # Extract share permissions if present (for -s flag) + share_permissions = None + perm_match = re.search(r'\(default:([^;]+); user:([^)]+)\)', name) + if perm_match: + share_permissions = { + "default": perm_match.group(1), + "user": perm_match.group(2) + } + + # Clean the name from all indicators + clean_name = name + clean_name = re.sub(r' \([^)]*\) \[SHARED\] \([^)]*\)', '', clean_name) # Remove UID + SHARED + permissions + clean_name = re.sub(r' \([^)]*\) \[Record\]', '', clean_name) # Remove UID + Record + clean_name = re.sub(r' 
\([^)]*\) \[SHARED\]', '', clean_name) # Remove UID + SHARED + clean_name = re.sub(r' \([^)]*\)', '', clean_name) # Remove just UID + clean_name = clean_name.replace(" [Record]", "").replace(" [SHARED]", "") + + # Determine type + item_type = "record" if is_record else "folder" + + # Build path while len(current_path) > level: current_path.pop() + if len(current_path) == level: - current_path.append(name) + current_path.append(clean_name) + else: + current_path = current_path[:level] + [clean_name] - result["data"].append({ + # Create item structure + item = { + "name": clean_name, + "type": item_type, "level": level, - "name": name, - "path": "/".join(current_path) - }) + "path": "/".join(current_path), + "shared": is_shared + } + + # Add optional fields + if uid: + item["uid"] = uid + if share_permissions: + item["share_permissions"] = share_permissions + + result["data"]["tree"].append(item) return result @@ -256,6 +431,56 @@ def _parse_whoami_command(response: str) -> Dict[str, Any]: return result + @staticmethod + def _parse_this_device_command(response: str) -> Dict[str, Any]: + """Parse 'this-device' command output into structured format.""" + result = { + "status": "success", + "command": "this-device", + "data": {} + } + + # Parse each line of the this-device output + for line in response.strip().split("\n"): + line = line.strip() + if not line or line == "": + continue + + if ":" in line: + # Split on first colon only + key, value = line.split(":", 1) + key = key.strip() + value = value.strip() + + # Convert key to snake_case and normalize + key_normalized = key.lower().replace(" ", "_").replace("-", "_") + + # Handle special cases and convert values + if key_normalized == "device_name": + result["data"]["device_name"] = value + elif key_normalized == "data_key_present": + result["data"]["data_key_present"] = value.upper() == "YES" + elif key_normalized == "ip_auto_approve": + result["data"]["ip_auto_approve"] = value.upper() == "ON" + elif key_normalized == "persistent_login": + result["data"]["persistent_login"] = value.upper() == "ON" + elif key_normalized == "security_key_no_pin": + result["data"]["security_key_no_pin"] = value.upper() == "ON" + elif key_normalized == "device_logout_timeout": + result["data"]["device_logout_timeout"] = value + elif key_normalized == "is_sso_user": + result["data"]["is_sso_user"] = value.lower() == "true" + else: + # Generic handling for other fields + result["data"][key_normalized] = value + elif "Available sub-commands:" in line: + # Extract sub-commands + sub_commands = line.split("Available sub-commands:")[1].strip() + if sub_commands: + result["data"]["available_sub_commands"] = [cmd.strip() for cmd in sub_commands.split(",")] + + return result + @staticmethod def _parse_mkdir_command(response: str) -> Dict[str, Any]: """Parse 'mkdir' command output to extract folder UID.""" @@ -287,12 +512,23 @@ def _parse_record_add_command(response: str) -> Dict[str, Any]: response_str = response.strip() # Check for error messages first - if ("error" in response_str.lower() or "failed" in response_str.lower() or - "invalid" in response_str.lower() or "already exists" in response_str.lower()): + response_lower = response_str.lower() + error_patterns = ["error", "failed", "invalid"] + warning_patterns = ["already exists"] + + if any(pattern in response_lower for pattern in error_patterns): return { - "success": False, + "status": "error", + "command": "record-add", "error": response_str } + elif any(pattern in response_lower for pattern in 
warning_patterns): + return { + "status": "warning", + "command": "record-add", + "message": response_str, + "data": None + } # Success case - try to extract UID result = { @@ -376,7 +612,18 @@ def _parse_get_command(response: str) -> Dict[str, Any]: "data": {} } - for line in response.strip().split("\n"): + response_lines = response.strip().split("\n") + + # Handle special case for --format password (single line with just the password) + if len(response_lines) == 1 and ":" not in response_lines[0]: + # This is likely a password from --format password + password_line = response_lines[0].strip() + if password_line: + result["data"]["password"] = password_line + return result + + # Handle regular get command output with key-value pairs + for line in response_lines: if ":" in line: key, value = line.split(":", 1) result["data"][key.strip().lower().replace(" ", "_")] = value.strip() @@ -469,7 +716,13 @@ def _parse_json_format_command(command: str, response: str) -> Dict[str, Any]: Returns: Dict[str, Any]: Structured response with standard format """ - base_command = command.split(' --format=json')[0] + # Extract base command by removing --format=json or --format json + if ' --format=json' in command: + base_command = command.split(' --format=json')[0] + elif ' --format json' in command: + base_command = command.split(' --format json')[0] + else: + base_command = command.split()[0] if command.split() else command result = { "status": "success", "command": base_command, @@ -499,7 +752,8 @@ def _parse_generate_command(command: str, response_str: str) -> Dict[str, Any]: """Parse generate command output to extract password(s) and metadata.""" if not response_str: return { - "success": False, + "status": "error", + "command": "generate", "error": "Generate command produced no output" } @@ -600,9 +854,9 @@ def _parse_generate_command(command: str, response_str: str) -> Dict[str, Any]: @staticmethod def _handle_empty_response(command: str) -> Dict[str, Any]: """Handle commands that produce no output but are successful.""" - # These commands truly produce no output and should be treated as silent success + # These commands truly produce no output (no logs, no stdout) and should be treated as silent success silent_success_commands = [ - "sync-down", "logout", "keep-alive", "set", "mkdir", "import" + "sync-down", "logout", "keep-alive", "set", "record-update", "append-notes", "mv", "ln" ] if any(cmd in command for cmd in silent_success_commands): @@ -615,10 +869,14 @@ def _handle_empty_response(command: str) -> Dict[str, Any]: message = "Session kept alive successfully" elif "set" in command: message = "Configuration updated successfully" - elif "mkdir" in command: - message = "Folder already exists" - elif "import" in command: - message = "Import completed successfully" + elif "record-update" in command: + message = "Record updated successfully" + elif "append-notes" in command: + message = "Notes appended successfully" + elif "mv" in command: + message = "Item moved successfully" + elif "ln" in command: + message = "Link created successfully" else: message = "Command executed successfully" @@ -630,8 +888,10 @@ def _handle_empty_response(command: str) -> Dict[str, Any]: } else: return { - "success": False, - "error": "Command produced no output. This may indicate a command error or invalid syntax." 
+ "status": "success", + "command": command.split()[0] if command.split() else command, + "message": "Command executed successfully but produced no output", + "data": None } @staticmethod @@ -664,110 +924,92 @@ def _parse_logging_based_command(command: str, response_str: str) -> Dict[str, A """Parse commands that primarily use logging.info() for output.""" response_str = response_str.strip() - # Check for common error patterns first (from both logging.info and logging.warning) + # Filter out biometric and persistent login messages for cleaner API responses + response_str = KeeperResponseParser._filter_login_messages(response_str) + + # Determine status based on common patterns + status = "success" + + # Check for error patterns (case insensitive) error_patterns = [ "error", "failed", "invalid", "not found", "does not exist", - "already exists", "permission denied", "unauthorized", "warning:", - "cannot be", "character", "reserved" + "permission denied", "unauthorized", "cannot be", "character", "reserved" ] - if any(pattern in response_str.lower() for pattern in error_patterns): + # Check for warning patterns + warning_patterns = ["warning:", "already exists"] + + response_lower = response_str.lower() + + if any(pattern in response_lower for pattern in error_patterns): return { - "success": False, + "status": "error", + "command": command.split()[0] if command.split() else command, "error": response_str } + elif any(pattern in response_lower for pattern in warning_patterns): + status = "warning" - # Parse logging-based success messages - success_patterns = { - # Enterprise commands - "user deleted": "User deleted successfully", - "user updated": "User updated successfully", - "user created": "User created successfully", - "role created": "Role created successfully", - "role updated": "Role updated successfully", - "role deleted": "Role deleted successfully", - "team created": "Team created successfully", - "team updated": "Team updated successfully", - "team deleted": "Team deleted successfully", - "role assigned": "Role assigned successfully", - "role removed": "Role removed successfully", - - # Record operations - "records deleted successfully": response_str, - "records imported successfully": response_str, - "record updated": "Record updated successfully", - "record added": "Record added successfully", - - # Folder operations - "folder removed": "Folder removed successfully", - "folder renamed": "Folder renamed successfully", - "items moved": "Items moved successfully", - - # Attachment operations - "attachment uploaded": "Attachment uploaded successfully", - "attachment deleted": "Attachment deleted successfully", - "notes appended": "Notes appended successfully", - - # Security operations - "security data": "Security data synchronized successfully", - "master password": "Master password updated successfully", - - # Transfer operations - "transfer accepted": "Transfer accepted successfully", - "account transfer": "Account transfer completed successfully", - - # Share operations - "share added": "Share added successfully", - "share updated": "Share updated successfully", - "share removed": "Share removed successfully", - - # Clipboard operations - "copied to clipboard": "Copied to clipboard successfully", - - # General success indicators - "successfully": response_str, - "completed": response_str, - "updated": response_str if "updated" in response_str else "Update completed successfully", - "created": response_str if "created" in response_str else "Creation completed successfully", - 
"deleted": response_str if "deleted" in response_str else "Deletion completed successfully" - } - - # Find matching success pattern - for pattern, message in success_patterns.items(): - if pattern in response_str.lower(): - return { - "status": "success", - "command": command.split()[0] if command.split() else command, - "message": message, - "data": response_str if message == response_str else None - } - - # Default handling for unmatched responses + # Return the actual log message with proper formatting if response_str: - # If there's output but no clear pattern, assume success and return the output + formatted_message = KeeperResponseParser._format_multiline_message(response_str) return { - "status": "success", + "status": status, "command": command.split()[0] if command.split() else command, - "message": response_str, + "message": formatted_message, "data": None } else: - # No output - this should have been caught by _handle_empty_response - return { - "success": False, - "error": "Command produced no output. This may indicate a command error or invalid syntax." - } + # No output after cleaning - use existing empty response handler + return KeeperResponseParser._handle_empty_response(command) + + @staticmethod + def _filter_login_messages(response_str: str) -> str: + """Filter out biometric and persistent login messages from response.""" + if not response_str: + return response_str + + # Common login messages to filter out + login_patterns = [ + "Logging in to Keeper Commander", + "Successfully authenticated with Persistent Login", + "Successfully authenticated with Biometric Login", + "Attempting biometric authentication...", + "Press Ctrl+C to skip biometric and use default login method", + "Syncing...", + "Decrypted [", + "records that are affected by breaches", + "Use \"breachwatch list\" command" + ] + + lines = response_str.split('\n') + filtered_lines = [] + + for line in lines: + line = line.strip() + if not line: + continue + + # Skip login-related lines + if any(pattern in line for pattern in login_patterns): + continue + + filtered_lines.append(line) + + return '\n'.join(filtered_lines) + -def parse_keeper_response(command: str, response: Any) -> Dict[str, Any]: +def parse_keeper_response(command: str, response: Any, log_output: str = None) -> Dict[str, Any]: """ Main entry point for parsing Keeper Commander responses. 
Args: command (str): The executed command response (Any): Response from the keeper commander + log_output (str, optional): Captured log output from command execution Returns: Dict[str, Any]: Structured JSON response """ - return KeeperResponseParser.parse_response(command, response) \ No newline at end of file + return KeeperResponseParser.parse_response(command, response, log_output) \ No newline at end of file diff --git a/keepercommander/service/util/request_validation.py b/keepercommander/service/util/request_validation.py index 3d365eda9..f2750f98b 100644 --- a/keepercommander/service/util/request_validation.py +++ b/keepercommander/service/util/request_validation.py @@ -33,16 +33,16 @@ def validate_and_escape_command(request_data: Dict[str, Any]) -> Tuple[Optional[ """ if not request_data: logger.info("Request validation failed: No JSON data provided") - return None, (jsonify({"success": False, "error": "Error: No JSON data provided"}), 400) + return None, (jsonify({"status": "error", "error": "No JSON data provided"}), 400) command = request_data.get("command") if not command: - logger.info("Request validation failed: No command provided") - return None, (jsonify({"success": False, "error": "Error: No command provided"}), 400) + logger.info("Request validation failed: Missing required field 'command' or incorrect field name") + return None, (jsonify({"status": "error", "error": "Missing required field \"command\" or incorrect field name"}), 400) if not isinstance(command, str): logger.warning("Request validation failed: Command must be a string") - return None, (jsonify({"success": False, "error": "Error: Command must be a string"}), 400) + return None, (jsonify({"status": "error", "error": "Command must be a string"}), 400) # Escape HTML to prevent XSS escaped_command = escape(command) @@ -125,10 +125,10 @@ def validate_request_json() -> Optional[Tuple]: """ if not request.is_json: logger.info("Request validation failed: Content-Type must be application/json") - return jsonify({"success": False, "error": "Error: Content-Type must be application/json"}), 400 + return jsonify({"status": "error", "error": "Content-Type must be application/json"}), 400 if not request.json: logger.info("Request validation failed: Invalid or empty JSON") - return jsonify({"success": False, "error": "Error: Invalid or empty JSON"}), 400 + return jsonify({"status": "error", "error": "Invalid or empty JSON"}), 400 return None diff --git a/keepercommander/service/util/tunneling.py b/keepercommander/service/util/tunneling.py index a6713cc9c..5c650de9f 100644 --- a/keepercommander/service/util/tunneling.py +++ b/keepercommander/service/util/tunneling.py @@ -17,6 +17,8 @@ import time import requests import re +import json +import tempfile def start_ngrok(port, auth_token=None, subdomain=None): @@ -213,6 +215,222 @@ def generate_ngrok_url(port, auth_token, ngrok_custom_domain, run_mode): tunnel = ngrok.connect(port, pyngrok_config=ngrok_config) return tunnel.public_url, None + finally: + os.dup2(old_stdout_fd, 1) + os.dup2(old_stderr_fd, 2) + os.close(old_stdout_fd) + os.close(old_stderr_fd) + + +# Cloudflare Tunnel Functions + +def _download_cloudflared(): + """ + Download cloudflared binary if not available. + Returns path to cloudflared binary. 
+ """ + try: + # First try to find existing cloudflared + result = subprocess.run(['which', 'cloudflared'], capture_output=True, text=True) + if result.returncode == 0: + return result.stdout.strip() + except: + pass + + # Download cloudflared binary + import platform + import urllib.request + + system = platform.system().lower() + machine = platform.machine().lower() + + # Determine the correct binary URL + if system == "linux": + if "arm" in machine or "aarch64" in machine: + url = "https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-arm64" + else: + url = "https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64" + elif system == "darwin": # macOS + if "arm64" in machine or "aarch64" in machine: + url = "https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-darwin-arm64.tgz" + else: + url = "https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-darwin-amd64.tgz" + elif system == "windows": + url = "https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-windows-amd64.exe" + else: + raise Exception(f"Unsupported platform: {system}") + + # Create temp directory for cloudflared + service_core_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "core") + bin_dir = os.path.join(service_core_dir, "bin") + os.makedirs(bin_dir, exist_ok=True) + + binary_name = "cloudflared.exe" if system == "windows" else "cloudflared" + binary_path = os.path.join(bin_dir, binary_name) + + if os.path.exists(binary_path): + return binary_path + + logging.info("Downloading cloudflared binary...") + + if url.endswith('.tgz'): + # Handle compressed download for macOS + import tarfile + with tempfile.NamedTemporaryFile(delete=False) as tmp_file: + urllib.request.urlretrieve(url, tmp_file.name) + with tarfile.open(tmp_file.name, 'r:gz') as tar: + tar.extractall(bin_dir) + os.unlink(tmp_file.name) + else: + urllib.request.urlretrieve(url, binary_path) + + # Make executable on Unix systems + if system != "windows": + os.chmod(binary_path, 0o755) + + return binary_path + + +def start_cloudflare_tunnel(port, tunnel_token, custom_domain=None): + """ + Start Cloudflare tunnel as a detached subprocess and return the PID. + """ + # Use direct cloudflared binary approach + return _start_cloudflare_with_binary(port, tunnel_token, custom_domain) + + +def _start_cloudflare_with_binary(port, tunnel_token, custom_domain=None): + """ + Start Cloudflare tunnel using cloudflared binary. + """ + cloudflared_path = _download_cloudflared() + + if tunnel_token and tunnel_token.strip(): + # Named tunnel with token - need to specify local service URL + cloudflared_cmd = [cloudflared_path, "tunnel", "run", "--token", tunnel_token, "--url", f"http://localhost:{port}"] + if custom_domain: + # For named tunnels, domain is configured in Cloudflare dashboard + logging.info(f"Using custom domain: {custom_domain} (configured in Cloudflare dashboard)") + else: + raise Exception( + "Tunnel token is required for secure tunnel operation. " + "Quick tunnels are not supported for production use. " + "Please provide a valid Cloudflare tunnel token." 
+    )
+
+    service_core_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "core")
+    log_dir = os.path.join(service_core_dir, "logs")
+    os.makedirs(log_dir, exist_ok=True)
+    log_file = os.path.join(log_dir, "cloudflare_tunnel_subprocess.log")
+
+    if sys.platform == "win32":
+        subprocess.DETACHED_PROCESS = 0x00000008
+        with open(log_file, 'w') as log_f:
+            process = subprocess.Popen(
+                cloudflared_cmd,
+                creationflags=subprocess.DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP,
+                stdout=log_f,
+                stderr=subprocess.STDOUT,
+                cwd=service_core_dir,
+                env=os.environ.copy()
+            )
+    else:
+        with open(log_file, 'w') as log_f:
+            process = subprocess.Popen(
+                cloudflared_cmd,
+                stdout=log_f,
+                stderr=subprocess.STDOUT,
+                preexec_fn=os.setpgrp,
+                cwd=service_core_dir,
+                env=os.environ.copy()
+            )
+
+    tunnel_url = get_cloudflare_url_from_log(log_file, custom_domain)
+
+    return process.pid, tunnel_url
+
+
+def get_cloudflare_url_from_log(log_file, custom_domain=None, max_retries=10, retry_delay=1):
+    """
+    Parse the Cloudflare tunnel log file to extract the public URL.
+    Returns the public URL if found, None otherwise.
+    """
+    # Patterns to match different Cloudflare tunnel URL formats
+    url_patterns = [
+        r'https://[a-zA-Z0-9\-]+\.trycloudflare\.com',
+        r'https://[a-zA-Z0-9\-\.]+\.cfargotunnel\.com',
+        r'https://[a-zA-Z0-9\-\.]+',  # Generic HTTPS URLs
+    ]
+
+    for attempt in range(max_retries):
+        try:
+            if os.path.exists(log_file):
+                with open(log_file, 'r') as f:
+                    content = f.read()
+
+                # Look for URL patterns in the log
+                for pattern in url_patterns:
+                    matches = re.findall(pattern, content)
+                    for match in matches:
+                        # Filter out localhost and other non-public URLs, then
+                        # accept known tunnel domains or the configured custom domain
+                        if 'localhost' in match or '127.0.0.1' in match:
+                            continue
+                        if ('trycloudflare.com' in match or 'cfargotunnel.com' in match
+                                or (custom_domain and custom_domain in match)):
+                            return match
+
+        except Exception as e:
+            logging.debug(f"Error reading Cloudflare tunnel log file: {e}")
+
+        # Wait and retry if URL not found yet
+        if attempt < max_retries - 1:
+            time.sleep(retry_delay)
+
+    # If custom domain provided and no URL found, construct it
+    if custom_domain:
+        return f"https://{custom_domain}"
+
+    return None
+
+
+def start_cloudflare_tunnel_with_url(port, tunnel_token, custom_domain=None):
+    """
+    Start Cloudflare tunnel subprocess and return both PID and the actual public URL.
+    Returns a tuple (pid, public_url).
+    """
+    pid, public_url = start_cloudflare_tunnel(port, tunnel_token, custom_domain)
+    return pid, public_url
+
+
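
# Illustrative sketch, not part of the patch: the kind of registration line
# cloudflared writes to its log, and how the (now untangled) filter above is
# expected to treat it. The log text and hostname are fabricated.
import re

log_text = "2024-01-01T00:00:00Z INF |  https://random-words.trycloudflare.com |"
urls = re.findall(r'https://[a-zA-Z0-9\-]+\.trycloudflare\.com', log_text)
assert urls == ["https://random-words.trycloudflare.com"]
# localhost / 127.0.0.1 hits are skipped, known tunnel domains (or a configured
# custom domain) are returned, and after max_retries the function falls back to
# https://<custom_domain> when one was supplied.
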
+def generate_cloudflare_url(port, tunnel_token, custom_domain, run_mode):
+    """
+    Start a Cloudflare tunnel with complete log suppression.
+    Returns a tuple of (public_url, tunnel_pid) for background mode, or (public_url, None) for foreground mode.
+    """
+    if not port:
+        raise ValueError("Port must be provided for Cloudflare tunnel.")
+
+    if not tunnel_token or not tunnel_token.strip():
+        raise ValueError(
+            "Tunnel token is required for secure Cloudflare tunnel operation. "
+            "Temporary tunnels are not supported for production use."
+        )
+
+    # Cloudflare tunnel configuration
+
+    with open(os.devnull, 'w') as devnull:
+        old_stdout_fd = os.dup(1)
+        old_stderr_fd = os.dup(2)
+        os.dup2(devnull.fileno(), 1)
+        os.dup2(devnull.fileno(), 2)
+
+        try:
+            tunnel_pid, public_url = start_cloudflare_tunnel_with_url(
+                port=port,
+                tunnel_token=tunnel_token,
+                custom_domain=custom_domain
+            )
+            return public_url, tunnel_pid
+        finally:
             os.dup2(old_stdout_fd, 1)
             os.dup2(old_stderr_fd, 2)
diff --git a/keepercommander/service/util/verified_command.py b/keepercommander/service/util/verified_command.py
index ad4a97299..72c9c97ae 100644
--- a/keepercommander/service/util/verified_command.py
+++ b/keepercommander/service/util/verified_command.py
@@ -1,12 +1,115 @@
 class Verifycommand:
     @staticmethod
-    def is_append_command(command):
+    def validate_append_command(command):
         """
-        Returns True if the command is 'append-notes' and '--notes' is NOT present.
+        Validates 'append-notes' command and returns error message if invalid.
+        Returns None if valid, error message string if invalid.
         """
         if not command or command[0] != "append-notes":
-            return False
+            return None
+
+        has_notes = False
         for arg in command[1:]:
             if arg.startswith("--notes="):
-                return arg == "--notes="
-        return True
\ No newline at end of file
+                # Check if --notes= has a value after the equals sign
+                notes_value = arg[8:]  # Everything after "--notes="
+                has_notes = bool(notes_value.strip())
+                break
+            elif arg == "--notes":
+                # Check if there's a value after the --notes flag
+                arg_index = command.index(arg)
+                if arg_index + 1 < len(command) and not command[arg_index + 1].startswith("-"):
+                    notes_value = command[arg_index + 1]
+                    has_notes = bool(notes_value.strip())
+                break
+
+        if not has_notes:
+            return "Missing required parameter: --notes with non-empty value"
+        return None
+
+    @staticmethod
+    def validate_mkdir_command(command):
+        """
+        Validates 'mkdir' command and returns error message if invalid.
+        Returns None if valid, error message string if invalid.
+        """
+        if not command or command[0] != "mkdir":
+            return None
+
+        has_sf_or_uf = False
+        has_name = False
+
+        for arg in command[1:]:
+            # Check for shared folder or user folder flags
+            if arg in ["-sf", "--shared-folder", "-uf", "--user-folder"]:
+                has_sf_or_uf = True
+            # Check for folder name (non-flag argument)
+            elif not arg.startswith("-"):
+                has_name = True
+
+        missing_params = []
+        if not has_sf_or_uf:
+            missing_params.append("folder type flag (-sf/--shared-folder for shared folder or -uf/--user-folder for user folder)")
+        if not has_name:
+            missing_params.append("folder name")
+
+        if missing_params:
+            return f"Missing required parameters: {' and '.join(missing_params)}"
+        return None
+
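
# Illustrative sketch, not part of the patch: expected behaviour of the
# validators above. Commands arrive pre-tokenised; the record UID is fabricated.
from keepercommander.service.util.verified_command import Verifycommand

assert Verifycommand.validate_append_command(
    ["append-notes", "RECORD_UID", "--notes=updated via API"]) is None
assert Verifycommand.validate_append_command(
    ["append-notes", "RECORD_UID", "--notes="]) == \
    "Missing required parameter: --notes with non-empty value"
assert Verifycommand.validate_mkdir_command(["mkdir", "-uf", "New Folder"]) is None
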
+    @staticmethod
+    def validate_transform_folder_command(command):
+        """
+        Validates 'transform-folder' command and returns error message if invalid.
+        Returns None if valid, error message string if invalid.
+        """
+        if not command or command[0] != "transform-folder":
+            return None
+
+        has_force_flag = False
+        has_folder_uid = False
+
+        for arg in command[1:]:
+            # Check for -f or --force flag
+            if arg in ["-f", "--force"]:
+                has_force_flag = True
+            # Check for folder UID (non-flag argument)
+            elif not arg.startswith("-"):
+                has_folder_uid = True
+
+        missing_params = []
+        if not has_force_flag:
+            missing_params.append("-f/--force flag to bypass interactive confirmation")
+        if not has_folder_uid:
+            missing_params.append("folder UID or path")
+
+        if missing_params:
+            return f"Missing required parameters: {' and '.join(missing_params)}"
+        return None
+
+    # Legacy methods for backward compatibility
+    @staticmethod
+    def is_append_command(command):
+        """Legacy method - returns True if command is invalid."""
+        return Verifycommand.validate_append_command(command) is not None
+
+    @staticmethod
+    def is_mkdir_command(command):
+        """Legacy method - returns True if command is invalid."""
+        return Verifycommand.validate_mkdir_command(command) is not None
+
+    @staticmethod
+    def is_transform_folder_command(command):
+        """Legacy method - returns True if command is invalid."""
+        return Verifycommand.validate_transform_folder_command(command) is not None
\ No newline at end of file
diff --git a/unit-tests/service/test_auth_security.py b/unit-tests/service/test_auth_security.py
index e8d0d4136..3f93b6610 100644
--- a/unit-tests/service/test_auth_security.py
+++ b/unit-tests/service/test_auth_security.py
@@ -24,8 +24,8 @@ def test_auth_check_missing_api_key(self):
         with self.app.test_request_context('/test', method='POST'):
             response = auth_check(lambda *args, **kwargs: ({'status': 'success'}, 200))()
             self.assertEqual(response[1], 401)
-            self.assertEqual(response[0]['status'], 'fail')
-            self.assertIn('api key', response[0]['message'])
+            self.assertEqual(response[0]['status'], 'error')
+            self.assertIn('api key', response[0]['error'])
 
     @mock.patch.object(ConfigReader, 'read_config')
     def test_auth_check_invalid_api_key(self, mock_read_config):
@@ -36,7 +36,7 @@ def test_auth_check_invalid_api_key(self, mock_read_config):
                                           headers={'api-key': 'test_key'}):
             response = auth_check(lambda *args, **kwargs: ({'status': 'success'}, 200))()
             self.assertEqual(response[1], 401)
-            self.assertEqual(response[0]['status'], 'fail')
+            self.assertEqual(response[0]['status'], 'error')
 
     @mock.patch.object(ConfigReader, 'read_config')
     def test_auth_check_expired_key(self, mock_read_config):
@@ -50,8 +50,8 @@ def test_auth_check_expired_key(self, mock_read_config):
                                           headers={'api-key': 'test_key'}):
             response = auth_check(lambda *args, **kwargs: ({'status': 'success'}, 200))()
             self.assertEqual(response[1], 401)
-            self.assertEqual(response[0]['status'], 'fail')
-            self.assertIn('expired', response[0]['message'])
+            self.assertEqual(response[0]['status'], 'error')
+            self.assertIn('expired', response[0]['error'])
 
     # def test_security_check_blocked_ip(self):
     #     """Test security check with blocked IP"""
@@ -98,5 +98,5 @@ def test_policy_check_denied_command(self, mock_read_config):
                                           json={"command": "delete"}):
             response = policy_check(lambda *args, **kwargs: ({'status': 'success'}, 200))()
             self.assertEqual(response[1], 403)
-            self.assertEqual(response[0]['status'], 'fail')
-            self.assertIn('Not permitted', response[0]['message'])
+            self.assertEqual(response[0]['status'], 'error')
+            self.assertIn('Not permitted', response[0]['error'])
\ No newline at end of file
diff --git a/unit-tests/service/test_command.py 
b/unit-tests/service/test_command.py index 4b247e7c5..fbdcb9845 100644 --- a/unit-tests/service/test_command.py +++ b/unit-tests/service/test_command.py @@ -92,7 +92,8 @@ def test_response_parsing(self): tree_response = "Root\n Folder1\n SubFolder1" parsed = parse_keeper_response("tree", tree_response) self.assertEqual(parsed["command"], "tree") - self.assertIsInstance(parsed["data"], list) + self.assertIsInstance(parsed["data"], dict) + self.assertIn("tree", parsed["data"]) def test_capture_output(self): """Test command output capture""" @@ -101,7 +102,7 @@ def test_capture_output(self): mock_params = {"session": "active"} with mock.patch('keepercommander.cli.do_command', return_value=expected_output): - return_value, output = CommandExecutor.capture_output(mock_params, test_command) + return_value, output, logs = CommandExecutor.capture_output_and_logs(mock_params, test_command) self.assertEqual(return_value, expected_output) @unittest.skip diff --git a/unit-tests/service/test_create_service.py b/unit-tests/service/test_create_service.py index 42e1f3b27..d2acb10c6 100644 --- a/unit-tests/service/test_create_service.py +++ b/unit-tests/service/test_create_service.py @@ -22,6 +22,12 @@ def test_get_parser(self): args = parser.parse_args(['--ngrok', 'token123']) self.assertEqual(args.ngrok, 'token123') + + args = parser.parse_args(['--cloudflare', 'cf_token123']) + self.assertEqual(args.cloudflare, 'cf_token123') + + args = parser.parse_args(['--cloudflare_custom_domain', 'example.com']) + self.assertEqual(args.cloudflare_custom_domain, 'example.com') @patch('keepercommander.service.core.service_manager.ServiceManager') def test_execute_service_already_running(self, mock_service_manager): @@ -35,7 +41,7 @@ def test_execute_service_already_running(self, mock_service_manager): def test_handle_configuration_streamlined(self): """Test streamlined configuration handling.""" config_data = self.command.service_config.create_default_config() - args = StreamlineArgs(port=8080, commands='record-list', ngrok=None, allowedip='0.0.0.0' ,deniedip='', ngrok_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled='y') + args = StreamlineArgs(port=8080, commands='record-list', ngrok=None, allowedip='0.0.0.0' ,deniedip='', ngrok_custom_domain=None, cloudflare=None, cloudflare_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled='y') with patch.object(self.command.config_handler, 'handle_streamlined_config') as mock_streamlined: self.command._handle_configuration(config_data, self.params, args) @@ -44,7 +50,7 @@ def test_handle_configuration_streamlined(self): def test_handle_configuration_interactive(self): """Test interactive configuration handling.""" config_data = self.command.service_config.create_default_config() - args = StreamlineArgs(port=None, commands=None, ngrok=None, allowedip='' ,deniedip='', ngrok_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled=None) + args = StreamlineArgs(port=None, commands=None, ngrok=None, allowedip='' ,deniedip='', ngrok_custom_domain=None, cloudflare=None, cloudflare_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled=None) with patch.object(self.command.config_handler, 'handle_interactive_config') as mock_interactive, \ patch.object(self.command.security_handler, 'configure_security') as mock_security: @@ -55,7 +61,7 @@ def 
test_handle_configuration_interactive(self): def test_create_and_save_record(self): """Test record creation and saving.""" config_data = self.command.service_config.create_default_config() - args = StreamlineArgs(port=8080, commands='record-list', ngrok=None, allowedip='0.0.0.0' ,deniedip='', ngrok_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled='y') + args = StreamlineArgs(port=8080, commands='record-list', ngrok=None, allowedip='0.0.0.0' ,deniedip='', ngrok_custom_domain=None, cloudflare=None, cloudflare_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled='y') with patch.object(self.command.service_config, 'create_record') as mock_create_record, \ patch.object(self.command.service_config, 'save_config') as mock_save_config: @@ -75,7 +81,7 @@ def test_create_and_save_record(self): def test_validation_error_handling(self): """Test handling of validation errors during execution.""" - args = StreamlineArgs(port=-1, commands='record-list', ngrok=None, allowedip='0.0.0.0' ,deniedip='', ngrok_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled='y') + args = StreamlineArgs(port=-1, commands='record-list', ngrok=None, allowedip='0.0.0.0' ,deniedip='', ngrok_custom_domain=None, cloudflare=None, cloudflare_custom_domain=None, certfile='', certpassword='', fileformat='json', run_mode='foreground', queue_enabled='y') with patch('builtins.print') as mock_print: with patch.object(self.command.service_config, 'create_default_config') as mock_create_config: @@ -84,5 +90,226 @@ def test_validation_error_handling(self): mock_print.assert_called() + def test_cloudflare_streamlined_configuration(self): + """Test streamlined configuration with Cloudflare tunnel.""" + config_data = self.command.service_config.create_default_config() + args = StreamlineArgs( + port=8080, + commands='record-list', + ngrok=None, + allowedip='0.0.0.0', + deniedip='', + ngrok_custom_domain=None, + cloudflare='cf_token123', + cloudflare_custom_domain='tunnel.example.com', + certfile='', + certpassword='', + fileformat='json', + run_mode='foreground', + queue_enabled='y' + ) + + with patch.object(self.command.config_handler, 'handle_streamlined_config') as mock_streamlined: + self.command._handle_configuration(config_data, self.params, args) + mock_streamlined.assert_called_once_with(config_data, args, self.params) + + def test_cloudflare_validation_missing_token(self): + """Test validation error when Cloudflare token is missing but domain is provided.""" + args = StreamlineArgs( + port=8080, + commands='record-list', + ngrok=None, + allowedip='0.0.0.0', + deniedip='', + ngrok_custom_domain=None, + cloudflare=None, + cloudflare_custom_domain='tunnel.example.com', + certfile='', + certpassword='', + fileformat='json', + run_mode='foreground', + queue_enabled='y' + ) + + with patch('builtins.print') as mock_print: + with patch.object(self.command.service_config, 'create_default_config') as mock_create_config: + mock_create_config.return_value = {} + self.command.execute(self.params, cloudflare_custom_domain='tunnel.example.com') + mock_print.assert_called() + + def test_cloudflare_validation_missing_domain(self): + """Test validation error when Cloudflare domain is missing but token is provided.""" + args = StreamlineArgs( + port=8080, + commands='record-list', + ngrok=None, + allowedip='0.0.0.0', + deniedip='', + ngrok_custom_domain=None, + cloudflare='cf_token123', + 
cloudflare_custom_domain=None, + certfile='', + certpassword='', + fileformat='json', + run_mode='foreground', + queue_enabled='y' + ) + + with patch('builtins.print') as mock_print: + with patch.object(self.command.service_config, 'create_default_config') as mock_create_config: + mock_create_config.return_value = {} + self.command.execute(self.params, cloudflare='cf_token123') + mock_print.assert_called() + + def test_cloudflare_and_ngrok_mutual_exclusion(self): + """Test that Cloudflare and ngrok cannot be used together.""" + args = StreamlineArgs( + port=8080, + commands='record-list', + ngrok='ngrok_token123', + allowedip='0.0.0.0', + deniedip='', + ngrok_custom_domain='ngrok.example.com', + cloudflare='cf_token123', + cloudflare_custom_domain='tunnel.example.com', + certfile='', + certpassword='', + fileformat='json', + run_mode='foreground', + queue_enabled='y' + ) + + with patch('builtins.print') as mock_print: + with patch.object(self.command.service_config, 'create_default_config') as mock_create_config: + mock_create_config.return_value = {} + self.command.execute(self.params, ngrok='ngrok_token123', cloudflare='cf_token123') + mock_print.assert_called() + + @patch('keepercommander.service.config.cloudflare_config.CloudflareConfigurator.configure_cloudflare') + def test_cloudflare_tunnel_startup_success(self, mock_cloudflare_configure): + """Test successful Cloudflare tunnel startup.""" + config_data = self.command.service_config.create_default_config() + config_data.update({ + 'cloudflare': 'y', + 'cloudflare_tunnel_token': 'cf_token123', + 'cloudflare_custom_domain': 'tunnel.example.com', + 'port': 8080 + }) + + mock_cloudflare_configure.return_value = 12345 # Mock PID + + args = StreamlineArgs( + port=8080, + commands='record-list', + ngrok=None, + allowedip='0.0.0.0', + deniedip='', + ngrok_custom_domain=None, + cloudflare='cf_token123', + cloudflare_custom_domain='tunnel.example.com', + certfile='', + certpassword='', + fileformat='json', + run_mode='foreground', + queue_enabled='y' + ) + + with patch.object(self.command.config_handler, 'handle_streamlined_config') as mock_streamlined: + self.command._handle_configuration(config_data, self.params, args) + mock_streamlined.assert_called_once_with(config_data, args, self.params) + + @patch('keepercommander.service.core.globals.init_globals') + @patch('keepercommander.service.core.service_manager.ServiceManager.start_service') + @patch('keepercommander.service.core.service_manager.ServiceManager.get_status') + def test_cloudflare_tunnel_startup_failure(self, mock_get_status, mock_start_service, mock_init_globals): + """Test Cloudflare tunnel startup failure due to firewall.""" + # Mock that service is not already running + mock_get_status.return_value = "Commander Service is not running" + + # Mock service startup failure due to Cloudflare tunnel issues + mock_start_service.side_effect = Exception("Commander Service failed to start: Cloudflare tunnel failed to connect. 
This is likely due to firewall/proxy blocking the connection.") + + with patch('builtins.print') as mock_print: + with patch.object(self.command.service_config, 'create_default_config') as mock_create_config: + with patch.object(self.command.service_config, 'create_record') as mock_create_record: + with patch.object(self.command.service_config, 'save_config') as mock_save_config: + with patch.object(self.command.service_config, 'update_or_add_record') as mock_update_record: + with patch.object(self.command.service_config.validator, 'validate_cloudflare_token') as mock_validate_token: + mock_create_config.return_value = { + 'is_advanced_security_enabled': 'n', + 'fileformat': 'json' + } + mock_create_record.return_value = {'api-key': 'test-key'} + mock_validate_token.return_value = 'cf_token123' # Mock valid token + + # This should trigger the exception handling in execute() + self.command.execute( + self.params, + port=8080, + allowedip='0.0.0.0', + deniedip='', + commands='record-list', + ngrok=None, + ngrok_custom_domain=None, + cloudflare='cf_token123', + cloudflare_custom_domain='tunnel.example.com', + certfile='', + certpassword='', + fileformat='json', + run_mode='foreground', + queue_enabled='y' + ) + + # Verify that the error was printed + mock_print.assert_called_with("Unexpected error: Commander Service failed to start: Cloudflare tunnel failed to connect. This is likely due to firewall/proxy blocking the connection.") + + def test_cloudflare_token_validation(self): + """Test Cloudflare token format validation.""" + # Test valid token format + args = StreamlineArgs( + port=8080, + commands='record-list', + ngrok=None, + allowedip='0.0.0.0', + deniedip='', + ngrok_custom_domain=None, + cloudflare='eyJhIjoiYWJjZGVmZ2hpams', # Base64-like token + cloudflare_custom_domain='tunnel.example.com', + certfile='', + certpassword='', + fileformat='json', + run_mode='foreground', + queue_enabled='y' + ) + + with patch.object(self.command.config_handler, 'handle_streamlined_config') as mock_streamlined: + config_data = self.command.service_config.create_default_config() + self.command._handle_configuration(config_data, self.params, args) + mock_streamlined.assert_called_once_with(config_data, args, self.params) + + def test_cloudflare_domain_validation(self): + """Test Cloudflare custom domain validation.""" + # Test valid domain format + args = StreamlineArgs( + port=8080, + commands='record-list', + ngrok=None, + allowedip='0.0.0.0', + deniedip='', + ngrok_custom_domain=None, + cloudflare='cf_token123', + cloudflare_custom_domain='my-tunnel.example.com', + certfile='', + certpassword='', + fileformat='json', + run_mode='foreground', + queue_enabled='y' + ) + + with patch.object(self.command.config_handler, 'handle_streamlined_config') as mock_streamlined: + config_data = self.command.service_config.create_default_config() + self.command._handle_configuration(config_data, self.params, args) + mock_streamlined.assert_called_once_with(config_data, args, self.params) + if __name__ == '__main__': unittest.main() \ No newline at end of file diff --git a/unit-tests/service/test_response_parser.py b/unit-tests/service/test_response_parser.py index a56de0c1d..f7bcf3671 100644 --- a/unit-tests/service/test_response_parser.py +++ b/unit-tests/service/test_response_parser.py @@ -39,15 +39,15 @@ def test_parse_tree_command(self): self.assertEqual(result['status'], 'success') self.assertEqual(result['command'], 'tree') - self.assertEqual(len(result['data']), 4) + 
self.assertEqual(len(result['data']['tree']), 4)  # Updated: now returns dict with 'tree' key
 
-        self.assertEqual(result['data'][0]['level'], 0)
-        self.assertEqual(result['data'][0]['name'], 'Root')
-        self.assertEqual(result['data'][0]['path'], 'Root')
+        self.assertEqual(result['data']['tree'][0]['level'], 0)
+        self.assertEqual(result['data']['tree'][0]['name'], 'Root')
+        self.assertEqual(result['data']['tree'][0]['path'], 'Root')
 
-        self.assertEqual(result['data'][2]['level'], 2)
-        self.assertEqual(result['data'][2]['name'], 'SubFolder1')
-        self.assertEqual(result['data'][2]['path'], 'Root/Folder1/SubFolder1')
+        self.assertEqual(result['data']['tree'][1]['level'], 0)
+        self.assertEqual(result['data']['tree'][1]['name'], 'Folder1')
+        self.assertEqual(result['data']['tree'][1]['path'], 'Folder1')
 
     def test_parse_mkdir_command(self):
         """Test parsing of 'mkdir' command output"""
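
# Illustrative sketch, not part of the patch: end-to-end use of the public entry
# point with the reshaped tree payload these tests assert against. The response
# text is fabricated, the import path is assumed, and this presumes
# _find_parser_method routes "tree" to _parse_tree_command.
result = parse_keeper_response("tree", "Root\n    Folder1\n        SubFolder1")
assert result["status"] == "success"
assert isinstance(result["data"], dict)
assert [item["path"] for item in result["data"]["tree"]] == [
    "Root", "Root/Folder1", "Root/Folder1/SubFolder1"]
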