SSH Private Keys Accidentally Committed to Git Repositories

Critical Risk · Secrets Exposure
Tags: ssh, git, private-keys, deployment-keys, version-control, server-access, infrastructure, cryptographic-keys

What it is

A critical security vulnerability in which SSH private keys, including deployment keys, user keys, and server access keys, are accidentally committed to Git repositories. Anyone with access to the repository, or to any clone of it, obtains credentials granting direct access to servers, services, and repositories, opening the door to unauthorized access to production systems and potentially complete infrastructure compromise. Because Git preserves history, the exposure persists even after the key file is deleted in a later commit.
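
Before looking at how these keys leak, it helps to know how to check whether a repository is already affected. A minimal detection sketch using plain Git (the patterns are illustrative, not exhaustive):

#!/bin/bash
# scan_repo_for_keys.sh - check the working tree and full history for key material

# Search the current working tree for PEM headers
git grep -nE "BEGIN (RSA|DSA|EC|OPENSSH) PRIVATE KEY" || echo "Working tree clean"

# Pickaxe search: list commits that added or removed private key content
git log --all --oneline -S "PRIVATE KEY"

# List file names anywhere in history that look like private keys
git log --all --name-only --pretty=format: | sort -u \
    | grep -E "(id_rsa|id_ed25519|id_ecdsa|\.pem$|_key$)" || echo "No key-like filenames found"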

#!/bin/bash
# VULNERABLE: Deployment script with embedded SSH keys

# deploy_infrastructure.sh - Contains multiple private keys
set -e

# VULNERABLE: Production server SSH key embedded in script
PROD_SSH_KEY="-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEA4f6wWyEd2JH4mKqVl4jfP2YQw5xGvE7mK8pL2cR4nX9dV3m
5xBHj9E2y8kL5+nR4dJ8fV9qL3xW4ePKsOr2JHfCbYtG6H4Wq7iI8zJ4bZ8A9x
Y3nU4K7gP5oA1L8gK7z2jY4Q1fHdF5mL9dV2xK8pR7tM4jQ3lA8fE6nH9dJ4cF5=
-----END RSA PRIVATE KEY-----"

# VULNERABLE: Staging server SSH key
STAGING_SSH_KEY="-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABFwAAAAdzc2gtcn
NhAAAAAwEAAQAAAQEAz8yJDfW2JH4mKqVl4jfP2YQw5xGvE7mK8pL2cR4nX9dV3m
5xBHj9E2y8kL5+nR4dJ8fV9qL3xW4ePKsOr2JHfCbYtG6H4Wq7iI8zJ4bZ8A9x
Y3nU4K7gP5oA1L8gK7z2jY4Q1fHdF5mL9dV2xK8pR7tM4jQ3lA8fE6nH9dJ4cF5=
-----END OPENSSH PRIVATE KEY-----"

# VULNERABLE: Database server access key
DB_SSH_KEY="-----BEGIN ED25519 PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACDTvOyBKXLs8pK4JGWv3RRxRKlYAEwFh9Hd7RvD5Y+R8gAAAABJ4+2PKeP
tjwAAAAtzc2gtZWQyNTUxOQAAACDTvOyBKXLs8pK4JGWv3RRxRKlYAEwFh9Hd7RvD5Y
+R8gAAAABJs4K8P7TYT3J2hGJtM4B6fD8+EpFHEAGzJ9Kw/R2hYf8g1Lzsg=
-----END ED25519 PRIVATE KEY-----"

echo "Starting infrastructure deployment..."

# Function to setup SSH key for deployment
setup_ssh_key() {
    local key_content="$1"
    local key_file="$2"
    
    # VULNERABLE: Writing private key to disk
    echo "$key_content" > "$key_file"
    chmod 600 "$key_file"
    echo "SSH key written to $key_file"
}

# Function to deploy to server
deploy_to_server() {
    local server="$1"
    local key_file="$2"
    local app_version="$3"
    
    echo "Deploying to $server using key $key_file"
    
    # VULNERABLE: SSH commands that expose key paths in process lists
    ssh -i "$key_file" -o StrictHostKeyChecking=no "deploy@$server" \
        "sudo systemctl stop myapp"
    
    ssh -i "$key_file" -o StrictHostKeyChecking=no "deploy@$server" \
        "sudo /opt/deploy/update_app.sh $app_version"
    
    ssh -i "$key_file" -o StrictHostKeyChecking=no "deploy@$server" \
        "sudo systemctl start myapp"
    
    echo "Deployment to $server completed"
}

# Main deployment logic
main() {
    local app_version="${1:-latest}"
    
    # Create temporary directory for keys
    KEY_DIR="/tmp/deploy_keys_$$"
    mkdir -p "$KEY_DIR"
    
    # Setup SSH keys for different environments
    setup_ssh_key "$PROD_SSH_KEY" "$KEY_DIR/prod_key"
    setup_ssh_key "$STAGING_SSH_KEY" "$KEY_DIR/staging_key"
    setup_ssh_key "$DB_SSH_KEY" "$KEY_DIR/db_key"
    
    # Deploy to production servers
    echo "Deploying to production..."
    deploy_to_server "prod1.company.com" "$KEY_DIR/prod_key" "$app_version"
    deploy_to_server "prod2.company.com" "$KEY_DIR/prod_key" "$app_version"
    
    # Deploy to staging
    echo "Deploying to staging..."
    deploy_to_server "staging.company.com" "$KEY_DIR/staging_key" "$app_version"
    
    # Update database
    echo "Updating database..."
    ssh -i "$KEY_DIR/db_key" -o StrictHostKeyChecking=no "dbadmin@db.company.com" \
        "sudo /opt/db/migrate.sh $app_version"
    
    # VULNERABLE: Cleanup may fail, leaving keys on disk
    rm -rf "$KEY_DIR"
    
    echo "Deployment completed successfully!"
}

# VULNERABLE: Keys remain in this script and git history
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    main "$@"
fi

# ansible/deploy.yml - VULNERABLE: Ansible playbook with embedded keys
---
- name: Deploy Application
  hosts: production
  vars:
    # VULNERABLE: SSH private key in Ansible variable
    deployment_ssh_key: |
      -----BEGIN RSA PRIVATE KEY-----
      MIIEowIBAAKCAQEA4f6wWyEd2JH4mKqVl4jfP2YQw5xGvE7mK8pL2cR4nX9dV3m
      ... [full private key content] ...
      -----END RSA PRIVATE KEY-----
    
    app_version: "{{ version | default('latest') }}"
    
  tasks:
    - name: Write deployment key to target server
      copy:
        content: "{{ deployment_ssh_key }}"
        dest: /tmp/deploy_key
        mode: '0600'
        owner: deploy
        group: deploy
      
    - name: Deploy application
      shell: |
        /opt/deploy/deploy_app.sh {{ app_version }}
      become: yes
      
    - name: Remove deployment key
      file:
        path: /tmp/deploy_key
        state: absent

# terraform/ssh_keys.tf - VULNERABLE: Terraform with embedded keys
resource "aws_instance" "app_server" {
  ami           = "ami-12345678"
  instance_type = "t3.medium"
  key_name      = aws_key_pair.app_key.key_name
  
  # VULNERABLE: Private key in Terraform configuration
  connection {
    type        = "ssh"
    user        = "ubuntu"
    private_key = "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQ...\n-----END RSA PRIVATE KEY-----"
    host        = self.public_ip
  }
  
  provisioner "file" {
    source      = "deploy.sh"
    destination = "/tmp/deploy.sh"
  }
}

#!/bin/bash
# SECURE: Deployment script using proper key management

# secure_deploy.sh - No embedded keys, uses secure practices
set -euo pipefail  # Strict error handling

# Configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CONFIG_DIR="${SCRIPT_DIR}/config"
LOG_DIR="/var/log/deploy"
mkdir -p "$LOG_DIR"  # Ensure the log directory exists before the first log() call
LOG_FILE="${LOG_DIR}/deploy_$(date +%Y%m%d_%H%M%S).log"

# Logging function
log() {
    echo "[$(date +'%Y-%m-%d %H:%M:%S')] $*" | tee -a "$LOG_FILE"
}

# SECURE: Validate environment and prerequisites
validate_environment() {
    log "Validating deployment environment..."
    
    # Check required environment variables
    local required_vars=("DEPLOYMENT_ENV" "APP_VERSION" "SSH_KEY_STORAGE_PATH")
    local missing_vars=()
    
    for var in "${required_vars[@]}"; do
        if [[ -z "${!var:-}" ]]; then
            missing_vars+=("$var")
        fi
    done
    
    if [[ ${#missing_vars[@]} -gt 0 ]]; then
        log "ERROR: Missing required environment variables: ${missing_vars[*]}"
        log "Please set: ${missing_vars[*]}"
        exit 1
    fi
    
    # Validate SSH key storage path exists and has proper permissions
    if [[ ! -d "$SSH_KEY_STORAGE_PATH" ]]; then
        log "ERROR: SSH key storage directory not found: $SSH_KEY_STORAGE_PATH"
        exit 1
    fi
    
    # Check directory permissions (should be 700)
    local dir_perms
    dir_perms=$(stat -c %a "$SSH_KEY_STORAGE_PATH")
    if [[ "$dir_perms" != "700" ]]; then
        log "WARNING: SSH key storage directory has insecure permissions: $dir_perms"
        log "Recommended: chmod 700 $SSH_KEY_STORAGE_PATH"
    fi
    
    # Validate SSH agent or available keys
    if [[ -n "${SSH_AUTH_SOCK:-}" ]]; then
        log "Using SSH agent for authentication"
        ssh-add -l > /dev/null || {
            log "ERROR: No SSH keys loaded in agent"
            exit 1
        }
    else
        log "SSH agent not available, will use key files"
    fi
    
    log "Environment validation completed"
}

# SECURE: Get SSH key path with validation
get_ssh_key_path() {
    local env="$1"
    local key_name="$2"
    local key_path="$SSH_KEY_STORAGE_PATH/$env/${key_name}_key"
    
    if [[ ! -f "$key_path" ]]; then
        log "ERROR: SSH key not found: $key_path"
        return 1
    fi
    
    # Validate key permissions
    local key_perms
    key_perms=$(stat -c %a "$key_path")
    if [[ "$key_perms" != "600" && "$key_perms" != "400" ]]; then
        log "ERROR: SSH key has insecure permissions: $key_path ($key_perms)"
        return 1
    fi
    
    echo "$key_path"
}

# SECURE: Execute SSH command with proper error handling
execute_ssh_command() {
    local host="$1"
    local command="$2"
    local key_path="${3:-}"
    local user="${4:-deploy}"
    
    local ssh_opts=(
        "-o" "StrictHostKeyChecking=yes"
        "-o" "UserKnownHostsFile=$HOME/.ssh/known_hosts"
        "-o" "ConnectTimeout=10"
        "-o" "ServerAliveInterval=60"
        "-o" "ServerAliveCountMax=3"
        "-o" "LogLevel=ERROR"  # Reduce verbosity to avoid key exposure
    )
    
    # Add key if not using SSH agent
    if [[ -n "$key_path" && -z "${SSH_AUTH_SOCK:-}" ]]; then
        ssh_opts+=("-i" "$key_path")
    fi
    
    log "Executing command on $host: $(echo "$command" | cut -c1-50)..."
    
    # Execute with timeout and proper error handling
    if timeout 300 ssh "${ssh_opts[@]}" "$user@$host" "$command"; then
        log "Command executed successfully on $host"
        return 0
    else
        log "ERROR: Command failed on $host"
        return 1
    fi
}

# SECURE: Deploy to server with rollback capability
deploy_to_server() {
    local server="$1"
    local env="$2"
    local app_version="$3"
    
    log "Starting deployment to $server (env: $env, version: $app_version)"
    
    # Get SSH key path (left empty when the SSH agent supplies the key,
    # so the reference below stays safe under set -u)
    local key_path=""
    if [[ -z "${SSH_AUTH_SOCK:-}" ]]; then
        key_path=$(get_ssh_key_path "$env" "deploy") || return 1
    fi
    
    # Pre-deployment health check
    if ! execute_ssh_command "$server" "systemctl is-active --quiet myapp" "$key_path"; then
        log "WARNING: Application not running on $server before deployment"
    fi
    
    # Create deployment backup point
    local backup_cmd="sudo /opt/deploy/create_backup.sh $app_version"
    if ! execute_ssh_command "$server" "$backup_cmd" "$key_path"; then
        log "ERROR: Failed to create backup on $server"
        return 1
    fi
    
    # Execute deployment steps
    local deployment_steps=(
        "sudo systemctl stop myapp"
        "sudo /opt/deploy/update_app.sh $app_version"
        "sudo systemctl start myapp"
        "sudo /opt/deploy/health_check.sh"
    )
    
    for step in "${deployment_steps[@]}"; do
        if ! execute_ssh_command "$server" "$step" "$key_path"; then
            log "ERROR: Deployment step failed on $server: $step"
            log "Initiating rollback..."
            
            # Attempt rollback
            execute_ssh_command "$server" "sudo /opt/deploy/rollback.sh" "$key_path" || {
                log "CRITICAL: Rollback failed on $server"
            }
            return 1
        fi
    done
    
    # Post-deployment verification
    if execute_ssh_command "$server" "sudo /opt/deploy/verify_deployment.sh" "$key_path"; then
        log "Deployment to $server completed successfully"
        return 0
    else
        log "ERROR: Post-deployment verification failed on $server"
        return 1
    fi
}

# SECURE: Main deployment function
main() {
    local app_version="${1:-${APP_VERSION:-}}"  # Guard against unbound APP_VERSION under set -u
    local deployment_env="${DEPLOYMENT_ENV:-staging}"
    
    log "Starting secure deployment process"
    log "Environment: $deployment_env"
    log "Application version: $app_version"
    
    # Validate environment
    validate_environment
    
    # Load server configuration
    local config_file="$CONFIG_DIR/servers_${deployment_env}.conf"
    if [[ ! -f "$config_file" ]]; then
        log "ERROR: Server configuration not found: $config_file"
        exit 1
    fi
    
    # Read server list (one server per line)
    local servers=()
    while IFS= read -r server; do
        [[ -n "$server" && ! "$server" =~ ^#.* ]] && servers+=("$server")
    done < "$config_file"
    
    if [[ ${#servers[@]} -eq 0 ]]; then
        log "ERROR: No servers configured for environment: $deployment_env"
        exit 1
    fi
    
    log "Deploying to ${#servers[@]} servers: ${servers[*]}"
    
    # Deploy to each server
    local failed_servers=()
    for server in "${servers[@]}"; do
        if ! deploy_to_server "$server" "$deployment_env" "$app_version"; then
            failed_servers+=("$server")
        fi
    done
    
    # Report results
    if [[ ${#failed_servers[@]} -eq 0 ]]; then
        log "Deployment completed successfully on all servers"
        exit 0
    else
        log "FAILURE: Deployment failed on servers: ${failed_servers[*]}"
        log "Please check logs and consider manual intervention"
        exit 1
    fi
}

# Ensure script is executable only by owner
if [[ $(stat -c %a "$0") != "700" ]]; then
    log "WARNING: Deployment script should have restrictive permissions (700)"
fi

# Execute main function if script is run directly
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    main "$@"
fi

# config/servers_production.conf - Server configuration (no keys)
prod1.company.com
prod2.company.com
prod3.company.com

# config/servers_staging.conf
staging.company.com

# ansible/secure_deploy.yml - SECURE: Ansible playbook without embedded keys
---
- name: Secure Application Deployment
  hosts: "{{ deployment_env | default('staging') }}"
  become: no
  vars:
    app_version: "{{ version | default('latest') }}"
    deployment_user: "{{ ansible_user | default('deploy') }}"
    
  pre_tasks:
    - name: Validate deployment environment
      assert:
        that:
          - deployment_env is defined
          - app_version is defined
          - ansible_ssh_private_key_file is defined or ansible_ssh_common_args is defined
        fail_msg: "Required deployment variables not set"
    
    - name: Create deployment log directory
      file:
        path: /var/log/deploy
        state: directory
        mode: '0755'
        owner: "{{ deployment_user }}"
      become: yes
  
  tasks:
    # A play-level rescue is invalid in Ansible; rollback-on-failure requires
    # wrapping the deployment steps in a block with a rescue section.
    - name: Deploy application with rollback on failure
      block:
        - name: Check application status before deployment
          systemd:
            name: myapp
            state: started
          register: app_status_before
          ignore_errors: true

        - name: Create application backup
          command: /opt/deploy/create_backup.sh {{ app_version }}
          become: yes
          register: backup_result

        - name: Stop application for deployment
          systemd:
            name: myapp
            state: stopped
          become: yes

        - name: Deploy application
          command: /opt/deploy/update_app.sh {{ app_version }}
          become: yes
          register: deploy_result

        - name: Start application after deployment
          systemd:
            name: myapp
            state: started
            enabled: yes
          become: yes

        - name: Verify deployment
          command: /opt/deploy/verify_deployment.sh
          register: verify_result
          retries: 3
          delay: 10
          until: verify_result.rc == 0

        - name: Log deployment success
          lineinfile:
            path: /var/log/deploy/deployment.log
            line: "{{ ansible_date_time.iso8601 }}: Successful deployment of {{ app_version }} to {{ inventory_hostname }}"
            create: yes
          become: yes

      rescue:
        - name: Rollback on deployment failure
          command: /opt/deploy/rollback.sh
          become: yes
          ignore_errors: true

        - name: Log deployment failure
          lineinfile:
            path: /var/log/deploy/deployment.log
            line: "{{ ansible_date_time.iso8601 }}: FAILED deployment of {{ app_version }} to {{ inventory_hostname }}"
            create: yes
          become: yes

        - name: Fail deployment
          fail:
            msg: "Deployment failed on {{ inventory_hostname }}, rollback attempted"

# terraform/secure_infrastructure.tf - SECURE: Terraform without embedded keys
terraform {
  required_version = ">= 1.0"
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
  }
}

# Use AWS key pair resource without embedding private key
resource "aws_key_pair" "app_deployment" {
  key_name   = "app-deployment-${var.environment}"
  public_key = file(var.ssh_public_key_path)  # Path to public key file
  
  tags = {
    Name        = "App Deployment Key - ${var.environment}"
    Environment = var.environment
    ManagedBy   = "terraform"
  }
}

resource "aws_instance" "app_server" {
  count                  = var.server_count
  ami                   = var.app_ami_id
  instance_type         = var.instance_type
  key_name              = aws_key_pair.app_deployment.key_name
  vpc_security_group_ids = [aws_security_group.app_sg.id]
  subnet_id             = var.subnet_ids[count.index % length(var.subnet_ids)]
  
  # Security hardening
  monitoring                          = true
  associate_public_ip_address        = false
  instance_initiated_shutdown_behavior = "stop"
  
  # Use remote provisioner with external key management
  provisioner "remote-exec" {
    connection {
      type  = "ssh"
      user  = "ubuntu"
      host  = self.private_ip
      # Private key is managed externally, not in Terraform:
      # agent = true makes the provisioner authenticate via the SSH agent
      agent = true
    }
    
    inline = [
      "sudo apt-get update",
      "sudo apt-get install -y docker.io",
      "sudo systemctl enable docker",
      "sudo systemctl start docker"
    ]
  }
  
  tags = {
    Name        = "app-server-${count.index + 1}-${var.environment}"
    Environment = var.environment
    Role        = "application"
  }
}

# variables.tf
variable "ssh_public_key_path" {
  description = "Path to SSH public key file"
  type        = string
  # No default value - must be provided
}

variable "environment" {
  description = "Deployment environment"
  type        = string
  validation {
    condition     = contains(["staging", "production"], var.environment)
    error_message = "Environment must be either 'staging' or 'production'."
  }
}

# Usage: terraform apply -var="ssh_public_key_path=/secure/keys/production/deploy_key.pub"

💡 Why This Fix Works

The vulnerable infrastructure scripts embed SSH private keys directly in code, so the keys are visible to anyone with repository access and preserved indefinitely in Git history; the ssh -i invocations additionally leak key file paths into process lists. The secure version keeps keys out of the repository entirely, combining external key storage, SSH agent forwarding, and certificate-based authentication with strict host-key checking and comprehensive error handling.
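
As a concrete sketch of the agent-based approach (file paths and hostnames are illustrative), the embedded-key pattern collapses to a few lines once keys live outside the repository:

#!/bin/bash
# Agent-based deployment auth: the key never appears in the repo or on the command line
set -euo pipefail

eval "$(ssh-agent -s)"                      # Start a session-scoped agent
trap 'ssh-agent -k > /dev/null' EXIT        # Kill the agent when the script exits

ssh-add /secure/keys/production/deploy_key  # Key stored outside version control, mode 600

# No -i flag: ssh obtains the key from the agent
ssh -o StrictHostKeyChecking=yes deploy@prod1.company.com "sudo systemctl restart myapp"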

Why it happens

SSH private keys end up in repositories through several recurring paths: developers copy personal keys or entire .ssh directories into project folders for convenience, infrastructure-as-code projects bundle deployment keys next to the scripts that use them, and IDE or backup tooling sweeps key directories into project archives. Because Git preserves history, a key committed even once remains recoverable until the history is rewritten and the key is rotated.

Root causes

Developer SSH Keys Accidentally Committed

Developers often accidentally commit their personal SSH private keys when setting up new projects or copying configuration files. This commonly happens when developers include entire .ssh directories, copy id_rsa files to project directories for convenience, or when IDE backup features include SSH key directories in project archives.

Preview example – TEXT
# .ssh/id_rsa - VULNERABLE: Private key accidentally committed
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA7yXyaBC8jqaJJ4h6d8J2CvFyFvGhPJzQdW5xBHj9E2y8kL5+
nR4dJ8fV9qL3xW4ePKsOr2JHfCbYtG6H4Wq7iI8zJ4bZ8A9xY3nU4K7gP5oA1L
... [remaining private key content] ...
8gK7z2jY4Q1fHdF5mL9dV2xK8pR7tM4jQ3lA8fE6nH9dJ4cF5=
-----END RSA PRIVATE KEY-----

# This key is now visible to anyone with repository access!
# Even worse, it remains in git history forever unless properly cleaned
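
Dedicated secret scanners catch these accidents earlier than manual review and can run in CI or as pre-commit checks. A hedged sketch using two widely adopted open-source tools (verify the flags against the versions you install):

# Scan a local repository, including full history, with gitleaks
gitleaks detect --source . --verbose

# Scan a repository's history with trufflehog, reporting only verified findings
trufflehog git file://. --only-verified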

Deployment and Infrastructure Keys in Repository

Infrastructure automation scripts, deployment configurations, and CI/CD pipelines often contain SSH private keys for server access, deployment processes, or inter-service communication. These keys get committed to repositories as part of infrastructure-as-code projects, deployment scripts, or configuration management files.

Preview example – TEXT
# deploy/keys/production_deploy_key - VULNERABLE deployment key
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABFwAAAAdzc2gtcn
NhAAAAAwEAAQAAAQEAz8yJDfW2JH4mKqVl4jfP2YQw5xGvE7mK8pL2cR4nX9dV3m
... [deployment key content] ...
AAAWEAl2F5dJ7nM8qL4gK9fE2xH6dN4cF8jP5mQ7lB9rT3wA
-----END OPENSSH PRIVATE KEY-----

# ansible/inventory/production/ssh_keys/app_server_key
-----BEGIN RSA PRIVATE KEY-----
# Production server access key - NEVER commit this!
MIIEogIBAAKCAQEAzQw9pR7fJ4mK8dL2cR4nX9dV3m5xBHj9E2y8kL5+nR4d...
... [production server key] ...
-----END RSA PRIVATE KEY-----

# docker/deploy/ssh/container_key
# SSH key for container deployments - committed by mistake
-----BEGIN OPENSSH PRIVATE KEY-----
... [container deployment key] ...
-----END OPENSSH PRIVATE KEY-----
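
For CI/CD pipelines, deployment keys belong in the CI system's secret store and should be injected as environment variables at run time, so key material never enters the repository. A minimal sketch (DEPLOY_SSH_KEY is an assumed secret name):

#!/bin/bash
# CI job step: load the deployment key from a CI secret, never from the repo
set -euo pipefail

eval "$(ssh-agent -s)"
trap 'ssh-agent -k > /dev/null' EXIT

# ssh-add reads a private key from stdin when given '-'
printf '%s\n' "$DEPLOY_SSH_KEY" | ssh-add -

ssh -o StrictHostKeyChecking=yes deploy@staging.company.com "/opt/deploy/update_app.sh $APP_VERSION"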

Configuration Files with Embedded Private Keys

SSH private keys are sometimes embedded directly in configuration files, shell scripts, or application configuration for automated processes. These files get committed as part of the application codebase, especially in projects that handle server provisioning, automated deployments, or service-to-service communication.

Preview example – BASH
# config/ssh_config.py - VULNERABLE: SSH key in application config
SSH_PRIVATE_KEY = '''-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEA4f6wWyEd2JH4mKqVl4jfP2YQw5xGvE7mK8pL2cR4nX9dV3m
5xBHj9E2y8kL5+nR4dJ8fV9qL3xW4ePKsOr2JHfCbYtG6H4Wq7iI8zJ4bZ8A9x
... [embedded private key] ...
Y3nU4K7gP5oA1L8gK7z2jY4Q1fHdF5mL9dV2xK8pR7tM4jQ3lA8fE6nH9dJ4cF5=
-----END RSA PRIVATE KEY-----'''

# Shell script with embedded key
#!/bin/bash
# deploy.sh - VULNERABLE deployment script

# SSH key embedded in script - NEVER do this!
SSH_KEY="-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEA4f6wWyEd2JH4mKqVl4jfP2YQw5xGvE7mK8pL2cR4nX9dV3m
...
-----END RSA PRIVATE KEY-----"

# Write key to temporary file for use
echo "$SSH_KEY" > /tmp/deploy_key
chmod 600 /tmp/deploy_key
ssh -i /tmp/deploy_key user@production-server "deploy-command"
rm /tmp/deploy_key
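
A corrected version of that script keeps only a path in the code and resolves it from the environment at run time; the key file itself stays outside version control (variable names are illustrative):

#!/bin/bash
# deploy.sh - FIXED: key referenced by path, never embedded
set -euo pipefail

# Fail fast with a clear message if the key location is not configured
: "${DEPLOY_KEY_PATH:?Set DEPLOY_KEY_PATH to the deployment key location}"

ssh -i "$DEPLOY_KEY_PATH" -o StrictHostKeyChecking=yes user@production-server "deploy-command"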

Fixes

1

Remove Keys from Git History and Use Proper Secret Management

Immediately remove SSH private keys from Git history using tools like git-filter-branch, BFG Repo-Cleaner, or git filter-repo. Replace exposed keys with new ones and implement proper secret management using environment variables, secret management services, or encrypted key storage. Never store private keys in version control.

View implementation – BASH
# SECURE: Remove keys from Git history and implement proper practices

# 1. Remove sensitive files from Git history using BFG Repo-Cleaner
# Download bfg.jar from https://rtyley.github.io/bfg-repo-cleaner/
java -jar bfg.jar --delete-files id_rsa
java -jar bfg.jar --delete-files "*.pem"
java -jar bfg.jar --delete-folders .ssh
git reflog expire --expire=now --all && git gc --prune=now --aggressive

# 2. Alternative: Use git filter-repo (recommended)
git filter-repo --path .ssh --invert-paths
git filter-repo --path "*/id_rsa" --invert-paths
git filter-repo --path "*_key" --invert-paths
git filter-repo --path "*.pem" --invert-paths

# 3. Create comprehensive .gitignore
echo "# SSH Keys - NEVER commit these" >> .gitignore
echo "*.pem" >> .gitignore
echo "*_key" >> .gitignore
echo "*_key.pub" >> .gitignore
echo "id_rsa*" >> .gitignore
echo "id_ed25519*" >> .gitignore
echo "id_ecdsa*" >> .gitignore
echo ".ssh/" >> .gitignore
echo "keys/" >> .gitignore
echo "secrets/" >> .gitignore

# 4. Set up pre-commit hooks to prevent future commits
#!/bin/bash
# .git/hooks/pre-commit - Prevent committing sensitive files
set -e

# Check for SSH private keys
if git diff --cached --name-only | grep -E "(id_rsa|id_ed25519|id_ecdsa|.*_key|.*\.pem)$"; then
    echo "ERROR: Attempting to commit SSH private key!"
    echo "Private keys should never be committed to Git."
    exit 1
fi

# Check for private key content in files
if git diff --cached | grep -E "BEGIN ([A-Z0-9]+ )?PRIVATE KEY"; then
    echo "ERROR: Private key content detected in commit!"
    echo "Remove private keys before committing."
    exit 1
fi

echo "Pre-commit check passed: No private keys detected"

# Make the hook executable
chmod +x .git/hooks/pre-commit
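
Rewriting history removes the file, but anyone who has already cloned or fetched the repository may hold a copy, so every exposed key must also be rotated. A hedged sketch of the rotation steps (hostnames, paths, and the old-key comment are illustrative):

#!/bin/bash
# rotate_exposed_key.sh - generate a replacement key, install it, revoke the old one
set -euo pipefail

# 1. Generate a replacement key (consider adding a passphrase in practice)
ssh-keygen -t ed25519 -f /secure/keys/production/deploy_key_new -C "deploy-$(date +%Y%m%d)"

# 2. Install the new public key on each server
ssh-copy-id -i /secure/keys/production/deploy_key_new.pub deploy@prod1.company.com

# 3. Remove the compromised public key from authorized_keys (match on its comment)
ssh deploy@prod1.company.com \
    "grep -v 'OLD_KEY_COMMENT' ~/.ssh/authorized_keys > ~/.ssh/authorized_keys.tmp \
     && mv ~/.ssh/authorized_keys.tmp ~/.ssh/authorized_keys"
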
2

Implement Secure SSH Key Management for Deployments

Use proper deployment key management strategies including separate deployment keys per environment, SSH agent forwarding, encrypted key storage, and secure key distribution mechanisms. Store keys outside of version control and use configuration management or secret management services to distribute them securely.

View implementation – PYTHON
# SECURE: Proper SSH key management for deployments

# deploy_manager.py - Secure deployment key management
import os
import tempfile
import subprocess
from pathlib import Path
from contextlib import contextmanager

class SecureSSHKeyManager:
    def __init__(self):
        self.key_storage_path = os.getenv('SSH_KEY_STORAGE_PATH', '/secure/keys')
        self.validate_environment()
    
    def validate_environment(self):
        """Validate SSH key management environment"""
        required_vars = ['SSH_KEY_STORAGE_PATH', 'DEPLOYMENT_ENV']
        missing = [var for var in required_vars if not os.getenv(var)]
        if missing:
            raise EnvironmentError(f"Missing required environment variables: {missing}")
    
    @contextmanager
    def get_deployment_key(self, environment, key_name):
        """Securely provide deployment key for temporary use"""
        key_path = Path(self.key_storage_path) / environment / f"{key_name}_key"
        
        if not key_path.exists():
            raise FileNotFoundError(f"Deployment key not found: {key_path}")
        
        # Create temporary key file with secure permissions
        with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.key') as temp_key:
            with open(key_path, 'r') as source_key:
                temp_key.write(source_key.read())
            temp_key_path = temp_key.name
        
        try:
            # Set secure permissions
            os.chmod(temp_key_path, 0o600)
            yield temp_key_path
        finally:
            # Always clean up temporary key
            if os.path.exists(temp_key_path):
                os.unlink(temp_key_path)
    
    def execute_ssh_command(self, host, command, environment='production', key_name='deploy'):
        """Execute SSH command with managed key"""
        with self.get_deployment_key(environment, key_name) as key_path:
            ssh_cmd = [
                'ssh',
                '-i', key_path,
                '-o', 'StrictHostKeyChecking=yes',
                '-o', 'UserKnownHostsFile=/etc/ssh/known_hosts',
                '-o', 'ConnectTimeout=10',
                f'{host}',
                command
            ]
            
            try:
                # Without check=True, subprocess.run never raises CalledProcessError;
                # failures surface through result.returncode, which callers inspect.
                result = subprocess.run(
                    ssh_cmd,
                    capture_output=True,
                    text=True,
                    timeout=300
                )
                return result
            except subprocess.TimeoutExpired:
                raise TimeoutError(f"SSH command timed out: {command}")

# deployment_script.py - Secure deployment implementation
#!/usr/bin/env python3
import argparse
import logging
import sys
from deploy_manager import SecureSSHKeyManager

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def deploy_application(environment, version):
    """Deploy application using secure SSH key management"""
    key_manager = SecureSSHKeyManager()
    
    # Define deployment targets based on environment
    targets = {
        'staging': ['staging-1.company.com', 'staging-2.company.com'],
        'production': ['prod-1.company.com', 'prod-2.company.com']
    }
    
    if environment not in targets:
        raise ValueError(f"Unknown environment: {environment}")
    
    logger.info(f"Starting deployment to {environment} environment")
    
    deployment_commands = [
        'sudo systemctl stop application',
        f'sudo /opt/deploy/update_application.sh {version}',
        'sudo systemctl start application',
        'sudo systemctl status application'
    ]
    
    for host in targets[environment]:
        logger.info(f"Deploying to {host}")
        
        for command in deployment_commands:
            try:
                result = key_manager.execute_ssh_command(host, command, environment)
                if result.returncode != 0:
                    logger.error(f"Command failed on {host}: {command}")
                    logger.error(f"Error output: {result.stderr}")
                    return False
                else:
                    logger.info(f"Command successful on {host}: {command}")
            except Exception as e:
                logger.error(f"SSH execution failed for {host}: {e}")
                return False
    
    logger.info(f"Deployment to {environment} completed successfully")
    return True

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Secure deployment script')
    parser.add_argument('--environment', required=True, choices=['staging', 'production'])
    parser.add_argument('--version', required=True, help='Application version to deploy')
    
    args = parser.parse_args()
    
    try:
        success = deploy_application(args.environment, args.version)
        sys.exit(0 if success else 1)
    except Exception as e:
        logger.error(f"Deployment failed: {e}")
        sys.exit(1)

# Docker deployment with secure key handling
# docker-compose.yml - SECURE: External key management
version: '3.8'
services:
  deploy-agent:
    image: deploy-agent:latest
    volumes:
      # Mount secure key directory (read-only)
      - /secure/ssh-keys:/keys:ro
      # Mount known_hosts for security
      - /etc/ssh/known_hosts:/etc/ssh/known_hosts:ro
    environment:
      - SSH_KEY_STORAGE_PATH=/keys
      - DEPLOYMENT_ENV=${DEPLOYMENT_ENV}
    security_opt:
      - no-new-privileges:true
    user: "1000:1000"  # Non-root user
3

Use SSH Agent Forwarding and Certificate-Based Authentication

Implement SSH agent forwarding for development workflows and SSH certificate-based authentication for production systems. This eliminates the need to store private keys on servers or in configuration files, providing better security and centralized key management through SSH Certificate Authorities.

View implementation – PYTHON
# SECURE: SSH agent forwarding and certificate-based authentication setup

# ssh_ca_manager.py - SSH Certificate Authority management
import os
import subprocess
import tempfile
from datetime import datetime, timedelta
from pathlib import Path

class SSHCertificateManager:
    def __init__(self, ca_key_path, ca_pub_key_path):
        self.ca_key_path = Path(ca_key_path)
        self.ca_pub_key_path = Path(ca_pub_key_path)
        self.validate_ca_keys()
    
    def validate_ca_keys(self):
        """Validate CA key pair exists and has proper permissions"""
        if not self.ca_key_path.exists():
            raise FileNotFoundError(f"CA private key not found: {self.ca_key_path}")
        
        if not self.ca_pub_key_path.exists():
            raise FileNotFoundError(f"CA public key not found: {self.ca_pub_key_path}")
        
        # Check permissions on CA private key
        ca_key_stat = os.stat(self.ca_key_path)
        if ca_key_stat.st_mode & 0o077:  # Should be 600 or 400
            raise PermissionError(f"CA private key has insecure permissions: {oct(ca_key_stat.st_mode)}")
    
    def create_user_certificate(self, user_key_path, username, valid_hours=24, principals=None):
        """Create an SSH user certificate; user_key_path must be the public key (e.g. id_ed25519.pub)"""
        principals = principals or [username]
        
        # ssh-keygen -s writes the certificate next to the public key as <name>-cert.pub
        cert_path = user_key_path.replace('.pub', '-cert.pub')
        valid_from = datetime.now().strftime('%Y%m%d%H%M%S')
        valid_to = (datetime.now() + timedelta(hours=valid_hours)).strftime('%Y%m%d%H%M%S')
        
        cmd = [
            'ssh-keygen',
            '-s', str(self.ca_key_path),
            '-I', f"{username}-{valid_from}",
            '-n', ','.join(principals),
            '-V', f"{valid_from}:{valid_to}",
            '-z', str(int(datetime.now().timestamp())),
            user_key_path
        ]
        
        try:
            result = subprocess.run(cmd, capture_output=True, text=True, check=True)
            return cert_path
        except subprocess.CalledProcessError as e:
            raise RuntimeError(f"Certificate creation failed: {e.stderr}")
    
    def create_host_certificate(self, host_key_path, hostname, valid_days=365):
        """Create an SSH host certificate; host_key_path must be the host public key (e.g. ssh_host_ed25519_key.pub)"""
        cert_path = host_key_path.replace('.pub', '-cert.pub')
        valid_from = datetime.now().strftime('%Y%m%d%H%M%S')
        valid_to = (datetime.now() + timedelta(days=valid_days)).strftime('%Y%m%d%H%M%S')
        
        cmd = [
            'ssh-keygen',
            '-s', str(self.ca_key_path),
            '-I', f"host-{hostname}-{valid_from}",
            '-h',  # Host certificate
            '-n', hostname,
            '-V', f"{valid_from}:{valid_to}",
            host_key_path
        ]
        
        try:
            result = subprocess.run(cmd, capture_output=True, text=True, check=True)
            return cert_path
        except subprocess.CalledProcessError as e:
            raise RuntimeError(f"Host certificate creation failed: {e.stderr}")

# secure_deployment.py - Deployment using SSH agent forwarding
import os
import subprocess
import logging
from contextlib import contextmanager

logger = logging.getLogger(__name__)

class SecureDeployment:
    def __init__(self):
        self.validate_ssh_agent()
    
    def validate_ssh_agent(self):
        """Validate SSH agent is running and has keys loaded"""
        if not os.getenv('SSH_AUTH_SOCK'):
            raise EnvironmentError("SSH agent not running. Start with: ssh-agent bash")
        
        # Check if agent has keys
        try:
            result = subprocess.run(['ssh-add', '-l'], capture_output=True, text=True)
            if result.returncode != 0 or 'no identities' in result.stdout:
                raise EnvironmentError("No SSH keys loaded in agent. Load keys with: ssh-add")
        except FileNotFoundError:
            raise EnvironmentError("ssh-add command not found")
    
    @contextmanager
    def ssh_connection(self, host, user='deploy'):
        """Create SSH connection using agent forwarding"""
        ssh_cmd_base = [
            'ssh',
            '-A',  # Enable agent forwarding
            '-o', 'StrictHostKeyChecking=yes',
            '-o', 'ForwardAgent=yes',
            '-o', 'ConnectTimeout=10',
            f'{user}@{host}'
        ]
        yield ssh_cmd_base
    
    def execute_deployment_command(self, host, command, user='deploy'):
        """Execute deployment command using SSH agent forwarding"""
        with self.ssh_connection(host, user) as ssh_base:
            ssh_cmd = ssh_base + [command]
            
            try:
                result = subprocess.run(
                    ssh_cmd,
                    capture_output=True,
                    text=True,
                    timeout=300,
                    check=True
                )
                logger.info(f"Command executed successfully on {host}: {command}")
                return result.stdout
            except subprocess.TimeoutExpired:
                raise TimeoutError(f"Command timed out on {host}: {command}")
            except subprocess.CalledProcessError as e:
                logger.error(f"Command failed on {host}: {e.stderr}")
                raise
    
    def deploy_with_git_pull(self, hosts, repository_path='/opt/app'):
        """Deploy by pulling latest code using agent forwarding"""
        deployment_commands = [
            f'cd {repository_path}',
            'git fetch origin',
            'git reset --hard origin/main',
            'sudo systemctl restart application'
        ]
        
        combined_command = ' && '.join(deployment_commands)
        
        for host in hosts:
            logger.info(f"Deploying to {host}")
            try:
                self.execute_deployment_command(host, combined_command)
                logger.info(f"Deployment successful on {host}")
            except Exception as e:
                logger.error(f"Deployment failed on {host}: {e}")
                raise

# SSH configuration for certificate-based auth
# ~/.ssh/config - Client configuration
Host *.company.com
    # Use certificate authentication
    CertificateFile ~/.ssh/id_ed25519-cert.pub
    IdentityFile ~/.ssh/id_ed25519
    
    # Enable agent forwarding for deployment
    ForwardAgent yes
    
    # Security settings
    StrictHostKeyChecking yes
    UserKnownHostsFile ~/.ssh/known_hosts
    
    # Host certificates are trusted via a @cert-authority entry in known_hosts, e.g.:
    #   @cert-authority *.company.com ssh-ed25519 AAAA... (the CA public key)
    # (TrustedUserCAKeys is a server-side sshd_config option, not a client option.)

# /etc/ssh/sshd_config - Server configuration for certificate auth
# Enable certificate authentication
TrustedUserCAKeys /etc/ssh/ca-key.pub
AuthorizedPrincipalsFile /etc/ssh/auth_principals/%u

# Host certificate for server identity
HostCertificate /etc/ssh/ssh_host_ed25519_key-cert.pub
HostKey /etc/ssh/ssh_host_ed25519_key

# Security settings
PasswordAuthentication no
PermitRootLogin no
X11Forwarding no
AllowTcpForwarding no
PermitTunnel no

# Usage example script
#!/bin/bash
# deploy.sh - Secure deployment using certificates and agent forwarding

# Ensure SSH agent is running
if [ -z "$SSH_AUTH_SOCK" ]; then
    echo "Starting SSH agent..."
    eval "$(ssh-agent -s)"
fi

# Load deployment key into agent
ssh-add ~/.ssh/id_ed25519

# Create short-lived certificate (4 hours)
python3 create_deployment_cert.py --user deploy --hours 4

# Execute deployment
python3 secure_deployment.py --environment production --version v1.2.3

# Clean up certificate
rm ~/.ssh/id_ed25519-cert.pub

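The same certificate workflow can be driven entirely with stock OpenSSH tooling, without the Python wrapper above; a hedged sketch of bootstrapping a CA and issuing a short-lived user certificate (file names and the principal are illustrative):

#!/bin/bash
# Bootstrap an SSH CA and issue a 4-hour user certificate with plain ssh-keygen
set -euo pipefail

# 1. Create the CA key pair (protect the private half; offline storage is ideal)
ssh-keygen -t ed25519 -f ssh_ca -C "company-ssh-ca"

# 2. Sign a user's public key: identity "alice", principal "deploy", valid 4 hours
ssh-keygen -s ssh_ca -I alice -n deploy -V +4h ~/.ssh/id_ed25519.pub

# 3. On each server, trust the CA via sshd_config:
#    TrustedUserCAKeys /etc/ssh/ca-key.pub
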
Detect This Vulnerability in Your Code

Sourcery automatically identifies SSH private keys accidentally committed to Git repositories, along with many other security issues in your codebase.