Welcome to the advanced tier! You've mastered the fundamentals and intermediate concepts. Now we'll explore the sophisticated techniques that separate power users from systems architects.
#!/bin/bash
# Function with parameters
# Dump a single MySQL database to a timestamped .sql file.
# Arguments:
#   $1 - database name
#   $2 - destination directory (must already exist)
# Outputs: progress message on stdout; usage/errors on stderr.
# Returns: 0 on success, 1 on bad usage, missing directory, or dump failure.
function backup_database() {
    local db_name="$1"
    local backup_dir="$2"
    local timestamp
    timestamp=$(date +%Y%m%d_%H%M%S)
    if [ -z "$db_name" ] || [ -z "$backup_dir" ]; then
        echo "Usage: backup_database <db_name> <backup_dir>" >&2
        return 1
    fi
    if [ ! -d "$backup_dir" ]; then
        echo "Error: backup directory '$backup_dir' does not exist" >&2
        return 1
    fi
    local backup_file="${backup_dir}/${db_name}_${timestamp}.sql"
    # Check mysqldump's exit status: without this, a failed dump leaves a
    # truncated/empty file behind and the function still reports success.
    if ! mysqldump "$db_name" > "$backup_file"; then
        echo "Error: mysqldump failed for database '$db_name'" >&2
        rm -f -- "$backup_file"
        return 1
    fi
    echo "Database $db_name backed up to $backup_file"
}
# Function with return values
# Query systemd for a unit's state; the exit status is the answer:
#   0 -> the unit is active, 1 -> it is not (any systemctl failure
#   is normalized to 1).
function is_service_running() {
    local unit="$1"
    if ! systemctl is-active --quiet "$unit"; then
        return 1 # Service is not running
    fi
    return 0 # Service is running
}
# Example invocations of the helpers defined above.
backup_database "myapp" "/backups"
# Equivalent to the if/then form: prints only when the unit is active,
# and the line's final status is 0 either way.
! is_service_running "nginx" || echo "Nginx is running"
#!/bin/bash
# Advanced parameter parsing
# Parse short command-line options into global configuration variables:
#   -h            show help and exit
#   -v            VERBOSE=true
#   -f <file>     CONFIG_FILE
#   -d <level>    DEBUG_LEVEL
#   -n            DRY_RUN=true (flag — takes no argument)
# Leftover positional arguments are stored in REMAINING_ARGS.
function parse_args() {
    local OPTIND opt
    # Leading ':' = silent mode, so missing arguments reach our ':' case.
    # Bug fix: the original optstring used "n:", which made the -n flag
    # consume and discard the next argument; -n takes no argument.
    while getopts ":hvf:d:n" opt; do
        case "$opt" in
            h)
                show_help
                exit 0
                ;;
            v)
                VERBOSE=true
                ;;
            f)
                CONFIG_FILE="$OPTARG"
                ;;
            d)
                DEBUG_LEVEL="$OPTARG"
                ;;
            n)
                DRY_RUN=true
                ;;
            :)
                echo "Option -$OPTARG requires an argument" >&2
                exit 1
                ;;
            \?)
                echo "Invalid option: -$OPTARG" >&2
                exit 1
                ;;
        esac
    done
    shift $((OPTIND-1))
    REMAINING_ARGS=("$@")
}
# Long options with getopt
# Parse long (and short) options with util-linux enhanced getopt(1):
#   -h/--help, -v/--verbose, -f/--file/--config-file <path>,
#   -d/--debug <level>, -n/--dry-run (flag).
function parse_long_args() {
    local options
    # Split declaration from assignment so getopt's exit status is not
    # masked by `local`; abort on a bad command line (getopt prints its
    # own diagnostic). Bug fix: the short optstring used "n:" while the
    # loop below treats -n/--dry-run as a no-argument flag — that
    # mismatch desynchronized getopt's rewritten argument list.
    options=$(getopt -o hvf:d:n --long help,verbose,file:,debug:,dry-run,config-file: -n "$0" -- "$@") || exit 1
    eval set -- "$options"
    while true; do
        case "$1" in
            -h|--help)
                show_help
                exit 0
                ;;
            -v|--verbose)
                VERBOSE=true
                shift
                ;;
            -f|--file|--config-file)
                CONFIG_FILE="$2"
                shift 2
                ;;
            -d|--debug)
                DEBUG_LEVEL="$2"
                shift 2
                ;;
            -n|--dry-run)
                DRY_RUN=true
                shift
                ;;
            --)
                shift
                break
                ;;
            *)
                echo "Unknown option: $1" >&2
                exit 1
                ;;
        esac
    done
}
#!/bin/bash
set -euo pipefail # Exit on error, undefined vars, pipe failures
# Logging functions
# Globals read by log_message(): where log lines are appended, and
# whether DEBUG-level messages are emitted at all.
LOG_FILE="/var/log/myscript.log"
DEBUG_MODE=false
# Write a timestamped, leveled message to stdout (INFO/DEBUG) or stderr
# (WARN/ERROR) and append it to $LOG_FILE.
# Arguments:
#   $1 - level: DEBUG, INFO, WARN, or ERROR
#   $@ - message text (remaining arguments are joined with spaces)
# DEBUG messages are emitted only when DEBUG_MODE=true.
# Always returns 0, so calls are safe under `set -e`.
function log_message() {
    local level="$1"
    shift
    local message="$*"
    local timestamp
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    case "$level" in
        DEBUG)
            # Bug fix: the original `$DEBUG_MODE && echo ...` returned 1
            # whenever DEBUG_MODE=false, which aborts the whole script
            # under the `set -e` configured above.
            if [ "$DEBUG_MODE" = true ]; then
                echo "[$timestamp] DEBUG: $message" | tee -a "$LOG_FILE"
            fi
            ;;
        INFO)
            echo "[$timestamp] INFO: $message" | tee -a "$LOG_FILE"
            ;;
        WARN)
            echo "[$timestamp] WARN: $message" | tee -a "$LOG_FILE" >&2
            ;;
        ERROR)
            echo "[$timestamp] ERROR: $message" | tee -a "$LOG_FILE" >&2
            ;;
    esac
    return 0
}
# Error trapping
# Signal handler (installed below for INT/TERM): log the interruption,
# run any teardown, and terminate the whole script with status 1.
function cleanup() {
log_message "INFO" "Script interrupted, cleaning up..."
# Cleanup code here
exit 1
}
# Run cleanup only on interruption signals. Bug fix: the original also
# trapped EXIT, so every run — including a successful one — logged
# "Script interrupted" and exited 1 from the handler.
trap cleanup INT TERM
# Usage
log_message "INFO" "Script started"
# ${filename:-} guards against `set -u`: $filename is never assigned in
# this script, and referencing an unset variable would abort it here.
log_message "DEBUG" "Processing file: ${filename:-}"
log_message "ERROR" "Failed to connect to database"
# Advanced sed operations
# Multi-line pattern replacement
# Hold-space trick: 1h;1!H accumulates the whole file, so the final
# substitution can match across line boundaries.
sed -n '1h;1!H;${g;s/pattern1.*pattern2/replacement/g;p;}' file.txt
# Conditional replacements
# (the s/// applies only between the two patterns, inclusive)
sed '/start_pattern/,/end_pattern/s/old/new/g' file.txt
# Insert lines before/after patterns
sed '/pattern/i\This line is inserted before' file.txt
sed '/pattern/a\This line is inserted after' file.txt
# Create backup and edit in place (original saved as file.txt.bak)
sed -i.bak 's/old/new/g' file.txt
# Advanced address ranges (the '+N' and 'first~step' forms are GNU sed)
sed '10,20d' file.txt # Delete lines 10-20
sed '10,+5d' file.txt # Delete line 10 and next 5
sed '10~3d' file.txt # Delete every 3rd line starting from 10
# Advanced awk programming
# BEGIN and END blocks with complex logic
# Reads data.csv: row 1 is captured as headers, then column 3 is summed
# and its maximum tracked. NOTE(review): END divides total by count —
# an input with no data rows would divide by zero; confirm inputs are
# never empty before relying on this.
awk 'BEGIN {
FS = ","
OFS = "\t"
print "Processing CSV file..."
}
{
if (NR == 1) {
# Header row
for (i = 1; i <= NF; i++) {
headers[i] = $i
}
next
}
# Data processing
total += $3
count++
if ($3 > max_value) {
max_value = $3
max_line = NR
}
}
END {
print "Summary:"
print "Total records:", count
print "Average:", total/count
print "Maximum value:", max_value, "at line", max_line
}' data.csv
# User-defined functions
# Each input line is "C <temp>" or "F <temp>"; the matching converter
# is applied and the result printed.
awk '
function celsius_to_fahrenheit(c) {
return (c * 9/5) + 32
}
function fahrenheit_to_celsius(f) {
return (f - 32) * 5/9
}
{
if ($1 == "C") {
print $2 "°C =", celsius_to_fahrenheit($2) "°F"
} else if ($1 == "F") {
print $2 "°F =", fahrenheit_to_celsius($2) "°C"
}
}' temperature.txt
# grep with PCRE
# Perl-compatible regular expressions (-P, GNU grep only)
# Look-behind: match the time that follows "YYYY-MM-DD " without
# including the date in the output.
grep -P '(?<=\d{4}-\d{2}-\d{2}\s)\d{2}:\d{2}:\d{2}' logfile.txt
# Look-ahead and look-behind assertions
# Each (?=...) lookahead enforces one required character class;
# .{8,} enforces the minimum length.
grep -P '(?=.*[A-Z])(?=.*[a-z])(?=.*\d)(?=.*[!@#$%^&*]).{8,}' passwords.txt
# Named groups
grep -P '(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})' dates.txt
# Multi-line matching
# -z reads NUL-separated "lines" and (?s) lets . span newlines, so a
# pattern can match across multiple source lines.
grep -Pzo '(?s)function.*?{.*?}' script.js
# systemctl and service management
# Service analysis
systemctl list-dependencies nginx # Show service dependencies
systemctl show nginx # Show all service properties
systemctl edit nginx # Create override file
systemctl mask nginx # Prevent service from starting
systemctl unmask nginx # Remove mask
# Custom service creation (requires root to write under /etc/systemd).
# The here-doc delimiter is unquoted, but the unit text contains no
# $-expansions, so it is written literally.
cat > /etc/systemd/system/myapp.service << EOF
[Unit]
Description=My Application
After=network.target
[Service]
Type=simple
User=myapp
WorkingDirectory=/opt/myapp
ExecStart=/opt/myapp/start.sh
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable myapp
systemctl start myapp
# Process monitoring with advanced options
ps axo pid,ppid,pgid,sid,comm,args --forest
# Memory analysis (top 20 by %mem)
ps -eo pid,comm,pmem,rss,vsize --sort=-pmem | head -20
# Process tree visualization
pstree -p -u username
# Advanced process control (SIGSTOP/SIGCONT cannot be caught or ignored)
kill -STOP 1234 # Suspend process
kill -CONT 1234 # Resume process
kill -USR1 1234 # Send user signal
# Process priority management
nice -n 10 command # Start with lower priority
renice -n 5 -p 1234 # Change priority of running process
# Network namespaces (require root; each namespace is an isolated stack)
ip netns add test_namespace
ip netns exec test_namespace ip link set lo up
ip netns exec test_namespace bash
# Advanced routing (policy routing: rule selects the custom table)
ip route add 192.168.100.0/24 via 192.168.1.1 dev eth0 metric 100
ip route add default via 192.168.1.1 dev eth0 table custom_table
ip rule add from 192.168.100.0/24 table custom_table
# Traffic control (HTB shaping: unclassified traffic falls into 1:12)
tc qdisc add dev eth0 root handle 1: htb default 12
tc class add dev eth0 parent 1: classid 1:1 htb rate 100mbit
tc class add dev eth0 parent 1:1 classid 1:10 htb rate 80mbit ceil 100mbit
tc class add dev eth0 parent 1:1 classid 1:12 htb rate 20mbit ceil 100mbit
# Network monitoring
ss -tuln --processes # Show listening sockets with processes
ss -i # Show socket statistics
netstat -i # Interface statistics
# Set ACL permissions (finer-grained than owner/group/other bits)
setfacl -m u:username:rwx /path/to/file
setfacl -m g:groupname:r-x /path/to/directory
setfacl -m o::--- /path/to/file # Remove others permissions
# Default ACLs for directories (inherited by newly created children)
setfacl -d -m u:username:rwx /path/to/directory
# View ACLs
getfacl /path/to/file
# Remove ACLs
setfacl -x u:username /path/to/file
setfacl -b /path/to/file # Remove all ACLs
# Extended attributes (ext-family filesystems; require root)
chattr +i file.txt # Make file immutable
chattr +a file.txt # Make file append-only
chattr +c file.txt # Enable compression (only on filesystems that support it)
lsattr file.txt # List attributes
# File capabilities (grant a single privilege instead of full setuid root)
setcap cap_net_bind_service=+ep /usr/bin/myapp
getcap /usr/bin/myapp
# SELinux contexts (if applicable)
ls -Z file.txt # Show security context
chcon -t httpd_exec_t /path/to/script
setsebool -P httpd_can_network_connect on
# Compare outputs of two commands
# <(cmd) is process substitution: each command's output is presented as
# a readable pseudo-file, so diff can compare live output streams.
diff <(command1) <(command2)
# Use command output as input file
# IFS= preserves leading/trailing whitespace and -r keeps backslashes
# literal — plain `read line` mangles both. Feeding the loop via
# process substitution (not a pipe) keeps it in the current shell.
while IFS= read -r line; do
    echo "Processing: $line"
done < <(find /etc -name "*.conf")
# Multiple inputs (paste columns from two command streams side by side)
paste <(cut -d: -f1 /etc/passwd) <(cut -d: -f3 /etc/passwd)
# Complex sequences
echo {01..10} # Zero-padded numbers
echo {a..z..2} # Every 2nd letter
echo {1..10..3} # Step by 3
# Nested expansions (cartesian product of the two brace lists)
mkdir -p project/{src,tests,docs}/{js,css,html}
touch file{1..5}.{txt,log,conf}
# Combination with variables
prefix="log"
# Brace expansion runs before parameter expansion, yielding
# log1.txt ... log5.txt. (The reverse — a variable inside {..} — does
# not expand.)
echo ${prefix}{1..5}.txt
# Parameter manipulation
filename="/path/to/file.txt"
echo ${filename##*/} # Extract basename: file.txt (longest */ prefix stripped)
echo ${filename%/*} # Extract dirname: /path/to
echo ${filename%%.*} # Remove extension: /path/to/file (longest .* suffix)
echo ${filename#*.} # Get extension: txt (note: "a.tar.gz" yields "tar.gz")
# Default values and conditionals
echo ${VAR:-default} # Use default if VAR is unset
echo ${VAR:=default} # Set VAR to default if unset
echo ${VAR:+alternative} # Use alternative if VAR is set
echo ${VAR:?error message} # Error if VAR is unset
# String manipulation (case operators ^ ^^ ,, require bash 4+)
string="Hello World"
echo ${string^} # Capitalize first letter
echo ${string^^} # Uppercase all
echo ${string,,} # Lowercase all
echo ${string/l/L} # Replace first 'l' with 'L'
echo ${string//l/L} # Replace all 'l' with 'L'
# CPU performance (mpstat/sar/iostat ship in the sysstat package)
mpstat 1 5 # CPU usage every second for 5 iterations
sar -u 1 10 # CPU utilization
iostat -x 1 # I/O statistics
# Memory analysis
vmstat 1 5 # Virtual memory statistics
free -h -s 1 # Memory usage updated every second
slabtop # Kernel slab allocator info
# I/O performance (iotop/iftop are separate packages and need root)
iotop # I/O usage by process
iftop # Network I/O by connection
# Advanced find operations
# Complex search criteria
# Large (>100M) .log files not modified in the last week
find /var/log -type f -name "*.log" -size +100M -mtime +7 -exec ls -lh {} \;
# Performance optimization
# (-print0 / -0 keep filenames with spaces or newlines intact)
find /large/directory -type f -print0 | xargs -0 grep -l "pattern"
# Execute multiple commands
# (actions run left to right: each file is removed, then its name printed)
find . -name "*.tmp" -exec rm {} \; -print
# Advanced permissions
find /etc -type f -perm 644 -user root
find /usr/bin -type f -perm /u+s # Find SUID files
# Real-time log monitoring
# (--line-buffered makes grep flush each match immediately instead of
# block-buffering, so matches appear as they happen)
tail -f /var/log/auth.log | grep --line-buffered "Failed password"
# Advanced log analysis
journalctl -u nginx.service --since "2025-01-01" --until "2025-01-31"
journalctl -f -u nginx.service
journalctl --disk-usage
journalctl --vacuum-time=7d # Delete journal entries older than 7 days
# Security monitoring
lastlog # Last login times
last -n 20 # Recent logins
who -a # Current users
w # What users are doing
#!/bin/bash
# System health monitoring script
# Print a WARNING line for every mounted filesystem whose use% (df
# column 5) exceeds the threshold. The '%' is stripped before the
# numeric comparison; non-percentage rows are skipped by the regex.
function check_disk_usage() {
local threshold=80
df -h | awk -v threshold=$threshold '
NR>1 && $5 ~ /^[0-9]+%$/ {
usage = substr($5, 1, length($5)-1)
if (usage > threshold) {
print "WARNING: " $6 " is " usage "% full"
}
}'
}
# Warn when overall memory usage exceeds a threshold percentage.
# Arguments:
#   $1 - threshold percent (optional; defaults to 80 as before)
function check_memory_usage() {
    local threshold="${1:-80}"
    local mem_usage
    mem_usage=$(free | awk '/Mem:/ { printf("%.2f", $3/$2 * 100) }')
    # awk performs the float comparison; the original shelled out to
    # bc(1), which is absent on many minimal systems.
    if awk -v u="$mem_usage" -v t="$threshold" 'BEGIN { exit !(u > t) }'; then
        echo "WARNING: Memory usage is ${mem_usage}%"
    fi
}
# Warn when the 1-minute load average exceeds the number of CPU cores.
function check_load_average() {
    local load
    load=$(uptime | awk -F'load average:' '{print $2}' | awk '{print $1}' | tr -d ',')
    local cpu_cores
    cpu_cores=$(nproc)
    # awk performs the float comparison; the original shelled out to
    # bc(1), which is absent on many minimal systems.
    if awk -v l="$load" -v c="$cpu_cores" 'BEGIN { exit !(l > c) }'; then
        echo "WARNING: Load average ($load) exceeds CPU cores ($cpu_cores)"
    fi
}
# Main monitoring function
# Run every health probe in sequence, framing the output with a
# timestamped header and footer. Each probe prints only on warning.
function system_health_check() {
echo "=== System Health Check - $(date) ==="
check_disk_usage
check_memory_usage
check_load_average
echo "=== End Health Check ==="
}
# JSON processing with jq
# Select active records and project two fields. jq reads files itself,
# so the useless `cat |` of the original is dropped.
jq '.data[] | select(.status == "active") | {name: .name, email: .email}' api_response.json
# CSV to JSON conversion
# NOTE(review): naive converter — field values containing quotes,
# commas, or backslashes are not escaped; fine for clean CSV only.
awk -F, 'NR==1 {for(i=1;i<=NF;i++) h[i]=$i; next} {printf "{"; for(i=1;i<=NF;i++) printf "\"%s\":\"%s\"%s", h[i], $i, (i==NF?"":","); print "}"}' data.csv
# Multi-file processing
# NOTE(review): substituting {} into the sh -c string is a command
# injection risk for hostile filenames; prefer passing the name as "$1".
find . -name "*.log" -print0 | \
xargs -0 -I {} sh -c 'echo "=== {} ==="; grep -c "ERROR" "{}"' | \
awk '/^===/ {file=$0; next} {print file ": " $0 " errors"}'
# Complex pattern matching (password-strength check via lookaheads)
grep -P '(?=.*[A-Z])(?=.*[a-z])(?=.*\d)(?=.*[@$!%*?&])[A-Za-z\d@$!%*?&]{8,}' passwords.txt
# Email validation
grep -E '^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$' emails.txt
# IP address extraction (matches dotted quads; does not range-check octets)
grep -oE '\b([0-9]{1,3}\.){3}[0-9]{1,3}\b' logfile.txt
# URL extraction
grep -oP 'https?://[^\s]+' file.txt
# Complex cron expressions (crontab syntax — not executable shell)
# Run every 15 minutes during business hours on weekdays
*/15 9-17 * * 1-5 /path/to/script.sh
# Advanced at scheduling
# NOTE(review): the here-doc delimiter is unquoted, so $(date) expands
# when the job is SUBMITTED, not when it runs — use <<'EOF' if the
# run-time date is wanted.
at now + 5 minutes << EOF
/path/to/backup.sh
logger "Backup completed at $(date)"
EOF
# Systemd timers (modern alternative to cron; pairs with backup.service)
cat > /etc/systemd/system/backup.timer << EOF
[Unit]
Description=Run backup script
Requires=backup.service
[Timer]
OnCalendar=daily
Persistent=true
[Install]
WantedBy=timers.target
EOF
# SSH automation with key-based authentication
ssh-keygen -t rsa -b 4096 -f ~/.ssh/automation_key
ssh-copy-id -i ~/.ssh/automation_key.pub user@remote-host
# Parallel execution across multiple hosts
parallel-ssh -h hosts.txt -l username -A -i "uptime"
# Advanced SSH usage: stream a local script to a remote bash.
# NOTE(review): StrictHostKeyChecking=no + a null known_hosts file
# disables host-key verification entirely — acceptable only for
# throwaway/lab hosts, never production.
ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
-i ~/.ssh/automation_key user@host 'bash -s' < local_script.sh
#!/bin/bash
# Collect logs from multiple servers and analyze
# Pull application logs from each server as a gzipped tarball, writing
# <server>_logs.tar.gz files into /tmp/collected_logs.
function collect_logs() {
    local servers=("web1" "web2" "db1" "cache1")
    local log_dir="/tmp/collected_logs"
    mkdir -p "$log_dir"
    local server
    for server in "${servers[@]}"; do
        echo "Collecting logs from $server..."
        # Report transfer failures instead of silently leaving a
        # truncated or empty archive behind (the original ignored the
        # ssh/tar exit status entirely).
        if ! ssh "$server" "tar -czf - /var/log/application/*.log" > \
            "$log_dir/${server}_logs.tar.gz"; then
            echo "WARNING: log collection from $server failed" >&2
        fi
    done
}
# Summarize ERROR/WARN/INFO line counts for every collected archive,
# appending per-server sections to a dated report under /tmp.
function analyze_logs() {
    local log_dir="/tmp/collected_logs"
    local analysis_report="/tmp/log_analysis_$(date +%Y%m%d).txt"
    echo "=== Log Analysis Report - $(date) ===" > "$analysis_report"
    local archive server
    for archive in "$log_dir"/*.tar.gz; do
        # With no matching files the glob stays literal — skip it.
        [ -e "$archive" ] || continue
        server=$(basename "$archive" _logs.tar.gz)
        echo "=== $server ===" >> "$analysis_report"
        tar -xzf "$archive" -O | \
        awk '
        BEGIN { errors = warnings = info = 0 }
        /ERROR/ { errors++ }
        /WARN/ { warnings++ }
        /INFO/ { info++ }
        END {
        print "Errors: " errors
        print "Warnings: " warnings
        print "Info: " info
        print "Total: " (errors + warnings + info)
        }' >> "$analysis_report"
    done
}
#!/bin/bash
# Real-time system performance dashboard
# Render a self-refreshing (every 5 s) terminal dashboard of CPU,
# memory, disk, load, and top-process statistics. Loops until
# interrupted. NOTE(review): the top/free/df parsing below assumes
# GNU/procps output formats — verify on the target distribution.
function create_dashboard() {
while true; do
clear
echo "=== System Performance Dashboard - $(date) ==="
echo
# CPU usage (extract idle % from top's summary line, print 100 - idle)
echo "CPU Usage:"
top -bn1 | grep "Cpu(s)" | sed "s/.*, *\([0-9.]*\)%* id.*/\1/" | \
awk '{print " Usage: " 100 - $1 "%"}'
# Memory usage
# NOTE(review): with -h, $3/$2 are human-readable strings ("1.2Gi");
# awk's numeric coercion makes $3*100/$2 unreliable when the units
# differ — plain `free` (no -h) would give a correct ratio. Confirm.
echo "Memory Usage:"
free -h | awk 'NR==2{printf " Used: %s/%s (%.2f%%)\n", $3, $2, $3*100/$2}'
# Disk usage (root filesystem only)
echo "Disk Usage:"
df -h | awk '$NF=="/" {printf " Root: %s/%s (%s)\n", $3, $2, $5}'
# Load average
echo "Load Average:"
uptime | awk -F'load average:' '{print " " $2}'
# Top processes (skip the header row, show the next 5)
echo "Top CPU Processes:"
ps aux --sort=-%cpu | head -6 | tail -5 | \
awk '{printf " %s: %.1f%%\n", $11, $3}'
sleep 5
done
}
#!/bin/bash
# Incremental backup system with compression and encryption
# Full-plus-incremental encrypted backups.
# Arguments:
#   $1 - source directory to back up
#   $2 - root directory that holds all backup sets
#   $3 - name of this backup set
#   $4 - symmetric encryption passphrase
# First run creates a full backup and a "timestamp" marker; later runs
# archive only files newer than that marker.
# NOTE(review): gpg --passphrase may still prompt interactively under
# gpg2 unless --batch/--pinentry-mode loopback is added — confirm on
# the target gpg version.
function incremental_backup() {
    local source_dir="$1"
    local backup_root="$2"
    local backup_name="$3"
    local encryption_key="$4"
    local timestamp
    timestamp=$(date +%Y%m%d_%H%M%S)
    local backup_dir="$backup_root/$backup_name"
    local full_backup_dir="$backup_dir/full"
    local incr_backup_dir="$backup_dir/incremental"
    mkdir -p "$full_backup_dir" "$incr_backup_dir"
    # Bug fix: decide full vs incremental via the "timestamp" marker.
    # The original tested for a literal "backup.tar.gz.gpg", but backups
    # are written with a timestamp embedded in the name, so that file
    # never existed and every run produced another full backup.
    if [ ! -f "$full_backup_dir/timestamp" ]; then
        echo "Creating full backup..."
        tar -czf - "$source_dir" | \
        gpg --symmetric --cipher-algo AES256 --passphrase "$encryption_key" \
        --output "$full_backup_dir/backup_${timestamp}.tar.gz.gpg"
        touch "$full_backup_dir/timestamp"
        echo "Full backup completed: $full_backup_dir/backup_${timestamp}.tar.gz.gpg"
    else
        echo "Creating incremental backup..."
        # NOTE(review): find | tar -T - breaks on filenames containing
        # newlines; with GNU tar, find -print0 plus --null -T - is safer.
        find "$source_dir" -newer "$full_backup_dir/timestamp" -type f | \
        tar -czf - -T - | \
        gpg --symmetric --cipher-algo AES256 --passphrase "$encryption_key" \
        --output "$incr_backup_dir/backup_${timestamp}.tar.gz.gpg"
        echo "Incremental backup completed: $incr_backup_dir/backup_${timestamp}.tar.gz.gpg"
    fi
}
Congratulations! You've reached the advanced level of bash mastery. You now possess:
Technical excellence: advanced sed, awk, and grep techniques.
Professional skills: robust scripting, logging, service management, and automation practices.
Problem-solving abilities: the judgment to combine these tools into complete, working solutions.
You've mastered the command line at an advanced level, but the learning never stops. Consider exploring:
You're no longer just a user—you're a systems architect, a problem solver, and a force multiplier for your organization. Your advanced bash skills enable you to:
The command line is now your canvas, and you're equipped to paint solutions that others can only dream of. Every script you write, every automation you create, and every problem you solve contributes to a more efficient, reliable, and powerful computing environment.
Remember: with great power comes great responsibility. Use your skills to build, to help, and to make the digital world a better place.
"Advanced bash mastery isn't just about knowing commands—it's about seeing possibilities where others see obstacles, and building bridges where others see walls."
Welcome to the ranks of command line masters. The terminal is your domain, and there's no limit to what you can achieve.