实战项目¶
📌 学习时间:5-7天 📌 难度级别:⭐⭐⭐⭐ 中高级 📌 前置知识:前面所有章节
📚 章节概述¶
本章通过三个完整的实战项目,综合运用前面所有章节的知识。每个项目都包含需求分析、架构设计和完整的代码实现,帮助你在实际场景中巩固 Linux 和 Shell 编程技能。
🎯 学习目标¶
- 完成「自动化服务器监控脚本」项目
- 完成「日志分析与报警系统」项目
- 完成「自动化部署脚本」项目
- 能独立设计和实现运维自动化脚本
📖 项目一:自动化服务器监控脚本¶
1.1 需求分析¶
- 定时监控服务器 CPU、内存、磁盘、网络状态
- 监控关键服务(Nginx/MySQL/Redis)是否在运行
- 超过阈值时发送告警(邮件/钉钉/企业微信)
- 生成每日监控报告
- 支持通过配置文件自定义阈值
1.2 项目结构¶
Bash
server-monitor/
├── config/
│ └── monitor.conf # 配置文件
├── lib/
│ ├── common.sh # 公共函数库
│ ├── check_cpu.sh # CPU 检查
│ ├── check_mem.sh # 内存检查
│ ├── check_disk.sh # 磁盘检查
│ ├── check_service.sh # 服务检查
│ └── notify.sh # 告警通知
├── logs/
│ └── monitor.log # 日志文件
├── reports/
│ └── daily_report.html # 日报
├── monitor.sh # 主脚本
├── report.sh # 报告生成脚本
└── install.sh # 安装脚本(配置 Crontab)
1.3 配置文件¶
Bash
# config/monitor.conf
# Sourced by lib/common.sh — plain KEY=VALUE shell assignments only.
# === Alert thresholds (percent) ===
CPU_WARN_THRESHOLD=70
CPU_CRIT_THRESHOLD=90
MEM_WARN_THRESHOLD=80
MEM_CRIT_THRESHOLD=95
DISK_WARN_THRESHOLD=80
DISK_CRIT_THRESHOLD=90
# === Services to monitor (space-separated systemd unit names) ===
SERVICES="nginx mysql redis-server"
# === Alert delivery method ===
# Options: email, dingtalk, wechat, log
# NOTE(review): only email/dingtalk/log are implemented in lib/notify.sh;
# "wechat" currently falls through to the unknown-method error branch.
ALERT_METHOD="log"
# Recipient for ALERT_METHOD=email (requires a working `mail` command)
ALERT_EMAIL="admin@example.com"
# Robot webhook for ALERT_METHOD=dingtalk
DINGTALK_WEBHOOK="https://oapi.dingtalk.com/robot/send?access_token=YOUR_TOKEN"
# === Check interval in seconds ===
# NOTE(review): not referenced by the scripts shown here — scheduling is
# done via cron in install.sh; confirm before relying on it.
CHECK_INTERVAL=60
# === Logging ===
LOG_DIR="/opt/server-monitor/logs"
LOG_KEEP_DAYS=30
1.4 公共函数库¶
Bash
#!/bin/bash
# lib/common.sh — shared helper library: config loading, logging helpers,
# and colored terminal output. Sourced by monitor.sh and the check
# modules; not meant to be executed directly.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
BASE_DIR="$(dirname "$SCRIPT_DIR")"
CONFIG_FILE="${BASE_DIR}/config/monitor.conf"

# Load the user configuration; abort if the file is missing.
load_config() {
    if [ -f "$CONFIG_FILE" ]; then
        # shellcheck source=/dev/null
        source "$CONFIG_FILE"
    else
        echo "配置文件不存在: $CONFIG_FILE" >&2
        exit 1
    fi
}

# Load the config FIRST so that a LOG_DIR set in monitor.conf takes
# effect. The original derived LOG_FILE and ran mkdir before sourcing
# the config, which silently ignored the configured log directory.
load_config
LOG_DIR="${LOG_DIR:-${BASE_DIR}/logs}"
LOG_FILE="${LOG_DIR}/monitor.log"
mkdir -p "$LOG_DIR"

# Logging helpers: timestamped lines to the terminal AND the log file.
# WARN/ERROR additionally go to stderr so cron captures them separately.
log_info()  { echo "[$(date '+%Y-%m-%d %H:%M:%S')] [INFO] $*" | tee -a "$LOG_FILE"; }
log_warn()  { echo "[$(date '+%Y-%m-%d %H:%M:%S')] [WARN] $*" | tee -a "$LOG_FILE" >&2; }
log_error() { echo "[$(date '+%Y-%m-%d %H:%M:%S')] [ERROR] $*" | tee -a "$LOG_FILE" >&2; }

# ANSI colors for per-check status lines.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
pass() { printf "${GREEN}[OK]${NC} %s\n" "$1"; }
warn() { printf "${YELLOW}[WARN]${NC} %s\n" "$1"; }
fail() { printf "${RED}[CRIT]${NC} %s\n" "$1"; }
1.5 各检查模块¶
Bash
#!/bin/bash
# lib/check_cpu.sh — overall CPU utilization check.
check_cpu() {
    # Compare current CPU usage (user + system, sampled via `top -bn1`)
    # against the configured warn/crit thresholds. Prints one colored
    # human-readable line, then a final machine-readable "STATUS|usage"
    # line for the caller to parse.
    # NOTE(review): parses `top` text output; field positions can vary
    # across distros/locales — confirm $2 (us) and $4 (sy) on the target.
    local cpu_usage status
    cpu_usage=$(top -bn1 | grep "Cpu(s)" | awk '{print int($2 + $4)}')
    if [ "$cpu_usage" -ge "${CPU_CRIT_THRESHOLD:-90}" ]; then
        status="CRITICAL"
        fail "CPU 使用率: ${cpu_usage}%(阈值: ${CPU_CRIT_THRESHOLD}%)"
    elif [ "$cpu_usage" -ge "${CPU_WARN_THRESHOLD:-70}" ]; then
        status="WARNING"
        warn "CPU 使用率: ${cpu_usage}%(阈值: ${CPU_WARN_THRESHOLD}%)"
    else
        status="OK"
        pass "CPU 使用率: ${cpu_usage}%"
    fi
    echo "$status|$cpu_usage"
}
Bash
#!/bin/bash
# lib/check_mem.sh — memory usage check.
check_mem() {
    # Derive memory usage% from `free` and compare against thresholds.
    # Prints one colored human-readable line, then a final
    # machine-readable "STATUS|usage" line.
    # `free` Mem: row fields: total($2) used($3) ... available($7).
    local total used available usage
    local status="OK"
    # Parse the row once with a single read instead of four echo|cut
    # round-trips. The awk format ends with \n so `read` sees a complete
    # line and returns 0 (a missing newline would make it return 1 and
    # trip `set -e` in the caller).
    IFS='|' read -r total used available usage < <(
        free | awk 'NR==2{printf "%d|%d|%d|%.0f\n", $2, $3, $7, $3/$2*100}'
    )
    # total/used are parsed but currently unused; kept to document the
    # field layout for future report output.
    if [ "$usage" -ge "${MEM_CRIT_THRESHOLD:-95}" ]; then
        status="CRITICAL"
        fail "内存使用率: ${usage}%(可用: $(numfmt --to=iec $((available*1024))))"
    elif [ "$usage" -ge "${MEM_WARN_THRESHOLD:-80}" ]; then
        status="WARNING"
        warn "内存使用率: ${usage}%(可用: $(numfmt --to=iec $((available*1024))))"
    else
        pass "内存使用率: ${usage}%(可用: $(numfmt --to=iec $((available*1024))))"
    fi
    echo "$status|$usage"
}
Bash
#!/bin/bash
# lib/check_disk.sh — per-filesystem disk usage check.
check_disk() {
    # Evaluate every /dev-backed filesystem against the thresholds.
    # Prints one colored line per mount, then the worst overall status
    # (OK / WARNING / CRITICAL) as the final line.
    local overall_status="OK"
    local _fs _size _used avail pct mount usage
    # df -P (POSIX mode) keeps each filesystem on a single output line
    # even when the device name is long; without it, wrapped lines break
    # positional field parsing. Reading fields directly also avoids one
    # awk/tr process per line. `mount` is last so it absorbs any spaces.
    while read -r _fs _size _used avail pct mount; do
        usage=${pct%\%}
        if [ "$usage" -ge "${DISK_CRIT_THRESHOLD:-90}" ]; then
            overall_status="CRITICAL"
            fail "磁盘 $mount 使用 ${usage}%(可用: $avail)"
        elif [ "$usage" -ge "${DISK_WARN_THRESHOLD:-80}" ]; then
            # WARNING must not downgrade an earlier CRITICAL.
            [ "$overall_status" != "CRITICAL" ] && overall_status="WARNING"
            warn "磁盘 $mount 使用 ${usage}%(可用: $avail)"
        else
            pass "磁盘 $mount 使用 ${usage}%(可用: $avail)"
        fi
    done < <(df -hP | grep '^/dev')
    echo "$overall_status"
}
Bash
#!/bin/bash
# lib/check_service.sh — systemd service liveness check with one
# best-effort automatic restart per failed service.
check_services() {
    # Prints one colored line per configured service, then the overall
    # status word (OK / CRITICAL) as the final line.
    local overall_status="OK"
    local service pid
    # $SERVICES is intentionally unquoted: it is a space-separated list
    # of unit names from monitor.conf.
    for service in $SERVICES; do
        if systemctl is-active "$service" &>/dev/null; then
            # Declaration split from assignment so a systemctl failure
            # is not masked by `local`'s own exit status.
            pid=$(systemctl show -p MainPID "$service" | cut -d= -f2)
            pass "服务 $service 运行正常 (PID: $pid)"
        else
            overall_status="CRITICAL"
            fail "服务 $service 未运行!"
            # Best-effort self-healing: one restart attempt per run.
            log_warn "尝试重启 $service..."
            # `|| true`: a failed restart must not abort the whole check
            # under `set -e` — the re-check below reports the outcome.
            sudo systemctl restart "$service" || true
            sleep 3
            if systemctl is-active "$service" &>/dev/null; then
                log_info "$service 重启成功"
            else
                log_error "$service 重启失败!"
            fi
        fi
    done
    echo "$overall_status"
}
1.6 告警通知模块¶
Bash
#!/bin/bash
# lib/notify.sh — alert dispatch (log / email / DingTalk).
send_alert() {
    # Dispatch an alert through the method configured in ALERT_METHOD.
    # $1 - level (WARNING / CRITICAL); $2 - human-readable message.
    # Every alert is always written to the log, whatever the method.
    # (The original also computed timestamp/hostname locals that were
    # never used — removed.)
    local level="$1"
    local message="$2"
    log_warn "[$level] $message"
    case "${ALERT_METHOD}" in
        email)
            send_email "$level" "$message"
            ;;
        dingtalk)
            send_dingtalk "$level" "$message"
            ;;
        log)
            # Log-only mode: the log_warn above is the whole alert.
            ;;
        *)
            # NOTE(review): monitor.conf also documents "wechat" as an
            # option but no handler exists yet — it lands here.
            log_error "未知的告警方式: ${ALERT_METHOD}"
            ;;
    esac
}
send_email() {
    # Email alert. $1 - level; $2 - message.
    # Requires a configured MTA (`mail` command) and ALERT_EMAIL.
    local subject="[$1] 服务器告警 - $(hostname)"
    local body="时间: $(date)\n主机: $(hostname)\n级别: $1\n详情: $2"
    # printf '%b' expands the \n escapes portably; `echo -e` behaves
    # differently across shells and echo implementations.
    printf '%b\n' "$body" | mail -s "$subject" "${ALERT_EMAIL}"
}
send_dingtalk() {
    # DingTalk webhook alert. $1 - level; $2 - message.
    local level="$1"
    local message="$2"
    # Escape backslashes and double quotes so a message containing them
    # cannot break the hand-built JSON payload (order matters: escape
    # backslashes first).
    # NOTE(review): for full safety, build the payload with jq instead.
    message=${message//\\/\\\\}
    message=${message//\"/\\\"}
    curl -s -H "Content-Type: application/json" \
        -d "{
            \"msgtype\": \"markdown\",
            \"markdown\": {
                \"title\": \"服务器告警\",
                \"text\": \"## [$level] 服务器告警\n\n- **主机**: $(hostname)\n- **时间**: $(date '+%Y-%m-%d %H:%M:%S')\n- **详情**: $message\"
            }
        }" "${DINGTALK_WEBHOOK}" > /dev/null 2>&1
}
1.7 主监控脚本¶
Bash
#!/bin/bash
# monitor.sh — main entry point: run every check module, print a
# human-readable report, and raise an alert at the worst level seen.
# Each check_* function prints colored human lines followed by a final
# machine-readable status line ("STATUS" or "STATUS|value").
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${SCRIPT_DIR}/lib/common.sh"
source "${SCRIPT_DIR}/lib/check_cpu.sh"
source "${SCRIPT_DIR}/lib/check_mem.sh"
source "${SCRIPT_DIR}/lib/check_disk.sh"
source "${SCRIPT_DIR}/lib/check_service.sh"
source "${SCRIPT_DIR}/lib/notify.sh"

echo "=================================="
echo " 服务器监控 — $(hostname)"
echo " $(date '+%Y-%m-%d %H:%M:%S')"
echo "=================================="
echo ""

# Capturing $(check_*) swallows the human-readable lines, so re-print
# everything except the trailing status line (the original captured the
# output but never displayed the per-check details).

# CPU 检查
echo "--- CPU ---"
cpu_result=$(check_cpu)
printf '%s\n' "$cpu_result" | sed '$d'
cpu_status=$(printf '%s\n' "$cpu_result" | tail -1 | cut -d'|' -f1)

# 内存检查
echo ""
echo "--- 内存 ---"
mem_result=$(check_mem)
printf '%s\n' "$mem_result" | sed '$d'
mem_status=$(printf '%s\n' "$mem_result" | tail -1 | cut -d'|' -f1)

# 磁盘检查
echo ""
echo "--- 磁盘 ---"
disk_result=$(check_disk)
printf '%s\n' "$disk_result" | sed '$d'
disk_status=$(printf '%s\n' "$disk_result" | tail -1)

# 服务检查
echo ""
echo "--- 服务 ---"
service_result=$(check_services)
printf '%s\n' "$service_result" | sed '$d'
service_status=$(printf '%s\n' "$service_result" | tail -1)

# Aggregate the four statuses and alert once at the worst level.
echo ""
echo "=================================="
has_critical=false
has_warning=false
for status in "$cpu_status" "$mem_status" "$disk_status" "$service_status"; do
    case "$status" in
        CRITICAL) has_critical=true ;;
        WARNING) has_warning=true ;;
    esac
done
if $has_critical; then
    fail "综合状态: 严重告警"
    send_alert "CRITICAL" "服务器存在严重问题,请立即处理!"
elif $has_warning; then
    warn "综合状态: 一般告警"
    send_alert "WARNING" "服务器存在潜在问题,请关注。"
else
    pass "综合状态: 一切正常"
fi
log_info "监控检查完成 — CPU:$cpu_status MEM:$mem_status DISK:$disk_status SVC:$service_status"
📖 项目二:日志分析与报警系统¶
2.1 需求分析¶
- 实时监控 Nginx/应用日志
- 统计访问量、错误率、响应时间等指标
- 检测异常模式(暴力破解、DDoS、大量404)
- 生成可视化报告(终端表格或 HTML)
- 异常告警
2.2 核心脚本¶
Bash
#!/bin/bash
# log_analyzer.sh — Nginx access-log analyzer (combined log format).
# Usage: ./log_analyzer.sh [access_log]
# Prints request totals, status-code distribution, top IPs/URLs, 404s,
# hourly traffic and total bandwidth to the terminal.
set -euo pipefail
LOG_FILE="${1:-/var/log/nginx/access.log}"
REPORT_DIR="./reports"   # NOTE(review): reserved for report output; unused below
DATE=$(date +%Y%m%d)     # NOTE(review): reserved for dated report names; unused below
mkdir -p "$REPORT_DIR"
if [ ! -f "$LOG_FILE" ]; then
    echo "日志文件不存在: $LOG_FILE"
    exit 1
fi
echo "========================================"
echo " Nginx 日志分析报告"
echo " 日志: $LOG_FILE"
echo " 时间: $(date)"
echo "========================================"

# Total request count.
total_requests=$(wc -l < "$LOG_FILE")
# Every percentage below divides by total_requests; bail out early on an
# empty log instead of crashing on division by zero under `set -e`.
if [ "$total_requests" -eq 0 ]; then
    echo "日志为空: $LOG_FILE"
    exit 0
fi
echo ""
echo "📊 总请求数: $total_requests"

# HTTP status-code distribution ($9 = status in the combined format).
echo ""
echo "📊 状态码分布:"
echo "----------------------------"
awk '{print $9}' "$LOG_FILE" | sort | uniq -c | sort -rn | \
while read count code; do
    bar=$(printf '%*s' $((count * 50 / total_requests)) '' | tr ' ' '█')
    printf " %s %6d (%5.1f%%) %s\n" "$code" "$count" "$(echo "scale=1; $count*100/$total_requests" | bc)" "$bar"
done

# Error rate (4xx + 5xx responses).
error_count=$(awk '$9 >= 400 {count++} END{print count+0}' "$LOG_FILE")
error_rate=$(echo "scale=2; $error_count * 100 / $total_requests" | bc 2>/dev/null || echo "0")
echo ""
echo "📊 错误率: ${error_rate}% ($error_count / $total_requests)"

# Top 10 client IPs by request count.
echo ""
echo "📊 Top 10 访问 IP:"
echo "----------------------------"
awk '{print $1}' "$LOG_FILE" | sort | uniq -c | sort -rn | head -10 | \
while read count ip; do
    printf " %-16s %6d 次\n" "$ip" "$count"
done

# Top 10 requested URLs ($7 = request path).
echo ""
echo "📊 Top 10 请求 URL:"
echo "----------------------------"
awk '{print $7}' "$LOG_FILE" | sort | uniq -c | sort -rn | head -10 | \
while read count url; do
    printf " %-50s %6d 次\n" "${url:0:50}" "$count"
done

# Top 10 URLs that returned 404.
echo ""
echo "📊 Top 10 404 页面:"
echo "----------------------------"
awk '$9 == 404 {print $7}' "$LOG_FILE" | sort | uniq -c | sort -rn | head -10 | \
while read count url; do
    printf " %-50s %6d 次\n" "${url:0:50}" "$count"
done

# Requests per hour: splitting on '/' and ':' makes the hour field $4.
echo ""
echo "📊 每小时请求量:"
echo "----------------------------"
awk -F'[/:]' '{print $4}' "$LOG_FILE" | sort | uniq -c | \
while read count hour; do
    bar=$(printf '%*s' $((count * 40 / total_requests + 1)) '' | tr ' ' '▓')
    printf " %s:00 %6d %s\n" "$hour" "$count" "$bar"
done

# Total bytes transferred ($10 = response size in bytes).
total_bytes=$(awk '{sum+=$10} END{print sum+0}' "$LOG_FILE")
echo ""
echo "📊 总传输量: $(numfmt --to=iec $total_bytes 2>/dev/null || echo "${total_bytes} bytes")"

# Response-time analysis would require a custom Nginx log format that
# includes $request_time; not implemented here.
2.3 异常检测脚本¶
Bash
#!/bin/bash
# anomaly_detector.sh — scan access/auth logs for suspicious patterns:
# SSH brute force, HTTP error spikes, high-frequency clients, and
# large-bandwidth requests.
# Usage: ./anomaly_detector.sh [nginx_access_log] [auth_log]
set -euo pipefail
LOG_FILE="${1:-/var/log/nginx/access.log}"
AUTH_LOG="${2:-/var/log/auth.log}"
echo "🔍 异常检测报告 — $(date)"
echo ""
# 1. Possible SSH brute-force attempts (failed logins in the last hour).
# NOTE(review): `date -d '1 hour ago'` is GNU-specific, and $(NF-3)
# assumes the classic sshd "Failed password ... from <ip> port ..."
# syslog layout — confirm both on the target system.
echo "--- SSH 暴力破解检测 ---"
if [ -f "$AUTH_LOG" ]; then
echo "最近1小时失败的 SSH 登录尝试(Top 10 IP):"
grep "Failed password" "$AUTH_LOG" 2>/dev/null | \
grep "$(date -d '1 hour ago' '+%b %d %H')" 2>/dev/null | \
awk '{print $(NF-3)}' | sort | uniq -c | sort -rn | head -10 | \
while read count ip; do
if [ "$count" -gt 10 ]; then
echo " ⚠️ $ip: $count 次失败尝试(可能是暴力破解!)"
else
echo " ℹ️ $ip: $count 次失败尝试"
fi
done || true # grep exits 1 on no match; without this guard, set -e + pipefail would kill the script
fi
# 2. Surge of 4xx/5xx responses among the most recent requests.
echo ""
echo "--- HTTP 错误激增检测 ---"
if [ -f "$LOG_FILE" ]; then
recent_errors=$(tail -1000 "$LOG_FILE" | awk '$9 >= 400' | wc -l)
if [ "$recent_errors" -gt 100 ]; then
echo " ⚠️ 最近1000条请求中有 $recent_errors 个错误响应!"
else
echo " ✅ 错误率正常 ($recent_errors/1000)"
fi
fi
# 3. Abnormal traffic: a single IP issuing a large share of recent
# requests (possible DDoS or aggressive crawler).
echo ""
echo "--- 高频访问检测 ---"
if [ -f "$LOG_FILE" ]; then
tail -10000 "$LOG_FILE" | awk '{print $1}' | sort | uniq -c | sort -rn | head -5 | \
while read count ip; do
if [ "$count" -gt 1000 ]; then
echo " ⚠️ $ip: $count 次请求(疑似 DDoS 或爬虫)"
elif [ "$count" -gt 500 ]; then
echo " ℹ️ $ip: $count 次请求(较高频率)"
fi
done
fi
# 4. Large downloads (>100MB; $10 is the response size in bytes).
echo ""
echo "--- 大带宽请求检测 ---"
if [ -f "$LOG_FILE" ]; then
awk '$10 > 104857600 {printf " %s 下载 %.1fMB - %s\n", $1, $10/1048576, $7}' "$LOG_FILE" | head -10
fi
echo ""
echo "检测完成 — $(date)"
📖 项目三:自动化部署脚本¶
3.1 需求分析¶
- 支持从 Git 仓库拉取代码
- 自动构建(编译/安装依赖)
- 蓝绿部署策略
- 自动回滚机制
- 健康检查
- 部署日志和通知
3.2 核心部署脚本¶
Bash
#!/bin/bash
# deploy.sh — automated deployment: shallow clone → build → atomic
# symlink switch → health check with automatic rollback.
# 用法: ./deploy.sh [branch] [--rollback]
set -euo pipefail
# ==================== 配置 ====================
APP_NAME="myapp"
REPO_URL="git@github.com:user/myapp.git"
DEPLOY_DIR="/opt/deploy"
APP_DIR="${DEPLOY_DIR}/${APP_NAME}"   # NOTE(review): not referenced below — verify before removing
RELEASES_DIR="${DEPLOY_DIR}/releases" # one timestamped directory per deploy
SHARED_DIR="${DEPLOY_DIR}/shared"     # state shared across releases (logs, config)
CURRENT_LINK="${DEPLOY_DIR}/current"  # symlink pointing at the live release
KEEP_RELEASES=5                       # number of old releases to retain
BRANCH="${1:-main}"                   # branch to deploy (first CLI argument)
LOG_FILE="${DEPLOY_DIR}/deploy.log"
HEALTH_CHECK_URL="http://localhost:8080/health"
HEALTH_CHECK_TIMEOUT=30               # seconds to wait for the app to come up
# ==================== 函数 ====================
# Current time formatted for log lines.
timestamp() {
    date '+%Y-%m-%d %H:%M:%S'
}

# Log a timestamped message to stdout and append it to the deploy log.
log() {
    printf '%s\n' "[$(timestamp)] $*" | tee -a "$LOG_FILE"
}

# Log a fatal error and abort the script.
die() {
    log "❌ 错误: $*"
    exit 1
}
# Verify that every external tool the deploy needs is installed.
check_deps() {
    local tool
    for tool in git curl systemctl; do
        if ! command -v "$tool" &>/dev/null; then
            die "缺少依赖: $tool"
        fi
    done
}
# Poll a URL once per second until it responds or the timeout elapses.
# $1 - URL to probe; $2 - timeout in seconds (default 30).
# Returns 0 when the endpoint became healthy, 1 on timeout.
health_check() {
    local url="$1"
    local timeout="${2:-30}"
    local attempt=0
    log "执行健康检查: $url"
    while [ "$attempt" -lt "$timeout" ]; do
        if curl -sf "$url" > /dev/null 2>&1; then
            log "✅ 健康检查通过"
            return 0
        fi
        # Deliberately NOT ((attempt++)): that returns status 1 when the
        # old value is 0, which would kill the script under `set -e` if
        # this function is ever called outside an if/&& context.
        attempt=$((attempt + 1))
        sleep 1
    done
    log "❌ 健康检查失败(超时 ${timeout}秒)"
    return 1
}
# ==================== 部署流程 ====================
# Full deploy: clone → link shared dirs → install/build → optional tests
# → atomic symlink switch → health check (auto-rollback on failure) →
# prune old releases → append audit record.
deploy() {
    # Declarations split from assignments so a failing command is not
    # masked by `local`'s own (always zero) exit status.
    local release_id release_dir
    release_id=$(date +%Y%m%d%H%M%S)
    release_dir="${RELEASES_DIR}/${release_id}"
    log "========================================="
    log " 开始部署 ${APP_NAME}"
    log " 分支: ${BRANCH}"
    log " 发布: ${release_id}"
    log "========================================="
    # 1. Verify required tools are installed.
    check_deps
    # 2. Create the directory layout.
    mkdir -p "$RELEASES_DIR" "$SHARED_DIR/logs" "$SHARED_DIR/config"
    # 3. Fetch the code: shallow clone of just the requested branch.
    log "📥 拉取代码..."
    git clone --branch "$BRANCH" --depth 1 "$REPO_URL" "$release_dir" || \
        die "代码拉取失败"
    local commit
    commit=$(cd "$release_dir" && git rev-parse --short HEAD)
    log "代码版本: ${commit}"
    # 4. Symlink shared state (logs, env file) into the new release.
    log "🔗 链接共享目录..."
    ln -sfn "${SHARED_DIR}/logs" "${release_dir}/logs"
    ln -sfn "${SHARED_DIR}/config/.env" "${release_dir}/.env" 2>/dev/null || true
    # 5. Install dependencies based on the detected project type.
    log "📦 安装依赖..."
    cd "$release_dir"
    if [ -f "requirements.txt" ]; then
        pip install -r requirements.txt -q || die "Python依赖安装失败"
    elif [ -f "package.json" ]; then
        npm ci --production || die "Node.js依赖安装失败"
    elif [ -f "go.mod" ]; then
        go build -o "${APP_NAME}" . || die "Go构建失败"
    fi
    # 6. Run tests when the project's Makefile defines a test target.
    if [ -f "Makefile" ] && grep -q "^test:" Makefile; then
        log "🧪 运行测试..."
        make test || die "测试失败"
    fi
    # 7. Atomic switch: build the symlink aside, then rename over the
    # old one so `current` never points at a half-deployed tree.
    log "🔀 切换到新版本..."
    local old_release=""
    if [ -L "$CURRENT_LINK" ]; then
        old_release=$(readlink "$CURRENT_LINK")
    fi
    ln -sfn "$release_dir" "${CURRENT_LINK}.tmp"
    mv -Tf "${CURRENT_LINK}.tmp" "$CURRENT_LINK"
    # 8. Restart the service so it picks up the new release.
    log "🔄 重启服务..."
    sudo systemctl restart "$APP_NAME" || die "服务重启失败"
    # 9. Health check; roll the symlink back on failure.
    sleep 3
    if ! health_check "$HEALTH_CHECK_URL" "$HEALTH_CHECK_TIMEOUT"; then
        log "⚠️ 健康检查失败,执行回滚..."
        if [ -n "$old_release" ] && [ -d "$old_release" ]; then
            ln -sfn "$old_release" "${CURRENT_LINK}.tmp"
            mv -Tf "${CURRENT_LINK}.tmp" "$CURRENT_LINK"
            sudo systemctl restart "$APP_NAME"
            die "部署失败,已回滚到上一版本"
        else
            die "部署失败,无可回滚版本"
        fi
    fi
    # 10. Keep only the newest KEEP_RELEASES releases. xargs -r skips
    # rm entirely when there is nothing to delete (the original relied
    # on rm erroring out and being silenced).
    log "🧹 清理旧版本..."
    cd "$RELEASES_DIR"
    ls -1dt */ | tail -n +$((KEEP_RELEASES + 1)) | xargs -r rm -rf 2>/dev/null || true
    # 11. Append an audit record of this deploy.
    echo "$release_id|$BRANCH|$commit|$(date)" >> "${DEPLOY_DIR}/deploy_history.log"
    log "========================================="
    log "✅ 部署成功!"
    log " 版本: ${release_id} (${commit})"
    log "========================================="
}
# ==================== 回滚流程 ====================
# Switch `current` to the second-newest release and restart the service.
# NOTE(review): assumes the newest release directory is the live one —
# if a deploy failed after clone but before the switch, that assumption
# can be wrong; verify against `readlink current` if in doubt.
rollback() {
    log "🔙 开始回滚..."
    cd "$RELEASES_DIR"
    # mapfile avoids word-splitting/globbing of the ls output and keeps
    # `local`'s status from masking a failing command (the original used
    # `local releases=($(ls ...))`, which does both).
    local -a releases
    mapfile -t releases < <(ls -1dt -- */)
    if [ ${#releases[@]} -lt 2 ]; then
        die "没有可回滚的版本"
    fi
    local current_release="${releases[0]}"
    local prev_release="${releases[1]}"
    local prev_dir="${RELEASES_DIR}/${prev_release}"
    log "当前版本: $current_release"
    log "回滚到: $prev_release"
    # Same atomic symlink swap as deploy().
    ln -sfn "$prev_dir" "${CURRENT_LINK}.tmp"
    mv -Tf "${CURRENT_LINK}.tmp" "$CURRENT_LINK"
    sudo systemctl restart "$APP_NAME"
    sleep 3
    if health_check "$HEALTH_CHECK_URL" 15; then
        log "✅ 回滚成功!当前版本: $prev_release"
    else
        log "❌ 回滚后健康检查仍然失败!"
    fi
}
# ==================== 主入口 ====================
# Usage: ./deploy.sh [branch] [--rollback]
# Accept the rollback flag in either argument position, so a plain
# `./deploy.sh --rollback` works too (the original only honored it as
# $2, and would otherwise deploy a branch literally named "--rollback").
case "${2:-${1:-deploy}}" in
    --rollback|-r)
        rollback
        ;;
    *)
        deploy
        ;;
esac
3.3 安装和 Crontab 配置¶
Bash
#!/bin/bash
# install.sh — register the monitoring jobs in the current user's
# crontab. Safe to re-run: previous entries that reference this install
# directory are replaced instead of duplicated.
set -euo pipefail
MONITOR_DIR="$(cd "$(dirname "$0")" && pwd)"
echo "安装服务器监控系统..."
# Build the new crontab: keep every unrelated entry, drop any earlier
# lines mentioning this directory, then append fresh entries.
# (The original piped everything through `sort -u`, which de-duplicated
# but also scrambled the line order and detached the comments from the
# cron entries they describe.)
(
    { crontab -l 2>/dev/null || true; } | grep -vF "${MONITOR_DIR}" || true
    cat << EOF
# 服务器监控 — 每5分钟
*/5 * * * * ${MONITOR_DIR}/monitor.sh >> ${MONITOR_DIR}/logs/cron.log 2>&1
# 每日报告 — 每天早上8点
0 8 * * * ${MONITOR_DIR}/report.sh >> ${MONITOR_DIR}/logs/cron.log 2>&1
# 日志清理 — 每天凌晨3点
0 3 * * * find ${MONITOR_DIR}/logs -name "*.log" -mtime +30 -delete
EOF
) | crontab -
echo "✅ Crontab 配置完成"
crontab -l
📖 项目实践建议¶
如何学习这些项目¶
- 第一遍:完整阅读代码,理解整体架构
- 第二遍:逐个模块手动输入并运行
- 第三遍:修改配置和阈值,测试不同场景
- 拓展:在真实服务器上部署使用
进阶方向¶
- 添加 Prometheus + Grafana 监控
- 使用 Ansible/Terraform 进行基础设施管理
- 容器化部署(Docker + Kubernetes)
- CI/CD 流水线集成(Jenkins/GitLab CI/GitHub Actions)
🔧 练习题¶
- 完善服务器监控脚本,添加网络流量监控
- 为日志分析脚本增加 HTML 报告输出
- 修改部署脚本以支持 Docker 容器化部署
- 将三个项目整合,构建一个完整的运维工具箱
✅ 自我检查¶
- 完成了服务器监控项目的部署和测试
- 能使用日志分析脚本分析 Nginx 日志
- 理解蓝绿部署和自动回滚的原理
- 能独立编写运维自动化脚本
- 将项目托管到了 Git 仓库