Performance Optimization Overview
MySQL performance optimization is a systematic effort that spans hardware, server configuration, query tuning, index design, and overall architecture. Done well, it delivers a clear improvement in application response time and user experience.
🔍 Query optimization
Tune SQL statements and choose suitable indexes and query strategies
📊 Index optimization
Design and maintain efficient index structures
⚙️ Configuration optimization
Adjust MySQL server configuration parameters
🏗️ Architecture optimization
Database architecture design, including sharding and partitioning strategies
Rough contribution of each layer to the overall gain: query optimization ~80%, index optimization ~15%, configuration tuning ~3%, hardware ~2%.
Basic principles of performance optimization:

- Measure first: measure before you optimize and avoid premature optimization (see the sketch below)
- Find the bottleneck: identify the real performance choke point
- Optimize incrementally: fix one problem at a time
- Verify the result: confirm the improvement after each change
- Monitor continuously: keep performance monitoring running at all times
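The measure → change → verify loop in its smallest form, sketched against the performance_test demo table created in the next section (the same idx_user_id index is created for real later in the tutorial; here it only illustrates the workflow):
-- 1. Measure: inspect the execution plan before changing anything
EXPLAIN SELECT * FROM performance_test WHERE user_id = 1;
-- 2. Change: add the index the plan shows is missing
CREATE INDEX idx_user_id ON performance_test(user_id);
-- 3. Verify: re-run EXPLAIN and confirm the access type changes from ALL to ref
EXPLAIN SELECT * FROM performance_test WHERE user_id = 1;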
 
Query Performance Analysis
Preparing test data
-- Create the performance test (orders) table
CREATE TABLE performance_test (
    id INT AUTO_INCREMENT PRIMARY KEY,
    user_id INT NOT NULL,
    product_id INT NOT NULL,
    order_date DATE NOT NULL,
    amount DECIMAL(10,2) NOT NULL,
    status VARCHAR(20) NOT NULL,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
);
-- Create the users table
CREATE TABLE users_perf (
    id INT AUTO_INCREMENT PRIMARY KEY,
    username VARCHAR(50) NOT NULL,
    email VARCHAR(100) NOT NULL,
    age INT,
    city VARCHAR(50),
    registration_date DATE NOT NULL
);
-- Create the products table
CREATE TABLE products_perf (
    id INT AUTO_INCREMENT PRIMARY KEY,
    name VARCHAR(100) NOT NULL,
    category VARCHAR(50) NOT NULL,
    price DECIMAL(10,2) NOT NULL,
    stock_quantity INT DEFAULT 0
);
-- Insert test data (simulating a large data set)
DELIMITER //
CREATE PROCEDURE GenerateTestData(IN record_count INT)
BEGIN
    DECLARE i INT DEFAULT 1;
    DECLARE user_count INT DEFAULT 10000;
    DECLARE product_count INT DEFAULT 1000;
    
    -- Generate user data
    WHILE i <= user_count DO
        INSERT INTO users_perf (username, email, age, city, registration_date)
        VALUES (
            CONCAT('user', i),
            CONCAT('user', i, '@example.com'),
            FLOOR(18 + RAND() * 50),
            CASE FLOOR(RAND() * 5)
                WHEN 0 THEN 'Beijing'
                WHEN 1 THEN 'Shanghai'
                WHEN 2 THEN 'Guangzhou'
                WHEN 3 THEN 'Shenzhen'
                ELSE 'Hangzhou'
            END,
            DATE_SUB(CURDATE(), INTERVAL FLOOR(RAND() * 1000) DAY)
        );
        SET i = i + 1;
    END WHILE;
    
    -- Generate product data
    SET i = 1;
    WHILE i <= product_count DO
        INSERT INTO products_perf (name, category, price, stock_quantity)
        VALUES (
            CONCAT('Product ', i),
            CASE FLOOR(RAND() * 5)
                WHEN 0 THEN 'Electronics'
                WHEN 1 THEN 'Clothing'
                WHEN 2 THEN 'Books'
                WHEN 3 THEN 'Home'
                ELSE 'Sports'
            END,
            ROUND(10 + RAND() * 1000, 2),
            FLOOR(RAND() * 1000)
        );
        SET i = i + 1;
    END WHILE;
    
    -- Generate order data
    SET i = 1;
    WHILE i <= record_count DO
        INSERT INTO performance_test (user_id, product_id, order_date, amount, status)
        VALUES (
            FLOOR(1 + RAND() * user_count),
            FLOOR(1 + RAND() * product_count),
            DATE_SUB(CURDATE(), INTERVAL FLOOR(RAND() * 365) DAY),
            ROUND(10 + RAND() * 1000, 2),
            CASE FLOOR(RAND() * 4)
                WHEN 0 THEN 'pending'
                WHEN 1 THEN 'completed'
                WHEN 2 THEN 'cancelled'
                ELSE 'shipped'
            END
        );
        
        IF i % 10000 = 0 THEN
            SELECT CONCAT('Generated ', i, ' records') as progress;
        END IF;
        
        SET i = i + 1;
    END WHILE;
END //
DELIMITER ;
-- Generate test data (adjust the volume as needed)
-- CALL GenerateTestData(100000);  -- generate 100,000 order rows
-- Insert a handful of rows manually for the demonstrations below
INSERT INTO users_perf (username, email, age, city, registration_date) VALUES
('alice', 'alice@example.com', 25, 'Beijing', '2023-01-15'),
('bob', 'bob@example.com', 30, 'Shanghai', '2023-02-20'),
('charlie', 'charlie@example.com', 28, 'Guangzhou', '2023-03-10');
INSERT INTO products_perf (name, category, price, stock_quantity) VALUES
('Laptop', 'Electronics', 999.99, 50),
('T-Shirt', 'Clothing', 29.99, 200),
('Book', 'Books', 19.99, 100);
INSERT INTO performance_test (user_id, product_id, order_date, amount, status) VALUES
(1, 1, '2024-01-15', 999.99, 'completed'),
(2, 2, '2024-01-16', 29.99, 'pending'),
(3, 3, '2024-01-17', 19.99, 'shipped'),
(1, 2, '2024-01-18', 29.99, 'completed'),
(2, 1, '2024-01-19', 999.99, 'cancelled');
                
Analyzing queries with EXPLAIN
-- Basic EXPLAIN usage
EXPLAIN SELECT * FROM performance_test WHERE user_id = 1;
-- More detailed EXPLAIN output (JSON format)
EXPLAIN FORMAT=JSON SELECT 
    pt.id,
    pt.amount,
    pt.order_date,
    u.username,
    p.name as product_name
FROM performance_test pt
JOIN users_perf u ON pt.user_id = u.id
JOIN products_perf p ON pt.product_id = p.id
WHERE pt.status = 'completed'
  AND pt.order_date >= '2024-01-01'
ORDER BY pt.order_date DESC
LIMIT 10;
-- Analyze a heavier aggregate query
EXPLAIN SELECT 
    u.city,
    COUNT(*) as order_count,
    SUM(pt.amount) as total_amount,
    AVG(pt.amount) as avg_amount
FROM performance_test pt
JOIN users_perf u ON pt.user_id = u.id
WHERE pt.order_date BETWEEN '2023-01-01' AND '2023-12-31'
GROUP BY u.city
HAVING total_amount > 1000
ORDER BY total_amount DESC;
-- EXPLAIN ANALYZE (MySQL 8.0+) executes the query and reports actual timings
-- EXPLAIN ANALYZE SELECT * FROM performance_test WHERE amount > 500;
-- Key metrics to read in the execution plan
SELECT 
    'Key Metrics for EXPLAIN Analysis' as info,
    'type: access type (const > eq_ref > ref > range > index > ALL)' as type_info,
    'key: the index actually chosen' as key_info,
    'rows: estimated number of rows examined' as rows_info,
    'Extra: additional details (Using index, Using filesort, Using temporary)' as extra_info;
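A quick illustration of the type column against the demo tables above: a primary-key lookup should report type = const, while a filter on a column that has no index yet (amount) falls back to type = ALL, a full table scan:
-- Primary-key lookup: expect type = const, key = PRIMARY
EXPLAIN SELECT * FROM performance_test WHERE id = 1;
-- No index on amount at this point: expect type = ALL
EXPLAIN SELECT * FROM performance_test WHERE amount > 500;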
                
Performance monitoring queries
-- Check slow query log status
SHOW VARIABLES LIKE 'slow_query%';
SHOW VARIABLES LIKE 'long_query_time';
-- Enable the slow query log
-- SET GLOBAL slow_query_log = 'ON';
-- SET GLOBAL long_query_time = 2;  -- log queries that take longer than 2 seconds
-- Current connections and running queries
SHOW PROCESSLIST;
-- Server status counters
SHOW STATUS LIKE 'Slow_queries';
SHOW STATUS LIKE 'Questions';
SHOW STATUS LIKE 'Uptime';
-- Slow query ratio: fetch the two counters
-- (INFORMATION_SCHEMA.GLOBAL_STATUS was removed in MySQL 8.0; performance_schema.global_status replaces it)
SELECT 
    VARIABLE_VALUE as slow_queries
FROM INFORMATION_SCHEMA.GLOBAL_STATUS 
WHERE VARIABLE_NAME = 'Slow_queries';
SELECT 
    VARIABLE_VALUE as total_queries
FROM INFORMATION_SCHEMA.GLOBAL_STATUS 
WHERE VARIABLE_NAME = 'Questions';
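-- Combining the two counters into a single ratio (a sketch; on MySQL 5.7+/8.0
-- the counters are exposed through performance_schema.global_status):
SELECT 
    s.VARIABLE_VALUE AS slow_queries,
    q.VARIABLE_VALUE AS total_queries,
    ROUND(s.VARIABLE_VALUE / q.VARIABLE_VALUE * 100, 4) AS slow_query_percent
FROM (SELECT VARIABLE_VALUE FROM performance_schema.global_status
      WHERE VARIABLE_NAME = 'Slow_queries') s
CROSS JOIN (SELECT VARIABLE_VALUE FROM performance_schema.global_status
            WHERE VARIABLE_NAME = 'Questions') q;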
-- Table size statistics
SELECT 
    table_name,
    table_rows,
    avg_row_length,
    data_length,
    index_length,
    (data_length + index_length) as total_size
FROM information_schema.tables 
WHERE table_schema = DATABASE()
ORDER BY total_size DESC;
-- Index definitions and cardinality
SELECT 
    table_name,
    index_name,
    column_name,
    cardinality
FROM information_schema.statistics 
WHERE table_schema = DATABASE()
ORDER BY table_name, index_name;
                
Query performance testing
-- Create a simple benchmark procedure
DELIMITER //
CREATE PROCEDURE BenchmarkQuery(
    IN query_text TEXT,
    IN iterations INT
)
BEGIN
    DECLARE start_time TIMESTAMP(6);
    DECLARE end_time TIMESTAMP(6);
    DECLARE i INT DEFAULT 0;
    DECLARE total_time DECIMAL(10,6) DEFAULT 0;
    
    SET start_time = NOW(6);
    
    WHILE i < iterations DO
        -- The query under test is hard-coded here for simplicity; to benchmark an
        -- arbitrary statement, PREPARE and EXECUTE the query_text parameter instead
        SELECT COUNT(*) FROM performance_test WHERE status = 'completed' INTO @dummy;
        SET i = i + 1;
    END WHILE;
    
    SET end_time = NOW(6);
    SET total_time = TIMESTAMPDIFF(MICROSECOND, start_time, end_time) / 1000000;
    
    SELECT 
        iterations as test_iterations,
        total_time as total_seconds,
        total_time / iterations as avg_seconds_per_query,
        iterations / total_time as queries_per_second;
END //
DELIMITER ;
-- Compare query timings
-- Query before any index exists on user_id
SET @start_time = NOW(6);
SELECT COUNT(*) FROM performance_test WHERE user_id = 1;
SET @end_time = NOW(6);
SELECT TIMESTAMPDIFF(MICROSECOND, @start_time, @end_time) / 1000 as 'Query Time (ms)';
-- Re-test after creating an index
CREATE INDEX idx_user_id ON performance_test(user_id);
SET @start_time = NOW(6);
SELECT COUNT(*) FROM performance_test WHERE user_id = 1;
SET @end_time = NOW(6);
SELECT TIMESTAMPDIFF(MICROSECOND, @start_time, @end_time) / 1000 as 'Query Time with Index (ms)';
-- Composite-condition query test
CREATE INDEX idx_status_date ON performance_test(status, order_date);
SET @start_time = NOW(6);
SELECT COUNT(*), SUM(amount) 
FROM performance_test 
WHERE status = 'completed' 
  AND order_date >= '2024-01-01';
SET @end_time = NOW(6);
SELECT TIMESTAMPDIFF(MICROSECOND, @start_time, @end_time) / 1000 as 'Complex Query Time (ms)';
-- JOIN query test
SET @start_time = NOW(6);
SELECT 
    u.username,
    COUNT(pt.id) as order_count,
    SUM(pt.amount) as total_amount
FROM users_perf u
LEFT JOIN performance_test pt ON u.id = pt.user_id
GROUP BY u.id, u.username
HAVING order_count > 0;
SET @end_time = NOW(6);
SELECT TIMESTAMPDIFF(MICROSECOND, @start_time, @end_time) / 1000 as 'JOIN Query Time (ms)';
                
Index Optimization Strategies
Index design principles
Golden rules of index design:

- High-selectivity columns first: create indexes on columns with high selectivity
- Leftmost-prefix rule: composite indexes are matched from the leftmost column
- Covering indexes: prefer covering indexes to avoid going back to the table row
- Not too many indexes: balance query speed against write overhead
- Regular maintenance: analyze and rebuild indexes periodically
-- Analyze column selectivity
SELECT 
    'user_id' as column_name,
    COUNT(DISTINCT user_id) as distinct_values,
    COUNT(*) as total_rows,
    COUNT(DISTINCT user_id) / COUNT(*) as selectivity
FROM performance_test
UNION ALL
SELECT 
    'status',
    COUNT(DISTINCT status),
    COUNT(*),
    COUNT(DISTINCT status) / COUNT(*)
FROM performance_test
UNION ALL
SELECT 
    'order_date',
    COUNT(DISTINCT order_date),
    COUNT(*),
    COUNT(DISTINCT order_date) / COUNT(*)
FROM performance_test;
-- Create efficient composite indexes
-- Design indexes around the actual query patterns
CREATE INDEX idx_user_status_date ON performance_test(user_id, status, order_date);
CREATE INDEX idx_date_status_amount ON performance_test(order_date, status, amount);
-- Create a covering index
CREATE INDEX idx_covering_order ON performance_test(user_id, status, order_date, amount);
-- Check which index the optimizer chooses
EXPLAIN SELECT user_id, status, order_date, amount 
FROM performance_test 
WHERE user_id = 1 AND status = 'completed';
-- Prefix index (useful for long string columns)
ALTER TABLE users_perf ADD COLUMN description TEXT;
CREATE INDEX idx_desc_prefix ON users_perf(description(50));
-- Functional index (MySQL 8.0.13+)
-- CREATE INDEX idx_year_month ON performance_test((YEAR(order_date)), (MONTH(order_date)));
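-- A query shaped to match that functional index (kept commented out, like the
-- index definition above; illustrative only):
-- EXPLAIN SELECT COUNT(*) FROM performance_test
-- WHERE YEAR(order_date) = 2024 AND MONTH(order_date) = 1;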
-- Index usage statistics
SELECT 
    OBJECT_SCHEMA as db_name,
    OBJECT_NAME as table_name,
    INDEX_NAME as index_name,
    COUNT_FETCH as index_fetches,
    COUNT_INSERT as index_inserts,
    COUNT_UPDATE as index_updates,
    COUNT_DELETE as index_deletes
FROM performance_schema.table_io_waits_summary_by_index_usage
WHERE OBJECT_SCHEMA = DATABASE()
ORDER BY COUNT_FETCH DESC;
                
Index maintenance and optimization
-- Inspect InnoDB persistent index statistics
SELECT 
    table_name,
    index_name,
    stat_name,
    stat_value,
    stat_description
FROM mysql.innodb_index_stats 
WHERE database_name = DATABASE()
ORDER BY table_name, index_name;
-- Refresh optimizer statistics for tables and indexes
ANALYZE TABLE performance_test;
ANALYZE TABLE users_perf;
ANALYZE TABLE products_perf;
-- Check index cardinality
SHOW INDEX FROM performance_test;
-- Rebuild an index (when it has become heavily fragmented)
-- ALTER TABLE performance_test DROP INDEX idx_user_id;
-- ALTER TABLE performance_test ADD INDEX idx_user_id(user_id);
-- or use OPTIMIZE TABLE
-- OPTIMIZE TABLE performance_test;
-- Find indexes that are never used
SELECT 
    t.TABLE_SCHEMA as db_name,
    t.TABLE_NAME as table_name,
    t.INDEX_NAME as index_name,
    t.COLUMN_NAME as column_name
FROM information_schema.statistics t
LEFT JOIN performance_schema.table_io_waits_summary_by_index_usage p 
    ON t.TABLE_SCHEMA = p.OBJECT_SCHEMA 
    AND t.TABLE_NAME = p.OBJECT_NAME 
    AND t.INDEX_NAME = p.INDEX_NAME
WHERE t.TABLE_SCHEMA = DATABASE()
  AND t.INDEX_NAME != 'PRIMARY'
  AND (p.COUNT_FETCH IS NULL OR p.COUNT_FETCH = 0)
ORDER BY t.TABLE_NAME, t.INDEX_NAME;
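-- Alternative (a sketch, assuming the sys schema is installed, MySQL 5.7.7+):
SELECT * FROM sys.schema_unused_indexes WHERE object_schema = DATABASE();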
-- Find duplicate (redundant) indexes
SELECT 
    a.TABLE_SCHEMA,
    a.TABLE_NAME,
    a.INDEX_NAME as index1,
    b.INDEX_NAME as index2,
    a.COLUMN_NAME
FROM information_schema.statistics a
JOIN information_schema.statistics b 
    ON a.TABLE_SCHEMA = b.TABLE_SCHEMA
    AND a.TABLE_NAME = b.TABLE_NAME
    AND a.COLUMN_NAME = b.COLUMN_NAME
    AND a.INDEX_NAME != b.INDEX_NAME
    AND a.INDEX_NAME < b.INDEX_NAME
WHERE a.TABLE_SCHEMA = DATABASE()
ORDER BY a.TABLE_NAME, a.COLUMN_NAME;
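-- Alternative (again assuming the sys schema is available): redundant-index report
SELECT * FROM sys.schema_redundant_indexes WHERE table_schema = DATABASE();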
-- Index size analysis
SELECT 
    table_name,
    ROUND(data_length / 1024 / 1024, 2) as data_size_mb,
    ROUND(index_length / 1024 / 1024, 2) as index_size_mb,
    ROUND((index_length / data_length) * 100, 2) as index_ratio_percent
FROM information_schema.tables
WHERE table_schema = DATABASE()
  AND data_length > 0
ORDER BY index_size_mb DESC;
                
Query Optimization Techniques
SQL query optimization
-- 1. Avoid SELECT *
-- Bad:
SELECT * FROM performance_test WHERE user_id = 1;
-- Better:
SELECT id, user_id, amount, order_date, status 
FROM performance_test 
WHERE user_id = 1;
-- 2. Use LIMIT to bound the result set
-- OFFSET-based pagination
SELECT id, user_id, amount, order_date
FROM performance_test
WHERE status = 'completed'
ORDER BY order_date DESC
LIMIT 20 OFFSET 0;
-- Better pagination (keyset / seek method)
SELECT id, user_id, amount, order_date
FROM performance_test
WHERE status = 'completed'
  AND id > 1000  -- last id from the previous page
ORDER BY id
LIMIT 20;
-- 3. Optimize WHERE conditions
-- Use index-friendly range predicates
SELECT * FROM performance_test 
WHERE order_date >= '2024-01-01' 
  AND order_date < '2024-02-01';
-- Avoid wrapping indexed columns in functions
-- Bad (the function disables index use on order_date):
SELECT * FROM performance_test WHERE YEAR(order_date) = 2024;
-- Better (sargable range on the raw column):
SELECT * FROM performance_test 
WHERE order_date >= '2024-01-01' 
  AND order_date < '2025-01-01';
-- 4. Optimize JOIN queries
-- Make sure the join columns are indexed
CREATE INDEX idx_product_id ON performance_test(product_id);
SELECT 
    pt.id,
    pt.amount,
    u.username,
    p.name as product_name
FROM performance_test pt
INNER JOIN users_perf u ON pt.user_id = u.id
INNER JOIN products_perf p ON pt.product_id = p.id
WHERE pt.status = 'completed'
  AND pt.order_date >= '2024-01-01';
-- 5. Prefer EXISTS over IN for large subquery result sets
-- EXISTS form:
SELECT u.id, u.username
FROM users_perf u
WHERE EXISTS (
    SELECT 1 FROM performance_test pt 
    WHERE pt.user_id = u.id 
      AND pt.status = 'completed'
);
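-- For comparison, the equivalent IN form; on large subquery results it has
-- traditionally been slower, though the MySQL 8.0 optimizer often rewrites
-- both into the same semi-join plan:
SELECT u.id, u.username
FROM users_perf u
WHERE u.id IN (
    SELECT pt.user_id
    FROM performance_test pt
    WHERE pt.status = 'completed'
);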
-- 6. Optimize subqueries
-- Rewrite correlated subqueries as JOINs
-- Bad (the subquery runs once per user row):
SELECT 
    u.username,
    (SELECT COUNT(*) FROM performance_test pt WHERE pt.user_id = u.id) as order_count
FROM users_perf u;
-- Better (aggregate once, then join):
SELECT 
    u.username,
    COALESCE(pt_count.order_count, 0) as order_count
FROM users_perf u
LEFT JOIN (
    SELECT user_id, COUNT(*) as order_count
    FROM performance_test
    GROUP BY user_id
) pt_count ON u.id = pt_count.user_id;
                
Aggregate query optimization
-- Create an aggregation-friendly index
CREATE INDEX idx_status_date_amount ON performance_test(status, order_date, amount);
-- Optimize GROUP BY queries
SELECT 
    status,
    COUNT(*) as order_count,
    SUM(amount) as total_amount,
    AVG(amount) as avg_amount
FROM performance_test
WHERE order_date >= '2024-01-01'
GROUP BY status
ORDER BY total_amount DESC;
-- Verify the aggregation can use a covering index
EXPLAIN SELECT status, COUNT(*), SUM(amount)
FROM performance_test
WHERE order_date >= '2024-01-01'
GROUP BY status;
-- Bucketed (monthly) aggregation for large data sets
SELECT 
    DATE_FORMAT(order_date, '%Y-%m') as month,
    status,
    COUNT(*) as order_count,
    SUM(amount) as total_amount
FROM performance_test
WHERE order_date >= '2023-01-01'
GROUP BY DATE_FORMAT(order_date, '%Y-%m'), status
ORDER BY month, status;
-- Use a temporary table for multi-step aggregation
CREATE TEMPORARY TABLE monthly_stats AS
SELECT 
    DATE_FORMAT(order_date, '%Y-%m') as month,
    COUNT(*) as order_count,
    SUM(amount) as total_amount,
    AVG(amount) as avg_amount
FROM performance_test
WHERE order_date >= '2023-01-01'
GROUP BY DATE_FORMAT(order_date, '%Y-%m');
-- Further analysis on the temporary table (month-over-month growth)
SELECT 
    month,
    order_count,
    total_amount,
    total_amount - LAG(total_amount) OVER (ORDER BY month) as month_growth
FROM monthly_stats
ORDER BY month;
DROP TEMPORARY TABLE monthly_stats;
                
Bulk operation optimization
-- Bulk insert optimization
-- Insert many rows with a single multi-row INSERT ... VALUES
INSERT INTO performance_test (user_id, product_id, order_date, amount, status) VALUES
(1, 1, '2024-01-20', 100.00, 'pending'),
(2, 2, '2024-01-20', 200.00, 'pending'),
(3, 3, '2024-01-20', 300.00, 'pending'),
(1, 2, '2024-01-20', 150.00, 'pending'),
(2, 1, '2024-01-20', 250.00, 'pending');
-- LOAD DATA INFILE is usually the fastest bulk-load path
-- LOAD DATA INFILE '/path/to/data.csv'
-- INTO TABLE performance_test
-- FIELDS TERMINATED BY ','
-- LINES TERMINATED BY '\n'
-- (user_id, product_id, order_date, amount, status);
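-- Note: server-side LOAD DATA INFILE is limited by secure_file_priv
-- (a directory value restricts loads to that path, NULL disables them entirely);
-- check it before attempting a bulk load:
SHOW VARIABLES LIKE 'secure_file_priv';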
-- Bulk update optimization
-- Batch update with CASE WHEN
UPDATE performance_test 
SET status = CASE 
    WHEN id IN (1, 3, 5) THEN 'completed'
    WHEN id IN (2, 4) THEN 'shipped'
    ELSE status
END
WHERE id IN (1, 2, 3, 4, 5);
-- Batch update driven by a temporary table
CREATE TEMPORARY TABLE temp_updates (
    id INT PRIMARY KEY,
    new_status VARCHAR(20)
);
INSERT INTO temp_updates VALUES
(1, 'completed'),
(2, 'shipped'),
(3, 'cancelled');
UPDATE performance_test pt
JOIN temp_updates tu ON pt.id = tu.id
SET pt.status = tu.new_status;
DROP TEMPORARY TABLE temp_updates;
-- Bulk delete optimization
-- Delete large volumes of data in batches
DELIMITER //
CREATE PROCEDURE BatchDelete(
    IN table_name VARCHAR(64),
    IN where_condition TEXT,
    IN batch_size INT
)
BEGIN
    DECLARE affected_rows INT DEFAULT 0;
    
    REPEAT
        SET @sql = CONCAT('DELETE FROM ', table_name, ' WHERE ', where_condition, ' LIMIT ', batch_size);
        PREPARE stmt FROM @sql;
        EXECUTE stmt;
        -- Capture ROW_COUNT() right after EXECUTE, before any other statement can reset it
        SET affected_rows = ROW_COUNT();
        DEALLOCATE PREPARE stmt;
        
        -- Pause briefly between batches to avoid holding locks for long stretches
        DO SLEEP(0.1);
        
    UNTIL affected_rows < batch_size END REPEAT;
END //
DELIMITER ;
-- Example call for the batch delete (commented out)
-- CALL BatchDelete('performance_test', 'order_date < "2023-01-01"', 1000);
                
MySQL Configuration Optimization
Memory configuration
-- Check current memory-related settings
SHOW VARIABLES LIKE 'innodb_buffer_pool_size';
SHOW VARIABLES LIKE 'key_buffer_size';
SHOW VARIABLES LIKE 'query_cache_size';  -- the query cache was removed in MySQL 8.0
SHOW VARIABLES LIKE 'tmp_table_size';
SHOW VARIABLES LIKE 'max_heap_table_size';
-- Suggested memory settings (adjust to the server's total RAM)
-- innodb_buffer_pool_size = 70-80% of total RAM (dedicated database server)
-- key_buffer_size = 25% of RAM (MyISAM only)
-- query_cache_size = 64M-256M (MySQL 5.7 and earlier; removed in 8.0)
-- tmp_table_size = 64M-256M
-- max_heap_table_size = 64M-256M
-- Check buffer pool usage
SHOW STATUS LIKE 'Innodb_buffer_pool%';
-- Compute the buffer pool hit rate (on MySQL 8.0, read the counters from performance_schema.global_status)
SELECT 
    ROUND(
        (1 - (Innodb_buffer_pool_reads / Innodb_buffer_pool_read_requests)) * 100, 2
    ) as buffer_pool_hit_rate
FROM (
    SELECT 
        VARIABLE_VALUE as Innodb_buffer_pool_reads
    FROM INFORMATION_SCHEMA.GLOBAL_STATUS 
    WHERE VARIABLE_NAME = 'Innodb_buffer_pool_reads'
) reads
CROSS JOIN (
    SELECT 
        VARIABLE_VALUE as Innodb_buffer_pool_read_requests
    FROM INFORMATION_SCHEMA.GLOBAL_STATUS 
    WHERE VARIABLE_NAME = 'Innodb_buffer_pool_read_requests'
) requests;
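On MySQL 5.7.5 and later the InnoDB buffer pool can be resized online without a restart; a minimal sketch (the 4 GB target below is purely illustrative and must match the server's RAM):
-- Resize the buffer pool online; value is illustrative only
SET GLOBAL innodb_buffer_pool_size = 4 * 1024 * 1024 * 1024;
-- Track resize progress
SHOW STATUS LIKE 'Innodb_buffer_pool_resize_status';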
                    
Connection and thread configuration
-- Check connection-related settings
SHOW VARIABLES LIKE 'max_connections';
SHOW VARIABLES LIKE 'max_user_connections';
SHOW VARIABLES LIKE 'thread_cache_size';
SHOW VARIABLES LIKE 'table_open_cache';
-- Current connection status
SHOW STATUS LIKE 'Connections';
SHOW STATUS LIKE 'Max_used_connections';
SHOW STATUS LIKE 'Threads_connected';
SHOW STATUS LIKE 'Threads_running';
-- Compute connection usage
-- (on MySQL 8.0 use performance_schema.global_status / global_variables instead of the INFORMATION_SCHEMA tables)
SELECT 
    max_used.VARIABLE_VALUE as max_used_connections,
    max_conn.VARIABLE_VALUE as max_connections,
    ROUND(
        (max_used.VARIABLE_VALUE / max_conn.VARIABLE_VALUE) * 100, 2
    ) as connection_usage_percent
FROM 
    (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'Max_used_connections') max_used
CROSS JOIN 
    (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME = 'max_connections') max_conn;
-- Suggested settings
-- max_connections = 200-500 (depending on application demand)
-- thread_cache_size = 8-16
-- table_open_cache = 2000-4000
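max_connections, thread_cache_size and table_open_cache are all dynamic variables, so they can be adjusted without restarting the server; a sketch with illustrative values (on MySQL 8.0+, SET PERSIST keeps the change across restarts):
-- Illustrative values only; tune to the actual workload
SET GLOBAL max_connections = 300;
SET GLOBAL thread_cache_size = 16;
SET GLOBAL table_open_cache = 4000;
-- MySQL 8.0+: SET PERSIST max_connections = 300;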
                    
InnoDB configuration
-- Check InnoDB settings
SHOW VARIABLES LIKE 'innodb_log_file_size';
SHOW VARIABLES LIKE 'innodb_log_buffer_size';
SHOW VARIABLES LIKE 'innodb_flush_log_at_trx_commit';
SHOW VARIABLES LIKE 'innodb_file_per_table';
SHOW VARIABLES LIKE 'innodb_io_capacity';
-- Full InnoDB engine status report
SHOW ENGINE INNODB STATUS\G
-- Suggested InnoDB settings
-- innodb_log_file_size = 256M-1G
-- innodb_log_buffer_size = 16M-64M
-- innodb_flush_log_at_trx_commit = 1 (safest) or 2 (faster, slight risk on OS crash)
-- innodb_file_per_table = ON
-- innodb_io_capacity = 200 (HDD baseline); 2000-20000 for SSD/NVMe
-- Monitor InnoDB performance metrics (on MySQL 8.0, read the counters from performance_schema.global_status)
SELECT 
    'InnoDB Performance Metrics' as category,
    'Buffer Pool Hit Rate' as metric,
    CONCAT(
        ROUND(
            (1 - (Innodb_buffer_pool_reads / Innodb_buffer_pool_read_requests)) * 100, 2
        ), '%'
    ) as value
FROM (
    SELECT 
        CAST(VARIABLE_VALUE AS UNSIGNED) as Innodb_buffer_pool_reads
    FROM INFORMATION_SCHEMA.GLOBAL_STATUS 
    WHERE VARIABLE_NAME = 'Innodb_buffer_pool_reads'
) reads
CROSS JOIN (
    SELECT 
        CAST(VARIABLE_VALUE AS UNSIGNED) as Innodb_buffer_pool_read_requests
    FROM INFORMATION_SCHEMA.GLOBAL_STATUS 
    WHERE VARIABLE_NAME = 'Innodb_buffer_pool_read_requests'
) requests;
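Another quick InnoDB health check is the share of dirty pages in the buffer pool; a sketch using performance_schema.global_status (MySQL 5.7+/8.0):
SELECT 
    ROUND(dirty.cnt / total.cnt * 100, 2) AS dirty_page_percent
FROM (SELECT CAST(VARIABLE_VALUE AS UNSIGNED) AS cnt
      FROM performance_schema.global_status
      WHERE VARIABLE_NAME = 'Innodb_buffer_pool_pages_dirty') dirty
CROSS JOIN (SELECT CAST(VARIABLE_VALUE AS UNSIGNED) AS cnt
            FROM performance_schema.global_status
            WHERE VARIABLE_NAME = 'Innodb_buffer_pool_pages_total') total;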
                    
Performance Monitoring and Diagnostics
Monitoring queries
-- Create a performance summary view (on MySQL 8.0, source these counters from performance_schema.global_status)
CREATE VIEW performance_summary AS
SELECT 
    'Database Performance Summary' as report_type,
    NOW() as report_time,
    (
        SELECT VARIABLE_VALUE 
        FROM INFORMATION_SCHEMA.GLOBAL_STATUS 
        WHERE VARIABLE_NAME = 'Uptime'
    ) as uptime_seconds,
    (
        SELECT VARIABLE_VALUE 
        FROM INFORMATION_SCHEMA.GLOBAL_STATUS 
        WHERE VARIABLE_NAME = 'Questions'
    ) as total_queries,
    (
        SELECT VARIABLE_VALUE 
        FROM INFORMATION_SCHEMA.GLOBAL_STATUS 
        WHERE VARIABLE_NAME = 'Slow_queries'
    ) as slow_queries,
    (
        SELECT VARIABLE_VALUE 
        FROM INFORMATION_SCHEMA.GLOBAL_STATUS 
        WHERE VARIABLE_NAME = 'Threads_connected'
    ) as current_connections;
-- View the summary
SELECT * FROM performance_summary;
-- Most expensive query patterns (requires performance_schema; timer columns are in picoseconds)
SELECT 
    DIGEST_TEXT as query_pattern,
    COUNT_STAR as exec_count,
    AVG_TIMER_WAIT / 1000000000000 as avg_time_seconds,
    MAX_TIMER_WAIT / 1000000000000 as max_time_seconds,
    SUM_TIMER_WAIT / 1000000000000 as total_time_seconds
FROM performance_schema.events_statements_summary_by_digest
ORDER BY SUM_TIMER_WAIT DESC
LIMIT 10;
-- Table I/O statistics
SELECT 
    OBJECT_SCHEMA as db_name,
    OBJECT_NAME as table_name,
    COUNT_READ as read_operations,
    COUNT_WRITE as write_operations,
    COUNT_FETCH as fetch_operations,
    COUNT_INSERT as insert_operations,
    COUNT_UPDATE as update_operations,
    COUNT_DELETE as delete_operations
FROM performance_schema.table_io_waits_summary_by_table
WHERE OBJECT_SCHEMA = DATABASE()
ORDER BY (COUNT_READ + COUNT_WRITE) DESC;
-- Table lock waits (timer values are picoseconds)
SELECT 
    OBJECT_SCHEMA as db_name,
    OBJECT_NAME as table_name,
    COUNT_STAR as lock_count,
    SUM_TIMER_WAIT / 1000000000000 as total_wait_seconds
FROM performance_schema.table_lock_waits_summary_by_table
WHERE OBJECT_SCHEMA = DATABASE()
ORDER BY SUM_TIMER_WAIT DESC;
                
Slow query analysis
-- Enable the slow query log
-- SET GLOBAL slow_query_log = 'ON';
-- SET GLOBAL long_query_time = 1;
-- SET GLOBAL log_queries_not_using_indexes = 'ON';
-- Check slow query log settings
SHOW VARIABLES LIKE 'slow_query_log%';
SHOW VARIABLES LIKE 'long_query_time';
SHOW VARIABLES LIKE 'log_queries_not_using_indexes';
-- Create a table to record slow-query analysis results
CREATE TABLE slow_query_analysis (
    id INT AUTO_INCREMENT PRIMARY KEY,
    query_time DECIMAL(10,6),
    lock_time DECIMAL(10,6),
    rows_sent INT,
    rows_examined INT,
    query_text TEXT,
    analysis_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
-- Simulate a slow query
SELECT 
    pt.id,
    pt.user_id,
    pt.amount,
    u.username,
    p.name,
    SLEEP(0.1) as artificial_delay  -- artificially slows the query (runs once per row)
FROM performance_test pt
JOIN users_perf u ON pt.user_id = u.id
JOIN products_perf p ON pt.product_id = p.id
WHERE pt.amount > 100
ORDER BY pt.amount DESC;
-- Procedure to analyze query performance problems
DELIMITER //
CREATE PROCEDURE AnalyzeQueryPerformance()
BEGIN
    -- Report header
    SELECT 
        'Top Slow Query Patterns' as analysis_type,
        'Pattern' as query_info,
        'Avg Time (s)' as avg_time_info,
        'Exec Count' as exec_count_info;
    
    -- More elaborate analysis could be added here; as a start, list the slowest
    -- statement digests (performance_schema timer columns are in picoseconds)
    SELECT 
        LEFT(DIGEST_TEXT, 100) as query_pattern,
        ROUND(AVG_TIMER_WAIT / 1000000000000, 6) as avg_time_seconds,
        COUNT_STAR as execution_count,
        ROUND(SUM_TIMER_WAIT / 1000000000000, 6) as total_time_seconds
    FROM performance_schema.events_statements_summary_by_digest
    WHERE AVG_TIMER_WAIT > 1000000000000  -- queries averaging more than 1 second
    ORDER BY AVG_TIMER_WAIT DESC
    LIMIT 5;
END //
DELIMITER ;
-- Run the analysis
CALL AnalyzeQueryPerformance();
-- Queries currently executing
SELECT 
    ID,
    USER,
    HOST,
    DB,
    COMMAND,
    TIME,
    STATE,
    LEFT(INFO, 100) as QUERY_PREVIEW
FROM INFORMATION_SCHEMA.PROCESSLIST
WHERE COMMAND != 'Sleep'
ORDER BY TIME DESC;
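If the process list reveals a runaway statement, it can be stopped without tearing down the whole connection (the id comes from the ID column above; 12345 is a placeholder):
-- Terminate just the running statement (placeholder connection id)
KILL QUERY 12345;
-- Or close the entire connection
-- KILL CONNECTION 12345;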
                
Performance Optimization Recommendations
🚀 Performance optimization best practices:

- Monitoring first: build a solid performance-monitoring setup before tuning
- Benchmark: compare performance before and after every change
- Incremental changes: optimize one aspect at a time
- Indexes first: roughly 80% of performance problems can be solved with indexing
- Query rewriting: restructure SQL statements the optimizer struggles with
- Configuration tuning: adjust settings to the hardware and workload
- Architecture: consider read/write splitting and sharding for large-scale systems
| Optimization type | Performance gain | Implementation effort | Maintenance cost | Typical scenario |
|---|---|---|---|---|
| Index optimization | Very high | Low | Low | Query-heavy applications |
| Query optimization | High | Medium | Low | Complex query workloads |
| Configuration tuning | Medium | Low | Low | All scenarios |
| Hardware upgrade | Medium | Low | High | Clear resource bottlenecks |
| Architecture redesign | Very high | High | High | Large-scale applications |
-- Clean up example objects
DROP VIEW IF EXISTS performance_summary;
DROP TABLE IF EXISTS slow_query_analysis;
DROP PROCEDURE IF EXISTS AnalyzeQueryPerformance;
DROP PROCEDURE IF EXISTS BenchmarkQuery;
DROP PROCEDURE IF EXISTS BatchDelete;
DROP PROCEDURE IF EXISTS GenerateTestData;
DROP TABLE IF EXISTS performance_test;
DROP TABLE IF EXISTS users_perf;
DROP TABLE IF EXISTS products_perf;
SELECT 'Performance optimization tutorial completed. All example objects cleaned up.' AS message;