1. Test and Verification Results
1.1 Functional Testing
# Before optimization (Spring Boot on port 8080)
curl -X POST http://127.0.0.1:8080/api/users -H "Content-Type: application/json" -d '{"name":"张三","email":"zhangsan@example.com"}' -v
# After optimization (Netty on port 8081)
curl -X POST http://127.0.0.1:8081/api/users -H "Content-Type: application/json" -d '{"name":"张三","email":"zhangsan@example.com"}' -v
1.2 Performance Testing
Before optimization: 12,000 QPS
After optimization: 36,000 QPS
1.3 Runtime Screenshots
2. Root Cause Analysis
2.1 Thread Model Bottleneck
Spring Boot's default embedded Tomcat follows a thread-per-request servlet model: even with the NIO connector, every in-flight request occupies one worker thread for the whole of its processing. Once the number of concurrent long-lived connections exceeds the worker pool capacity, requests queue up and may eventually be rejected.
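The relevant limits are visible in these Spring Boot properties (shown with their default values; property names as of Spring Boot 2.3+):
# application.properties - embedded Tomcat settings that bound concurrency
# worker threads that actually process requests
server.tomcat.threads.max=200
# connections Tomcat will accept and keep open
server.tomcat.max-connections=8192
# OS backlog once max-connections is reached
server.tomcat.accept-count=100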
2.2 Synchronous Blocking Operations
If the business logic contains synchronous blocking calls (such as a synchronous database query), the worker threads sit idle waiting on I/O and the pool is exhausted very quickly.
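A self-contained illustration of the ceiling this creates (the numbers are hypothetical, not measurements from this project): with a 200-thread pool and a 100 ms blocking call per request, throughput cannot exceed roughly 200 / 0.1 s = 2,000 requests per second, however idle the CPU is.
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class BlockingModelDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService workers = Executors.newFixedThreadPool(200); // stands in for Tomcat's worker pool
        CountDownLatch done = new CountDownLatch(2000);
        long start = System.currentTimeMillis();
        for (int i = 0; i < 2000; i++) {
            workers.submit(() -> {
                try {
                    Thread.sleep(100); // simulates a synchronous DB/RPC call holding the thread
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                } finally {
                    done.countDown();
                }
            });
        }
        done.await();
        // Expect roughly 1 second for 2,000 requests, i.e. ~2,000 req/s regardless of CPU headroom
        System.out.println("2000 blocking requests took " + (System.currentTimeMillis() - start) + " ms");
        workers.shutdown();
    }
}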
2.3 Context Switching Overhead
A large number of threads forces the CPU to switch time slices frequently, so the share of CPU time spent on useful work drops.
3. Netty-Based Alternative (for High-Concurrency, Long-Lived Connections)
3.1 Original Architecture
package com.example.controller;

import com.example.model.User;
import org.springframework.web.bind.annotation.*;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

@RestController
@RequestMapping("/api")
public class ApiController {

    // In-memory store for demo purposes (note: ArrayList is not thread-safe under concurrent requests)
    private final List<User> users = new ArrayList<>();
    private final AtomicLong counter = new AtomicLong();

    // Test endpoint
    @GetMapping("/hello")
    public String sayHello() {
        return "Hello, Spring Boot!";
    }

    // List all users
    @GetMapping("/users")
    public List<User> getAllUsers() {
        return users;
    }

    // Create a user
    @PostMapping("/users")
    public User createUser(@RequestBody User user) {
        user.setId(counter.incrementAndGet());
        users.add(user);
        return user;
    }

    // Look up a user by ID
    @GetMapping("/users/{id}")
    public User getUserById(@PathVariable Long id) {
        return users.stream()
                .filter(u -> u.getId().equals(id))
                .findFirst()
                .orElseThrow(() -> new RuntimeException("User not found"));
    }

    // Update a user
    @PutMapping("/users/{id}")
    public User updateUser(@PathVariable Long id, @RequestBody User userDetails) {
        User user = users.stream()
                .filter(u -> u.getId().equals(id))
                .findFirst()
                .orElseThrow(() -> new RuntimeException("User not found"));
        user.setName(userDetails.getName());
        user.setEmail(userDetails.getEmail());
        return user;
    }

    // Delete a user
    @DeleteMapping("/users/{id}")
    public String deleteUser(@PathVariable Long id) {
        users.removeIf(u -> u.getId().equals(id));
        return "User deleted successfully";
    }
}
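The com.example.model.User class the controller depends on is not shown in the post; a minimal version consistent with how it is used above:
package com.example.model;

public class User {
    private Long id;
    private String name;
    private String email;

    public Long getId() { return id; }
    public void setId(Long id) { this.id = id; }
    public String getName() { return name; }
    public void setName(String name) { this.name = name; }
    public String getEmail() { return email; }
    public void setEmail(String email) { this.email = email; }
}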
3.2 Overall Architecture Adjustment (Recommended)
package com.example.demo;

import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.*;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http.HttpServerCodec;
import io.netty.handler.timeout.IdleStateHandler;
import io.netty.handler.traffic.ChannelTrafficShapingHandler;
import io.netty.util.concurrent.DefaultThreadFactory;

import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class NettyServer {

    // Connection counter (use Prometheus or similar in production); AtomicInteger because it is
    // updated from multiple event-loop threads
    private static final AtomicInteger activeConnections = new AtomicInteger(0);

    public static void main(String[] args) {
        // Event-loop tuning: one acceptor thread, 2x CPU cores worth of I/O workers
        EventLoopGroup bossGroup = new NioEventLoopGroup(1, new DefaultThreadFactory("boss"));
        EventLoopGroup workerGroup = new NioEventLoopGroup(
                Runtime.getRuntime().availableProcessors() * 2,
                new DefaultThreadFactory("worker")
        );
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    public void initChannel(SocketChannel ch) {
                        ChannelPipeline pipeline = ch.pipeline();
                        // Per-channel traffic shaping (roughly 1 MB/s write and read limits)
                        pipeline.addLast(new ChannelTrafficShapingHandler(1024 * 1024, 1024 * 1024));
                        // HTTP codec and aggregation (up to 10 MB per request)
                        pipeline.addLast(new HttpServerCodec());
                        pipeline.addLast(new HttpObjectAggregator(10 * 1024 * 1024));
                        // Idle detection (read timeout 60 seconds)
                        pipeline.addLast(new IdleStateHandler(60, 0, 0, TimeUnit.SECONDS));
                        // Connection monitoring
                        pipeline.addLast(new ConnectionMonitorHandler());
                        // Business logic
                        pipeline.addLast(new BusinessHandler());
                        // Centralized exception handling
                        pipeline.addLast(new GlobalExceptionHandler());
                    }
                })
                // Server socket tuning
                .option(ChannelOption.SO_BACKLOG, 65535)
                .option(ChannelOption.SO_REUSEADDR, true)
                .childOption(ChannelOption.TCP_NODELAY, true)
                .childOption(ChannelOption.SO_KEEPALIVE, true)
                .childOption(ChannelOption.WRITE_BUFFER_WATER_MARK,
                        new WriteBufferWaterMark(8 * 1024, 32 * 1024))
                .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);

            // Bind the port and register a startup listener
            ChannelFuture f = b.bind(8081)
                .addListener((ChannelFutureListener) future -> {
                    if (future.isSuccess()) {
                        System.out.println("================ Server started on port 8081 ====================");
                        printSystemInfo();
                    } else {
                        System.err.println("Failed to bind port: " + future.cause().getMessage());
                        System.exit(1);
                    }
                })
                .sync();

            // Shutdown hook for graceful termination
            Runtime.getRuntime().addShutdownHook(new Thread(() -> {
                bossGroup.shutdownGracefully();
                workerGroup.shutdownGracefully();
                System.out.println("Server shut down gracefully");
            }));

            f.channel().closeFuture().sync();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        } finally {
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        }
    }

    // Connection monitoring handler
    @ChannelHandler.Sharable
    private static class ConnectionMonitorHandler extends ChannelDuplexHandler {
        @Override
        public void channelActive(ChannelHandlerContext ctx) {
            activeConnections.incrementAndGet();
            ctx.fireChannelActive();
        }

        @Override
        public void channelInactive(ChannelHandlerContext ctx) {
            activeConnections.decrementAndGet();
            ctx.fireChannelInactive();
        }
    }

    // Centralized exception handling
    @ChannelHandler.Sharable
    private static class GlobalExceptionHandler extends ChannelInboundHandlerAdapter {
        @Override
        public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
            if (cause instanceof IOException && "Connection reset by peer".equals(cause.getMessage())) {
                System.out.println("Client dropped the connection: " + ctx.channel().remoteAddress());
                ctx.close();
            } else {
                System.err.println("Unhandled exception:");
                cause.printStackTrace();
                ctx.close();
            }
        }
    }

    // Print basic runtime information
    private static void printSystemInfo() {
        System.out.println("Current configuration:");
        System.out.println("CPU cores: " + Runtime.getRuntime().availableProcessors());
        System.out.println("Max heap: " + Runtime.getRuntime().maxMemory() / 1024 / 1024 + "MB");
        System.out.println("JVM arguments: " + ManagementFactory.getRuntimeMXBean().getInputArguments());
    }
}
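The BusinessHandler registered in the pipeline above is not shown in the post. As a rough sketch of what it could look like (an assumption, not the original implementation), the handler below answers the aggregated HTTP request with a fixed JSON body and closes connections that the IdleStateHandler flags as read-idle:
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.*;
import io.netty.handler.timeout.IdleStateEvent;
import io.netty.util.CharsetUtil;

public class BusinessHandler extends SimpleChannelInboundHandler<FullHttpRequest> {

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, FullHttpRequest request) {
        // Real business logic would go here; this sketch just echoes a fixed JSON body
        FullHttpResponse response = new DefaultFullHttpResponse(
                HttpVersion.HTTP_1_1, HttpResponseStatus.OK,
                Unpooled.copiedBuffer("{\"status\":\"ok\"}", CharsetUtil.UTF_8));
        response.headers()
                .set(HttpHeaderNames.CONTENT_TYPE, "application/json")
                .set(HttpHeaderNames.CONTENT_LENGTH, response.content().readableBytes());
        if (HttpUtil.isKeepAlive(request)) {
            response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE);
            ctx.writeAndFlush(response);
        } else {
            ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);
        }
    }

    @Override
    public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
        // Close connections reported as idle by the IdleStateHandler in the pipeline
        if (evt instanceof IdleStateEvent) {
            ctx.close();
        } else {
            super.userEventTriggered(ctx, evt);
        }
    }
}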
3.3 Key Optimization Points
- Asynchronous, non-blocking processing: all I/O stays on the NIO event-loop threads, and business logic is handed off to a dedicated thread pool (see section 5.1)
- Connection management:
// Cap the number of concurrent connections
public class ConnectionLimiter extends ChannelInboundHandlerAdapter {
    private static final AtomicInteger connections = new AtomicInteger(0);
    private static final int MAX_CONN = 10000;

    @Override
    public void channelActive(ChannelHandlerContext ctx) {
        if (connections.incrementAndGet() > MAX_CONN) {
            connections.decrementAndGet(); // roll the count back before rejecting
            ctx.close();
            return;
        }
        ctx.fireChannelActive(); // continue normal processing
    }

    @Override
    public void channelInactive(ChannelHandlerContext ctx) {
        connections.decrementAndGet(); // free the slot when the connection closes
        ctx.fireChannelInactive();
    }
}
- Protocol optimization: replace JSON with a binary protocol (such as Protobuf) to cut serialization overhead; a minimal message definition is sketched below
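If Protobuf were adopted, a message definition matching the User payload might look like this (a hypothetical sketch; the field numbers and java_package are assumptions):
syntax = "proto3";

option java_package = "com.example.model.proto";

message User {
  int64 id = 1;
  string name = 2;
  string email = 3;
}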
4. Spring Boot Integration (Compatible with the Existing Service)
4.1 Hybrid Deployment Mode
// Launch Netty from the Spring Boot application
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

@SpringBootApplication
public class HybridApp {
    public static void main(String[] args) {
        SpringApplication.run(HybridApp.class, args);
        // Assumes NettyServer wraps the bootstrap from section 3.2 in a blocking start() method,
        // so it runs on its own thread instead of blocking the Spring main thread
        new Thread(() -> new NettyServer().start(), "netty-server").start();
    }
}
// Configure the two ports in application.properties
# Spring MVC port
server.port=8080
# Netty service port
netty.port=8081
4.2 Traffic Splitting Strategy
- Route high-frequency, long-lived connection traffic (such as instant messaging and real-time location updates) to the Netty service
- Keep Spring MVC for short-lived RESTful requests (a reverse-proxy routing sketch follows below)
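One possible way to implement the split is a reverse proxy in front of both ports; the nginx sketch below is purely illustrative (the path prefixes and upstream names are assumptions, not part of the original setup):
upstream spring_mvc { server 127.0.0.1:8080; }
upstream netty_push { server 127.0.0.1:8081; }

server {
    listen 80;

    # Long-lived / high-frequency traffic goes to Netty
    location /realtime/ {
        proxy_pass http://netty_push;
        proxy_http_version 1.1;
        proxy_set_header Connection "";
    }

    # Regular RESTful traffic stays on Spring MVC
    location /api/ {
        proxy_pass http://spring_mvc;
    }
}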
5. Additional Optimizations
5.1 Business Logic Optimization
import io.netty.channel.ChannelHandlerContext;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;

// Template for asynchronous processing: run business work on a dedicated pool, reply on the I/O thread
public class AsyncProcessor {
    // Dedicated business thread pool, kept separate from the Netty I/O threads
    private static final Executor bizExecutor = Executors.newFixedThreadPool(
            Runtime.getRuntime().availableProcessors() * 2);

    public void process(ChannelHandlerContext ctx, Object msg) {
        CompletableFuture.supplyAsync(() -> {
            // Business logic runs here, off the I/O thread
            return processBusiness(msg);
        }, bizExecutor).thenAcceptAsync(ctx::writeAndFlush, ctx.executor()); // write the response back on the I/O thread
    }

    private Object processBusiness(Object msg) {
        // Placeholder for the actual business logic
        return msg;
    }
}
5.2 Monitoring Configuration
The connection monitor below uses Micrometer's metrics facade, so the project needs the micrometer-core dependency:
<dependency>
    <groupId>io.micrometer</groupId>
    <artifactId>micrometer-core</artifactId>
    <version>${micrometer.version}</version>
</dependency>
// Connection-count monitoring: a Micrometer Counter only goes up, so an AtomicInteger registered
// as a gauge tracks the live connection count
public class ConnectionMonitor extends ChannelInboundHandlerAdapter {
    private static final AtomicInteger connections =
            Metrics.gauge("netty.connections", new AtomicInteger(0));

    @Override
    public void channelActive(ChannelHandlerContext ctx) {
        connections.incrementAndGet();
        ctx.fireChannelActive();
    }

    @Override
    public void channelInactive(ChannelHandlerContext ctx) {
        connections.decrementAndGet();
        ctx.fireChannelInactive();
    }
}
6. Implementation Recommendations
6.1 Phased Migration
Migrate the most heavily loaded endpoints to Netty first.
6.2 Load Testing
Use wrk for a before/after comparison.
# Example test command
./wrk -t12 -c400 -d30s -T5s -s post_test.lua http://localhost:8081/api/users
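The post_test.lua script passed to wrk is not included in the post; a minimal version that replays the JSON body from the functional test could look like this, using wrk's Lua scripting hooks:
-- post_test.lua: send the same POST body used in the functional test
wrk.method = "POST"
wrk.headers["Content-Type"] = "application/json"
wrk.body = '{"name":"张三","email":"zhangsan@example.com"}'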
6.3 Monitoring Metrics
Focus on core metrics such as QPS, CPU utilization, and GC frequency.
Expected results after the migration (empirical estimates):
- Connection capacity: from Tomcat's default of roughly 200-500 to 10,000+ concurrent connections
- CPU utilization: 30%-50% lower under the same load
- Memory: far less thread-stack overhead (each thread's stack costs roughly 1 MB)
Before making changes, use jstack to inspect the current thread state and confirm that too many threads really is the problem, then optimize accordingly. Also make sure the business code contains no synchronous blocking calls, and use Arthas for live diagnosis if necessary.
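For example, counting Tomcat worker threads in a thread dump gives a quick sense of whether the pool is saturated (assuming the default http-nio connector thread naming):
# Count Tomcat worker threads for the connector on port 8080
jstack <pid> | grep -c "http-nio-8080-exec"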
Hope this article helps! If you found it useful, don't forget to like and bookmark it!