Java, Netty, TCP and UDP integration: no buffer space available for the UDP connection

I have an application that uses both TCP and UDP. The main idea is that the client connects to the server over TCP, and once that connection is established, UDP datagrams are sent. I have to support two scenarios of connecting to the server:
– the client connects while the server is running
– the client connects while the server is down and keeps retrying until the server comes back up

In the first scenario everything works fine: both connections come up. The problem is with the second scenario. When the client tries to connect over TCP several times and finally succeeds, the UDP connection code throws an exception:

java.net.SocketException: No buffer space available (maximum connections reached?): bind
    at sun.nio.ch.Net.bind0(Native Method)
    at sun.nio.ch.Net.bind(Net.java:344)
    at sun.nio.ch.DatagramChannelImpl.bind(DatagramChannelImpl.java:684)
    at sun.nio.ch.DatagramSocketAdaptor.bind(DatagramSocketAdaptor.java:91)
    at io.netty.channel.socket.nio.NioDatagramChannel.doBind(NioDatagramChannel.java:192)
    at io.netty.channel.AbstractChannel$AbstractUnsafe.bind(AbstractChannel.java:484)
    at io.netty.channel.DefaultChannelPipeline$HeadContext.bind(DefaultChannelPipeline.java:1080)
    at io.netty.channel.AbstractChannelHandlerContext.invokeBind(AbstractChannelHandlerContext.java:430)
    at io.netty.channel.AbstractChannelHandlerContext.bind(AbstractChannelHandlerContext.java:415)
    at io.netty.channel.DefaultChannelPipeline.bind(DefaultChannelPipeline.java:903)
    at io.netty.channel.AbstractChannel.bind(AbstractChannel.java:197)
    at io.netty.bootstrap.AbstractBootstrap$2.run(AbstractBootstrap.java:350)
    at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:380)
    at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:357)
    at io.netty.util.concurrent.SingleThreadEventExecutor$2.run(SingleThreadEventExecutor.java:116)
    at io.netty.util.concurrent.DefaultThreadFactory$DefaultRunnableDecorator.run(DefaultThreadFactory.java:137)
    at java.lang.Thread.run(Thread.java:722)

When I restart the client application without touching the server, the client connects without any problem.

What could be causing the problem?

Below I attach the source code of the classes. All of it is based on the examples from the official Netty project page. The only thing I have modified is that I replaced the static variables and functions with non-static ones, because in the future I will need many TCP-UDP connections to multiple servers.

public final class UptimeClient {
    static final String HOST = System.getProperty("host", "192.168.2.193");
    static final int PORT = Integer.parseInt(System.getProperty("port", "2011"));
    static final int RECONNECT_DELAY = Integer.parseInt(System.getProperty("reconnectDelay", "5"));
    static final int READ_TIMEOUT = Integer.parseInt(System.getProperty("readTimeout", "10"));

    private static UptimeClientHandler handler;

    public void runClient() throws Exception {
        configureBootstrap(new Bootstrap()).connect();
    }

    private Bootstrap configureBootstrap(Bootstrap b) {
        return configureBootstrap(b, new NioEventLoopGroup());
    }

    @Override
    protected Object clone() throws CloneNotSupportedException {
        return super.clone(); //To change body of generated methods, choose Tools | Templates.
    }

    Bootstrap configureBootstrap(Bootstrap b, EventLoopGroup g) {
        if (handler == null) {
            handler = new UptimeClientHandler(this);
        }
        b.group(g)
         .channel(NioSocketChannel.class)
         .remoteAddress(HOST, PORT)
         .handler(new ChannelInitializer<SocketChannel>() {
             @Override
             public void initChannel(SocketChannel ch) throws Exception {
                 ch.pipeline().addLast(new IdleStateHandler(READ_TIMEOUT, 0, 0), handler);
             }
         });
        return b;
    }

    void connect(Bootstrap b) {
        b.connect().addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.cause() != null) {
                    handler.startTime = -1;
                    handler.println("Failed to connect: " + future.cause());
                }
            }
        });
    }
}

@Sharable
public class UptimeClientHandler extends SimpleChannelInboundHandler<Object> {
    UptimeClient client;

    public UptimeClientHandler(UptimeClient client) {
        this.client = client;
    }

    long startTime = -1;

    @Override
    public void channelActive(ChannelHandlerContext ctx) {
        try {
            if (startTime < 0) {
                startTime = System.currentTimeMillis();
            }
            println("Connected to: " + ctx.channel().remoteAddress());
            new QuoteOfTheMomentClient(null).run();
        } catch (Exception ex) {
            Logger.getLogger(UptimeClientHandler.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    @Override
    public void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception {
    }

    @Override
    public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
        if (!(evt instanceof IdleStateEvent)) {
            return;
        }
        IdleStateEvent e = (IdleStateEvent) evt;
        if (e.state() == IdleState.READER_IDLE) {
            // The connection was OK but there was no traffic for last period.
            println("Disconnecting due to no inbound traffic");
            ctx.close();
        }
    }

    @Override
    public void channelInactive(final ChannelHandlerContext ctx) {
        println("Disconnected from: " + ctx.channel().remoteAddress());
    }

    @Override
    public void channelUnregistered(final ChannelHandlerContext ctx) throws Exception {
        println("Sleeping for: " + UptimeClient.RECONNECT_DELAY + 's');
        final EventLoop loop = ctx.channel().eventLoop();
        loop.schedule(new Runnable() {
            @Override
            public void run() {
                println("Reconnecting to: " + UptimeClient.HOST + ':' + UptimeClient.PORT);
                client.connect(client.configureBootstrap(new Bootstrap(), loop));
            }
        }, UptimeClient.RECONNECT_DELAY, TimeUnit.SECONDS);
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
        cause.printStackTrace();
        ctx.close();
    }

    void println(String msg) {
        if (startTime < 0) {
            System.err.format("[SERVER IS DOWN] %s%n", msg);
        } else {
            System.err.format("[UPTIME: %5ds] %s%n", (System.currentTimeMillis() - startTime) / 1000, msg);
        }
    }
}

public final class QuoteOfTheMomentClient {
    private ServerData config;

    public QuoteOfTheMomentClient(ServerData config) {
        this.config = config;
    }

    public void run() throws Exception {
        EventLoopGroup group = new NioEventLoopGroup();
        try {
            Bootstrap b = new Bootstrap();
            b.group(group)
             .channel(NioDatagramChannel.class)
             .option(ChannelOption.SO_BROADCAST, true)
             .handler(new QuoteOfTheMomentClientHandler());

            Channel ch = b.bind(0).sync().channel();

            ch.writeAndFlush(new DatagramPacket(
                    Unpooled.copiedBuffer("QOTM?", CharsetUtil.UTF_8),
                    new InetSocketAddress("192.168.2.193", 8193))).sync();

            if (!ch.closeFuture().await(5000)) {
                System.err.println("QOTM request timed out.");
            }
        } catch (Exception ex) {
            ex.printStackTrace();
        } finally {
            group.shutdownGracefully();
        }
    }
}

public class QuoteOfTheMomentClientHandler extends SimpleChannelInboundHandler<DatagramPacket> {
    @Override
    public void channelRead0(ChannelHandlerContext ctx, DatagramPacket msg) throws Exception {
        String response = msg.content().toString(CharsetUtil.UTF_8);
        if (response.startsWith("QOTM: ")) {
            System.out.println("Quote of the Moment: " + response.substring(6));
            ctx.close();
        }
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
        cause.printStackTrace();
        ctx.close();
    }
}

If your server is Windows Server 2008 (R2 or R2 SP1), this problem is likely described and resolved by this stackoverflow answer, which refers to Microsoft KB article #2577795:

This issue occurs because of a race condition in the Ancillary Function Driver for WinSock (Afd.sys) that leads to a socket leak. Over time, if all available socket resources are exhausted, the symptoms described in the "Symptoms" section occur.


If your server is Windows Server 2003, this problem is likely described and resolved by this stackoverflow answer, which refers to Microsoft KB article #196271:

The default maximum number of ephemeral TCP ports is 5000 in the products that are included in the "Applies to" section. A new parameter has been added in these products. To increase the maximum number of ephemeral ports, follow these steps…
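(In practice, the steps in that KB article amount to adding, or raising, the MaxUserPort value under the TCP/IP parameters key in the registry and rebooting. A rough sketch only; the exact value, anywhere up to 65534, is your choice:

    HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters
        MaxUserPort = 65534   (REG_DWORD; default is 5000)
)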

…which basically means that you have run out of ephemeral ports.
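Independently of the Windows-side fixes, you can also reduce how quickly the client burns through ephemeral ports. In the code above, every successful (re)connect runs QuoteOfTheMomentClient.run(), which creates a brand-new NioEventLoopGroup and binds a new UDP socket just to send a single datagram. One alternative is to bind the UDP channel once and reuse it for every request. The sketch below is mine (the class and handler names are not from the original code) and assumes one long-lived UDP channel per client and a handler that does not close the channel after each response:

import io.netty.bootstrap.Bootstrap;
import io.netty.buffer.Unpooled;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.socket.DatagramPacket;
import io.netty.channel.socket.nio.NioDatagramChannel;
import io.netty.util.CharsetUtil;
import java.net.InetSocketAddress;

public final class ReusableQotmClient {

    private final EventLoopGroup group;
    private final Channel udpChannel; // bound once, reused for every request

    public ReusableQotmClient(EventLoopGroup group) throws InterruptedException {
        this.group = group;
        Bootstrap b = new Bootstrap();
        b.group(group)
         .channel(NioDatagramChannel.class)
         .option(ChannelOption.SO_BROADCAST, true)
         .handler(new QuoteHandler());
        // One ephemeral port for the lifetime of this client instead of one per request.
        this.udpChannel = b.bind(0).sync().channel();
    }

    // Sends a QOTM request over the already-bound channel.
    public void requestQuote(InetSocketAddress server) {
        udpChannel.writeAndFlush(new DatagramPacket(
                Unpooled.copiedBuffer("QOTM?", CharsetUtil.UTF_8), server));
    }

    // Releases the single UDP socket when the client shuts down for good.
    public void shutdown() {
        udpChannel.close();
        group.shutdownGracefully();
    }

    private static final class QuoteHandler extends SimpleChannelInboundHandler<DatagramPacket> {
        @Override
        protected void channelRead0(ChannelHandlerContext ctx, DatagramPacket msg) {
            // Unlike the original handler, do not close the channel here:
            // the same socket is reused for the next request.
            System.out.println("Quote of the Moment: " + msg.content().toString(CharsetUtil.UTF_8));
        }
    }
}

The TCP handler could then hold a single ReusableQotmClient (for example sharing the TCP channel's EventLoopGroup) and call requestQuote(...) from channelActive(), instead of constructing a new QuoteOfTheMomentClient on every connect.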