
I have a TCP echo server in Rust that randomly drops messages. I am trying to set a read timeout on the client socket.

client

use std::net::TcpStream;
use std::str;
use std::io::{self, BufRead, BufReader, Write};
use std::time::Duration;
use std::net::SocketAddr;

fn main() {
    let remote: SocketAddr = "127.0.0.1:8888".parse().unwrap();
    let mut stream = TcpStream::connect_timeout(&remote, Duration::from_secs(1)).expect("Could not connect to server");
    stream.set_read_timeout(Some(Duration::from_millis(1))).expect("Could not set a read timeout");
    loop {
        let mut input = String::new();
        let mut buffer: Vec<u8> = Vec::new();
        io::stdin().read_line(&mut input).expect("Failed to read from stdin");
        stream.write(input.as_bytes()).expect("Failed to write to server");

        let mut reader = BufReader::new(&stream);

        reader.read_until(b'\n', &mut buffer).expect("Could not read into buffer");
        print!("{}", str::from_utf8(&buffer).expect("Could not write buffer as string"));
    }
}

server

extern crate rand;

use std::net::{TcpListener, TcpStream};
use std::thread;
use rand::{thread_rng, Rng};

use std::io::{Error, Read, Write};

fn flip() -> bool {
    let choices = [true, false];
    let mut rng = thread_rng();
    *rng.choose(&choices).unwrap()
}

fn handle_client(mut stream: TcpStream) -> Result<(), Error> {
    let mut buf = [0; 512];
    loop {
        let bytes_read = stream.read(&mut buf)?;
        if bytes_read == 0 {
            return Ok(());
        }
        if flip() {
            return Ok(());
        }
        stream.write(&buf[..bytes_read])?;
    }
}

fn main() {
    let listener = TcpListener::bind("127.0.0.1:8888").expect("Could not bind");
    for stream in listener.incoming() {
        match stream {
            Err(e) => eprintln!("failed: {}", e),
            Ok(stream) => {
                thread::spawn(move || {
                    handle_client(stream).unwrap_or_else(|error| eprintln!("{:?}", error));
                });
            }
        }
    }
}

The set_read_timeout call does not seem to be doing anything; the client still waits for a few seconds between the server dropping the message and aborting the connection, even if I set the duration to 1ms.

$ rustc tcp-client-timeout.rs && ./tcp-client-timeout
test
test
foo
foo
bar


thread 'main' panicked at 'Failed to write to server: Error { repr: Os { code: 32, message: "Broken pipe" } }', src/libcore/result.rs:906:4
note: Run with `RUST_BACKTRACE=1` for a backtrace.

1 Answer


The set_read_timeout call does not seem to be doing anything; the client still waits for a few seconds between the server dropping the message and aborting the connection

This is not the experience I had running your code. When I add an eprintln directly after the read_until call, it prints almost immediately once the server decides to drop the connection (a sketch of that check is below). The problem is not in the reading but in the writing, as you can see from your error message:

thread 'main' panicked at 'Failed to write to server:
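
For reference, here is a minimal sketch of that diagnostic, dropped into the client's read loop; the exact message text is just illustrative:

let n = reader.read_until(b'\n', &mut buffer).expect("Could not read into buffer");
// Fires almost immediately once the server closes the connection,
// because read_until returns Ok(0) at EOF instead of blocking.
eprintln!("read_until returned {} bytes", n);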

I'm guessing that you aren't "waiting a few seconds", but are instead pressing Enter multiple times (the blank lines between "bar" and the panic message also indicate this). Your read timeout is working just fine — the problem is that you are trying to write to a socket that the other side has already closed:

if flip() {
    return Ok(());
}

By returning from handle_client you drop the TcpStream, which closes the socket completely. You can either detect this on the client side (a read_until that returns 0 bytes means the server has closed the connection) or keep the server loop going instead of exiting it; a sketch of the latter follows.
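Here is a minimal sketch of that second option, assuming you still want the server to silently swallow some messages: replace the early return inside handle_client's loop with continue, so only the reply is dropped and the connection stays open:

if flip() {
    // Drop this message, but keep reading from the client
    // instead of closing the connection.
    continue;
}
stream.write(&buf[..bytes_read])?;

With that change the client's writes keep succeeding; for a dropped message the read_until call instead hits the read timeout (an error of kind WouldBlock or TimedOut, depending on the platform), which the client needs to handle rather than calling expect on it.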

Shepmaster