Passive
linyacool committed Feb 21, 2018
1 parent 4a71731 commit e19c7ee
Showing 10 changed files with 12 additions and 19 deletions.
Binary file modified WebBench/bin/webbench
Empty file modified WebBench/man/man1/webbench.1 (mode changed 100755 → 100644)
Empty file modified WebBench/share/doc/webbench/changelog (mode changed 100755 → 100644)
Empty file modified WebBench/share/doc/webbench/copyright (mode changed 100755 → 100644)
8 changes: 4 additions & 4 deletions WebBench/socket.c
@@ -53,13 +53,13 @@ int Socket(const char *host, int clientPort)

if (sock < 0)
return sock;

// int optval = 1;
// if(setsockopt(sock, SOL_SOCKET, SO_REUSEPORT, &optval, sizeof(optval)) == -1)
// return -1;
if (connect(sock, (struct sockaddr *)&ad, sizeof(ad)) < 0)
return -1;

// struct linger linger_;
// linger_.l_onoff = 0;
// linger_.l_linger = 0;
// setsockopt(sock, SOL_SOCKET, SO_LINGER,(const char *) &linger_, sizeof(linger_));
return sock;
}

2 changes: 1 addition & 1 deletion WebBench/test.sh
@@ -1 +1 @@
./bin/webbench -t 60 -c 1000 -2 -k --get http://127.0.0.1:80/hello
./bin/webbench -t 60 -c 1000 -2 --get http://127.0.0.1:80/hello
Binary file modified WebBench/webbench
7 changes: 0 additions & 7 deletions WebBench/webbench.c
@@ -535,10 +535,8 @@ void benchcore(const char *host,const int port,const char *req)
return;
}
s=Socket(host,port);
//printf("s = %d\n", s);
if(s<0) { failed++;continue;}
if(rlen!=write(s,req,rlen)) {failed++;close(s);continue;}
//printf("here 1\n");
if(http10==0)
if(shutdown(s,1)) { failed++;close(s);continue;}
if(force==0)
@@ -547,10 +545,7 @@ void benchcore(const char *host,const int port,const char *req)
while(1)
{
if(timerexpired) break;
//printf("before\n");
i=read(s,buf,1500);
//printf("i = %d\n", i);
/* fprintf(stderr,"%d\n",i); */
if(i<0)
{
failed++;
@@ -563,10 +558,8 @@ void benchcore(const char *host,const int port,const char *req)
bytes+=i;
}
}
//printf("here 2\n");
if(close(s)) {failed++; continue;}
speed++;
//printf("here\n");
}
}

12 changes: 6 additions & 6 deletions WebServer/HttpData.cpp
@@ -286,12 +286,12 @@ void HttpData::handleConn()
else
{
//cout << "close normally" << endl;
loop_->shutdown(channel_);
loop_->runInLoop(bind(&HttpData::handleClose, shared_from_this()));
// events_ |= (EPOLLIN | EPOLLET);
// //events_ |= (EPOLLIN | EPOLLET | EPOLLONESHOT);
// int timeout = DEFAULT_KEEP_ALIVE_TIME;
// loop_->updatePoller(channel_, timeout);
// loop_->shutdown(channel_);
// loop_->runInLoop(bind(&HttpData::handleClose, shared_from_this()));
events_ |= (EPOLLIN | EPOLLET);
//events_ |= (EPOLLIN | EPOLLET | EPOLLONESHOT);
int timeout = (DEFAULT_KEEP_ALIVE_TIME >> 1);
loop_->updatePoller(channel_, timeout);
}
}
else if (!error_ && connectionState_ == H_DISCONNECTING && (events_ & EPOLLOUT))
2 changes: 1 addition & 1 deletion 测试及改进.md
@@ -26,7 +26,7 @@

* The most obvious point first: persistent connections handle three to four times as many requests as short connections, because the connection setup and teardown overhead disappears; there is no need for frequent accept and shutdown/close system calls, nor for repeatedly constructing and destroying the corresponding per-connection structures.
* In its final version my server does not improve the input/output Buffer and still uses an inefficient string, whereas muduo uses a well-designed vector<char>; I will improve this later. This is also why my server trails muduo with persistent connections: edge triggering is a bit more efficient, but it cannot make up for muduo's Buffer optimizations (a minimal buffer sketch follows this list).
* With short connections, my server beats Muduo by a large margin. The reason: Muduo's TcpConnection is closed passively; when it receives the Connection: Close option it only shuts down the write side (after checking that the write has completed), and it closes the connection and destroys the object only when read returns 0. That logic is fine. My approach is this: when the Close option arrives and the shared_ptr-managed connection object has finished everything it needs to do, that is, it has received and parsed all requests and written out all responses, the shared_ptr's lifetime ends and the destructor triggers close (see the destructor-close sketch after this list). The result is that more TIME_WAIT states are traded for higher processing throughput; of course, options such as net.ipv4.tcp_tw_reuse (reuse TIME-WAIT sockets), net.ipv4.tcp_tw_recycle (fast recycling), and net.ipv4.tcp_fin_timeout (the default FIN timeout) can be used to reduce the impact of TIME_WAIT.
* With short connections, my server beats Muduo by a large margin. The reason: Muduo uses level triggering (epoll on Linux) and its Acceptor calls accept only once per callback before returning, so under a sudden burst of connections it inevitably loses a lot of time to repeated epoll_wait calls, whereas my approach wraps accept in a while loop and keeps accepting until nothing is left to accept (see the accept-loop sketch after this list). Of course, when there are few simultaneous connection requests, as Chen Shuo also notes in his book, with only one connection at a time my approach needs one extra accept call to break out of the loop, but that cost seems negligible compared with the efficiency gained.
* When idle, the server uses almost no CPU. With short connections the CPU load is fairly evenly balanced across threads; with persistent connections the main thread's load is 0 and the thread-pool threads run close to 100%, because there are no new connections to handle. All of these cases behave as expected.
* Without rigorous verification, I found while testing that storing the parsed HTTP headers in a map is faster than in an unordered_map. Many people have published benchmarks of this online, and I observed roughly the same thing in my experiments. The main reason is that the data set is tiny: an HTTP request has only a few header fields, so building an unordered_map costs more than building a map, and with so little data the asymptotic complexity never shows (a small benchmark sketch follows this list).
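A minimal sketch of the kind of vector<char>-backed buffer referred to in the Buffer bullet above. The class and member names are illustrative, not muduo's actual Buffer API; the point is simply that consumed storage gets reused instead of forcing the reallocations a plain string tends to incur.

```cpp
#include <algorithm>
#include <cstddef>
#include <string>
#include <vector>

// Illustrative vector<char>-backed buffer (not the real muduo Buffer).
class Buffer {
public:
    // Append raw bytes at the writable end; the vector grows as needed.
    void append(const char* data, size_t len) {
        buf_.insert(buf_.end(), data, data + len);
    }

    // Bytes currently available to read.
    size_t readableBytes() const { return buf_.size() - readIndex_; }

    // Consume up to len bytes and return them.
    std::string retrieve(size_t len) {
        len = std::min(len, readableBytes());
        std::string result(buf_.data() + readIndex_, len);
        readIndex_ += len;
        // Once everything is consumed, reuse the storage instead of
        // reallocating; this is the main win over a plain std::string.
        if (readIndex_ == buf_.size()) {
            buf_.clear();
            readIndex_ = 0;
        }
        return result;
    }

private:
    std::vector<char> buf_;
    size_t readIndex_ = 0;
};
```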
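A hypothetical sketch of the destructor-driven active close described in the removed bullet above; the Connection class and its members are illustrative names, not the project's actual code. Dropping the last shared_ptr is what triggers close(), and that active close is the source of the extra TIME_WAIT states.

```cpp
#include <memory>
#include <unistd.h>

// Illustrative connection object held only through shared_ptr. Once the last
// request has been parsed and the last response written, the final shared_ptr
// goes out of scope, the destructor runs, and close() actively terminates the
// TCP connection, leaving this end of it in TIME_WAIT.
class Connection {
public:
    explicit Connection(int fd) : fd_(fd) {}
    Connection(const Connection&) = delete;
    Connection& operator=(const Connection&) = delete;
    ~Connection() {
        if (fd_ >= 0)
            close(fd_);  // active close: this side enters TIME_WAIT
    }

private:
    int fd_;
};

// Usage sketch: the event loop and pending callbacks share ownership;
// releasing the last reference closes the socket.
// std::shared_ptr<Connection> conn = std::make_shared<Connection>(connFd);
```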
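A rough sketch of the accept-until-EAGAIN loop mentioned in the short-connection bullet above, assuming the listening socket is non-blocking and registered with EPOLLET; acceptAll and onNewConnection are illustrative names, not the project's Acceptor code.

```cpp
#include <netinet/in.h>
#include <sys/socket.h>
#include <cerrno>
#include <functional>

// Accept every pending connection before returning, as an edge-triggered
// (EPOLLET) listening socket requires: the readiness notification fires once,
// so the queue must be drained in one go.
void acceptAll(int listenFd,
               const std::function<void(int, const sockaddr_in&)>& onNewConnection) {
    while (true) {
        sockaddr_in clientAddr{};
        socklen_t addrLen = sizeof(clientAddr);
        int connFd = accept(listenFd,
                            reinterpret_cast<sockaddr*>(&clientAddr), &addrLen);
        if (connFd < 0) {
            if (errno == EAGAIN || errno == EWOULDBLOCK)
                break;          // queue drained; wait for the next edge
            if (errno == EINTR)
                continue;       // interrupted by a signal; retry
            break;              // other errors (EMFILE, ...) need real handling
        }
        onNewConnection(connFd, clientAddr);
    }
}
```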
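A hypothetical micro-benchmark along the lines of the map versus unordered_map observation above; it is not the project's test code, and the exact numbers will vary with the machine and standard-library implementation.

```cpp
#include <chrono>
#include <iostream>
#include <map>
#include <string>
#include <unordered_map>

// Build a tiny header table and look one field up, many times over, so the
// per-construction cost dominates, as it does for a real HTTP request header.
template <typename MapType>
long long runTrialMicroseconds(int iterations) {
    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < iterations; ++i) {
        MapType headers;  // constructed fresh each iteration, like per request
        headers["Host"] = "127.0.0.1";
        headers["Connection"] = "keep-alive";
        headers["Accept"] = "*/*";
        headers["User-Agent"] = "WebBench 1.5";
        volatile size_t hit = headers.count("Connection");  // keep the lookup
        (void)hit;
    }
    auto end = std::chrono::steady_clock::now();
    return std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
}

int main() {
    const int iterations = 100000;
    std::cout << "std::map:           "
              << runTrialMicroseconds<std::map<std::string, std::string>>(iterations)
              << " us\n";
    std::cout << "std::unordered_map: "
              << runTrialMicroseconds<std::unordered_map<std::string, std::string>>(iterations)
              << " us\n";
    return 0;
}
```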

