Repository: S1mpleBug/muduo_cpp11 Branch: main Commit: 5a0a98235a91 Files: 40 Total size: 63.1 KB Directory structure: gitextract_yawb7po4/ ├── Acceptor.cc ├── Acceptor.h ├── Buffer.cc ├── Buffer.h ├── CMakeLists.txt ├── Callbacks.h ├── Channel.cc ├── Channel.h ├── CurrentThread.cc ├── CurrentThread.h ├── DefaultPoller.cc ├── EPollPoller.cc ├── EPollPoller.h ├── EventLoop.cc ├── EventLoop.h ├── EventLoopThread.cc ├── EventLoopThread.h ├── EventLoopThreadPool.cc ├── EventLoopThreadPool.h ├── InetAddress.cc ├── InetAddress.h ├── Logger.cc ├── Logger.h ├── Poller.cc ├── Poller.h ├── README.md ├── Socket.cc ├── Socket.h ├── TcpConnection.cc ├── TcpConnection.h ├── TcpServer.cc ├── TcpServer.h ├── Thread.cc ├── Thread.h ├── Timestamp.cc ├── Timestamp.h ├── build.sh ├── example/ │ ├── Makefile │ └── testserver.cc └── noncopyable.h ================================================ FILE CONTENTS ================================================ ================================================ FILE: Acceptor.cc ================================================ #include #include #include #include #include "Acceptor.h" #include "Logger.h" #include "InetAddress.h" static int createNonblocking() { int sockfd = ::socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, IPPROTO_TCP); if (sockfd < 0) { LOG_FATAL("%s:%s:%d listen socket create err:%d\n", __FILE__, __FUNCTION__, __LINE__, errno); } return sockfd; } Acceptor::Acceptor(EventLoop *loop, const InetAddress &listenAddr, bool reuseport) : loop_(loop) , acceptSocket_(createNonblocking()) , acceptChannel_(loop, acceptSocket_.fd()) , listenning_(false) { acceptSocket_.setReuseAddr(true); acceptSocket_.setReusePort(true); acceptSocket_.bindAddress(listenAddr); // TcpServer::start() => Acceptor.listen() 如果有新用户连接 要执行一个回调(accept => connfd => 打包成Channel => 唤醒subloop) // baseloop监听到有事件发生 => acceptChannel_(listenfd) => 执行该回调函数 acceptChannel_.setReadCallback( std::bind(&Acceptor::handleRead, this)); } Acceptor::~Acceptor() { 
acceptChannel_.disableAll(); // 把从Poller中感兴趣的事件删除掉 acceptChannel_.remove(); // 调用EventLoop->removeChannel => Poller->removeChannel 把Poller的ChannelMap对应的部分删除 } void Acceptor::listen() { listenning_ = true; acceptSocket_.listen(); // listen acceptChannel_.enableReading(); // acceptChannel_注册至Poller !重要 } // listenfd有事件发生了,就是有新用户连接了 void Acceptor::handleRead() { InetAddress peerAddr; int connfd = acceptSocket_.accept(&peerAddr); if (connfd >= 0) { if (NewConnectionCallback_) { NewConnectionCallback_(connfd, peerAddr); // 轮询找到subLoop 唤醒并分发当前的新客户端的Channel } else { ::close(connfd); } } else { LOG_ERROR("%s:%s:%d accept err:%d\n", __FILE__, __FUNCTION__, __LINE__, errno); if (errno == EMFILE) { LOG_ERROR("%s:%s:%d sockfd reached limit\n", __FILE__, __FUNCTION__, __LINE__); } } } ================================================ FILE: Acceptor.h ================================================ #pragma once #include #include "noncopyable.h" #include "Socket.h" #include "Channel.h" class EventLoop; class InetAddress; class Acceptor : noncopyable { public: using NewConnectionCallback = std::function; Acceptor(EventLoop *loop, const InetAddress &listenAddr, bool reuseport); ~Acceptor(); void setNewConnectionCallback(const NewConnectionCallback &cb) { NewConnectionCallback_ = cb; } bool listenning() const { return listenning_; } void listen(); private: void handleRead(); EventLoop *loop_; // Acceptor用的就是用户定义的那个baseLoop 也称作mainLoop Socket acceptSocket_; Channel acceptChannel_; NewConnectionCallback NewConnectionCallback_; bool listenning_; }; ================================================ FILE: Buffer.cc ================================================ #include #include #include #include "Buffer.h" /** * 从fd上读取数据 Poller工作在LT模式 * Buffer缓冲区是有大小的! 
但是从fd上读取数据的时候 却不知道tcp数据的最终大小 * * @description: 从socket读到缓冲区的方法是使用readv先读至buffer_, * Buffer_空间如果不够会读入到栈上65536个字节大小的空间,然后以append的 * 方式追加入buffer_。既考虑了避免系统调用带来开销,又不影响数据的接收。 **/ ssize_t Buffer::readFd(int fd, int *saveErrno) { // 栈额外空间,用于从套接字往出读时,当buffer_暂时不够用时暂存数据,待buffer_重新分配足够空间后,在把数据交换给buffer_。 char extrabuf[65536] = {0}; // 栈上内存空间 65536/1024 = 64KB /* struct iovec { ptr_t iov_base; // iov_base指向的缓冲区存放的是readv所接收的数据或是writev将要发送的数据 size_t iov_len; // iov_len在各种情况下分别确定了接收的最大长度以及实际写入的长度 }; */ // 使用iovec分配两个连续的缓冲区 struct iovec vec[2]; const size_t writable = writableBytes(); // 这是Buffer底层缓冲区剩余的可写空间大小 不一定能完全存储从fd读出的数据 // 第一块缓冲区,指向可写空间 vec[0].iov_base = begin() + writerIndex_; vec[0].iov_len = writable; // 第二块缓冲区,指向栈空间 vec[1].iov_base = extrabuf; vec[1].iov_len = sizeof(extrabuf); // when there is enough space in this buffer, don't read into extrabuf. // when extrabuf is used, we read 128k-1 bytes at most. // 这里之所以说最多128k-1字节,是因为若writable为64k-1,那么需要两个缓冲区 第一个64k-1 第二个64k 所以做多128k-1 // 如果第一个缓冲区>=64k 那就只采用一个缓冲区 而不使用栈空间extrabuf[65536]的内容 const int iovcnt = (writable < sizeof(extrabuf)) ? 
2 : 1; const ssize_t n = ::readv(fd, vec, iovcnt); if (n < 0) { *saveErrno = errno; } else if (n <= writable) // Buffer的可写缓冲区已经够存储读出来的数据了 { writerIndex_ += n; } else // extrabuf里面也写入了n-writable长度的数据 { writerIndex_ = buffer_.size(); append(extrabuf, n - writable); // 对buffer_扩容 并将extrabuf存储的另一部分数据追加至buffer_ } return n; } // inputBuffer_.readFd表示将对端数据读到inputBuffer_中,移动writerIndex_指针 // outputBuffer_.writeFd标示将数据写入到outputBuffer_中,从readerIndex_开始,可以写readableBytes()个字节 ssize_t Buffer::writeFd(int fd, int *saveErrno) { ssize_t n = ::write(fd, peek(), readableBytes()); if (n < 0) { *saveErrno = errno; } return n; } ================================================ FILE: Buffer.h ================================================ #pragma once #include #include #include #include // 网络库底层的缓冲区类型定义 class Buffer { public: static const size_t kCheapPrepend = 8; static const size_t kInitialSize = 1024; explicit Buffer(size_t initalSize = kInitialSize) : buffer_(kCheapPrepend + initalSize) , readerIndex_(kCheapPrepend) , writerIndex_(kCheapPrepend) { } size_t readableBytes() const { return writerIndex_ - readerIndex_; } size_t writableBytes() const { return buffer_.size() - writerIndex_; } size_t prependableBytes() const { return readerIndex_; } // 返回缓冲区中可读数据的起始地址 const char *peek() const { return begin() + readerIndex_; } void retrieve(size_t len) { if (len < readableBytes()) { readerIndex_ += len; // 说明应用只读取了可读缓冲区数据的一部分,就是len长度 还剩下readerIndex+=len到writerIndex_的数据未读 } else // len == readableBytes() { retrieveAll(); } } void retrieveAll() { readerIndex_ = kCheapPrepend; writerIndex_ = kCheapPrepend; } // 把onMessage函数上报的Buffer数据 转成string类型的数据返回 std::string retrieveAllAsString() { return retrieveAsString(readableBytes()); } std::string retrieveAsString(size_t len) { std::string result(peek(), len); retrieve(len); // 上面一句把缓冲区中可读的数据已经读取出来 这里肯定要对缓冲区进行复位操作 return result; } // buffer_.size - writerIndex_ void ensureWritableBytes(size_t len) { if (writableBytes() < len) { makeSpace(len); // 
扩容 } } // 把[data, data+len]内存上的数据添加到writable缓冲区当中 void append(const char *data, size_t len) { ensureWritableBytes(len); std::copy(data, data+len, beginWrite()); writerIndex_ += len; } char *beginWrite() { return begin() + writerIndex_; } const char *beginWrite() const { return begin() + writerIndex_; } // 从fd上读取数据 ssize_t readFd(int fd, int *saveErrno); // 通过fd发送数据 ssize_t writeFd(int fd, int *saveErrno); private: // vector底层数组首元素的地址 也就是数组的起始地址 char *begin() { return &*buffer_.begin(); } const char *begin() const { return &*buffer_.begin(); } void makeSpace(size_t len) { /** * | kCheapPrepend |xxx| reader | writer | // xxx标示reader中已读的部分 * | kCheapPrepend | reader | len | **/ if (writableBytes() + prependableBytes() < len + kCheapPrepend) // 也就是说 len > xxx + writer的部分 { buffer_.resize(writerIndex_ + len); } else // 这里说明 len <= xxx + writer 把reader搬到从xxx开始 使得xxx后面是一段连续空间 { size_t readable = readableBytes(); // readable = reader的长度 std::copy(begin() + readerIndex_, begin() + writerIndex_, // 把这一部分数据拷贝到begin+kCheapPrepend起始处 begin() + kCheapPrepend); readerIndex_ = kCheapPrepend; writerIndex_ = readerIndex_ + readable; } } std::vector buffer_; size_t readerIndex_; size_t writerIndex_; }; ================================================ FILE: CMakeLists.txt ================================================ cmake_minimum_required(VERSION 2.5) project(mymuduo) # mymuduo最终编译成so动态库 设置动态库的路径 放置项目根目录的lib文件夹下面 set(LIBRARY_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/lib) # 设置调试信息 以及启动C++11语言标准 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -std=c++11") # 定义参与编译的源代码文件 aux_source_directory(. 
SRC_LIST) # 编译动态库 add_library(mymuduo SHARED ${SRC_LIST}) ================================================ FILE: Callbacks.h ================================================ #pragma once #include #include class Buffer; class TcpConnection; class Timestamp; using TcpConnectionPtr = std::shared_ptr; using ConnectionCallback = std::function; using CloseCallback = std::function; using WriteCompleteCallback = std::function; using HighWaterMarkCallback = std::function; using MessageCallback = std::function; ================================================ FILE: Channel.cc ================================================ #include #include "Channel.h" #include "EventLoop.h" #include "Logger.h" const int Channel::kNoneEvent = 0; const int Channel::kReadEvent = EPOLLIN | EPOLLPRI; const int Channel::kWriteEvent = EPOLLOUT; // EventLoop: ChannelList Poller Channel::Channel(EventLoop *loop, int fd) : loop_(loop) , fd_(fd) , events_(0) , revents_(0) , index_(-1) , tied_(false) { } Channel::~Channel() { } // channel的tie方法什么时候调用过? TcpConnection => channel /** * TcpConnection中注册了Chnanel对应的回调函数,传入的回调函数均为TcpConnection * 对象的成员方法,因此可以说明一点就是:Channel的结束一定早于TcpConnection对象! 
* 此处用tie去解决TcoConnection和Channel的生命周期时长问题,从而保证了Channel对 * 象能够在TcpConnection销毁前销毁。 **/ void Channel::tie(const std::shared_ptr &obj) { tie_ = obj; tied_ = true; } /** * 当改变channel所表示的fd的events事件后,update负责再poller里面更改fd相应的事件epoll_ctl **/ void Channel::update() { // 通过channel所属的eventloop,调用poller的相应方法,注册fd的events事件 loop_->updateChannel(this); } // 在channel所属的EventLoop中把当前的channel删除掉 void Channel::remove() { loop_->removeChannel(this); } void Channel::handleEvent(Timestamp receiveTime) { if (tied_) { std::shared_ptr guard = tie_.lock(); if (guard) { handleEventWithGuard(receiveTime); } // 如果提升失败了 就不做任何处理 说明Channel的TcpConnection对象已经不存在了 } else { handleEventWithGuard(receiveTime); } } void Channel::handleEventWithGuard(Timestamp receiveTime) { LOG_INFO("channel handleEvent revents:%d\n", revents_); // 关闭 if ((revents_ & EPOLLHUP) && !(revents_ & EPOLLIN)) // 当TcpConnection对应Channel 通过shutdown 关闭写端 epoll触发EPOLLHUP { if (closeCallback_) { closeCallback_(); } } // 错误 if (revents_ & EPOLLERR) { if (errorCallback_) { errorCallback_(); } } // 读 if (revents_ & (EPOLLIN | EPOLLPRI)) { if (readCallback_) { readCallback_(receiveTime); } } // 写 if (revents_ & EPOLLOUT) { if (writeCallback_) { writeCallback_(); } } } ================================================ FILE: Channel.h ================================================ #pragma once #include #include #include "noncopyable.h" #include "Timestamp.h" class EventLoop; /** * 理清楚 EventLoop、Channel、Poller之间的关系 Reactor模型上对应多路事件分发器 * Channel理解为通道 封装了sockfd和其感兴趣的event 如EPOLLIN、EPOLLOUT事件 还绑定了poller返回的具体事件 **/ class Channel : noncopyable { public: using EventCallback = std::function; // muduo仍使用typedef using ReadEventCallback = std::function; Channel(EventLoop *loop, int fd); ~Channel(); // fd得到Poller通知以后 处理事件 handleEvent在EventLoop::loop()中调用 void handleEvent(Timestamp receiveTime); // 设置回调函数对象 void setReadCallback(ReadEventCallback cb) { readCallback_ = std::move(cb); } void setWriteCallback(EventCallback cb) { writeCallback_ = 
std::move(cb); } void setCloseCallback(EventCallback cb) { closeCallback_ = std::move(cb); } void setErrorCallback(EventCallback cb) { errorCallback_ = std::move(cb); } // 防止当channel被手动remove掉 channel还在执行回调操作 void tie(const std::shared_ptr &); int fd() const { return fd_; } int events() const { return events_; } void set_revents(int revt) { revents_ = revt; } // 设置fd相应的事件状态 相当于epoll_ctl add delete void enableReading() { events_ |= kReadEvent; update(); } void disableReading() { events_ &= ~kReadEvent; update(); } void enableWriting() { events_ |= kWriteEvent; update(); } void disableWriting() { events_ &= ~kWriteEvent; update(); } void disableAll() { events_ = kNoneEvent; update(); } // 返回fd当前的事件状态 bool isNoneEvent() const { return events_ == kNoneEvent; } bool isWriting() const { return events_ & kWriteEvent; } bool isReading() const { return events_ & kReadEvent; } int index() { return index_; } void set_index(int idx) { index_ = idx; } // one loop per thread EventLoop *ownerLoop() { return loop_; } void remove(); private: void update(); void handleEventWithGuard(Timestamp receiveTime); static const int kNoneEvent; static const int kReadEvent; static const int kWriteEvent; EventLoop *loop_; // 事件循环 const int fd_; // fd,Poller监听的对象 int events_; // 注册fd感兴趣的事件 int revents_; // Poller返回的具体发生的事件 int index_; std::weak_ptr tie_; bool tied_; // 因为channel通道里可获知fd最终发生的具体的事件events,所以它负责调用具体事件的回调操作 ReadEventCallback readCallback_; EventCallback writeCallback_; EventCallback closeCallback_; EventCallback errorCallback_; }; ================================================ FILE: CurrentThread.cc ================================================ #include "CurrentThread.h" namespace CurrentThread { __thread int t_cachedTid = 0; void cacheTid() { if (t_cachedTid == 0) { t_cachedTid = static_cast(::syscall(SYS_gettid)); } } } ================================================ FILE: CurrentThread.h ================================================ #pragma once #include #include 
namespace CurrentThread { extern __thread int t_cachedTid; // 保存tid缓存 因为系统调用非常耗时 拿到tid后将其保存 void cacheTid(); inline int tid() // 内联函数只在当前文件中起作用 { if (__builtin_expect(t_cachedTid == 0, 0)) // __builtin_expect 是一种底层优化 此语句意思是如果还未获取tid 进入if 通过cacheTid()系统调用获取tid { cacheTid(); } return t_cachedTid; } } ================================================ FILE: DefaultPoller.cc ================================================ #include #include "Poller.h" #include "EPollPoller.h" Poller *Poller::newDefaultPoller(EventLoop *loop) { if (::getenv("MUDUO_USE_POLL")) { return nullptr; // 生成poll的实例 } else { return new EPollPoller(loop); // 生成epoll的实例 } } ================================================ FILE: EPollPoller.cc ================================================ #include #include #include #include "EPollPoller.h" #include "Logger.h" #include "Channel.h" const int kNew = -1; // 某个channel还没添加至Poller // channel的成员index_初始化为-1 const int kAdded = 1; // 某个channel已经添加至Poller const int kDeleted = 2; // 某个channel已经从Poller删除 EPollPoller::EPollPoller(EventLoop *loop) : Poller(loop) , epollfd_(::epoll_create1(EPOLL_CLOEXEC)) , events_(kInitEventListSize) // vector(16) { if (epollfd_ < 0) { LOG_FATAL("epoll_create error:%d \n", errno); } } EPollPoller::~EPollPoller() { ::close(epollfd_); } Timestamp EPollPoller::poll(int timeoutMs, ChannelList *activeChannels) { // 由于频繁调用poll 实际上应该用LOG_DEBUG输出日志更为合理 当遇到并发场景 关闭DEBUG日志提升效率 LOG_INFO("func=%s => fd total count:%lu\n", __FUNCTION__, channels_.size()); int numEvents = ::epoll_wait(epollfd_, &*events_.begin(), static_cast(events_.size()), timeoutMs); int saveErrno = errno; Timestamp now(Timestamp::now()); if (numEvents > 0) { LOG_INFO("%d events happend\n", numEvents); // LOG_DEBUG最合理 fillActiveChannels(numEvents, activeChannels); if (numEvents == events_.size()) // 扩容操作 { events_.resize(events_.size() * 2); } } else if (numEvents == 0) { LOG_DEBUG("%s timeout!\n", __FUNCTION__); } else { if (saveErrno != EINTR) { errno = saveErrno; 
LOG_ERROR("EPollPoller::poll() error!"); } } return now; } // channel update remove => EventLoop updateChannel removeChannel => Poller updateChannel removeChannel void EPollPoller::updateChannel(Channel *channel) { const int index = channel->index(); LOG_INFO("func=%s => fd=%d events=%d index=%d\n", __FUNCTION__, channel->fd(), channel->events(), index); if (index == kNew || index == kDeleted) { if (index == kNew) { int fd = channel->fd(); channels_[fd] = channel; } else // index == kAdd { } channel->set_index(kAdded); update(EPOLL_CTL_ADD, channel); } else // channel已经在Poller中注册过了 { int fd = channel->fd(); if (channel->isNoneEvent()) { update(EPOLL_CTL_DEL, channel); channel->set_index(kDeleted); } else { update(EPOLL_CTL_MOD, channel); } } } // 从Poller中删除channel void EPollPoller::removeChannel(Channel *channel) { int fd = channel->fd(); channels_.erase(fd); LOG_INFO("func=%s => fd=%d\n", __FUNCTION__, fd); int index = channel->index(); if (index == kAdded) { update(EPOLL_CTL_DEL, channel); } channel->set_index(kNew); } // 填写活跃的连接 void EPollPoller::fillActiveChannels(int numEvents, ChannelList *activeChannels) const { for (int i = 0; i < numEvents; ++i) { Channel *channel = static_cast(events_[i].data.ptr); channel->set_revents(events_[i].events); activeChannels->push_back(channel); // EventLoop就拿到了它的Poller给它返回的所有发生事件的channel列表了 } } // 更新channel通道 其实就是调用epoll_ctl add/mod/del void EPollPoller::update(int operation, Channel *channel) { epoll_event event; ::memset(&event, 0, sizeof(event)); int fd = channel->fd(); event.events = channel->events(); event.data.fd = fd; event.data.ptr = channel; if (::epoll_ctl(epollfd_, operation, fd, &event) < 0) { if (operation == EPOLL_CTL_DEL) { LOG_ERROR("epoll_ctl del error:%d\n", errno); } else { LOG_FATAL("epoll_ctl add/mod error:%d\n", errno); } } } ================================================ FILE: EPollPoller.h ================================================ #pragma once #include #include #include "Poller.h" #include 
"Timestamp.h" /** * epoll的使用: * 1. epoll_create * 2. epoll_ctl (add, mod, del) * 3. epoll_wait **/ class Channel; class EPollPoller : public Poller { public: EPollPoller(EventLoop *loop); ~EPollPoller() override; // 重写基类Poller的抽象方法 Timestamp poll(int timeoutMs, ChannelList *activeChannels) override; void updateChannel(Channel *channel) override; void removeChannel(Channel *channel) override; private: static const int kInitEventListSize = 16; // 填写活跃的连接 void fillActiveChannels(int numEvents, ChannelList *activeChannels) const; // 更新channel通道 其实就是调用epoll_ctl void update(int operation, Channel *channel); using EventList = std::vector; // C++中可以省略struct 直接写epoll_event即可 int epollfd_; // epoll_create创建返回的fd保存在epollfd_中 EventList events_; // 用于存放epoll_wait返回的所有发生的事件的文件描述符事件集 }; ================================================ FILE: EventLoop.cc ================================================ #include #include #include #include #include #include "EventLoop.h" #include "Logger.h" #include "Channel.h" #include "Poller.h" // 防止一个线程创建多个EventLoop __thread EventLoop *t_loopInThisThread = nullptr; // 定义默认的Poller IO复用接口的超时时间 const int kPollTimeMs = 10000; // 10000毫秒 = 10秒钟 /* 创建线程之后主线程和子线程谁先运行是不确定的。 * 通过一个eventfd在线程之间传递数据的好处是多个线程无需上锁就可以实现同步。 * eventfd支持的最低内核版本为Linux 2.6.27,在2.6.26及之前的版本也可以使用eventfd,但是flags必须设置为0。 * 函数原型: * #include * int eventfd(unsigned int initval, int flags); * 参数说明: * initval,初始化计数器的值。 * flags, EFD_NONBLOCK,设置socket为非阻塞。 * EFD_CLOEXEC,执行fork的时候,在父进程中的描述符会自动关闭,子进程中的描述符保留。 * 场景: * eventfd可以用于同一个进程之中的线程之间的通信。 * eventfd还可以用于同亲缘关系的进程之间的通信。 * eventfd用于不同亲缘关系的进程之间通信的话需要把eventfd放在几个进程共享的共享内存中(没有测试过)。 */ // 创建wakeupfd 用来notify唤醒subReactor处理新来的channel int createEventfd() { int evtfd = ::eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC); if (evtfd < 0) { LOG_FATAL("eventfd error:%d\n", errno); } return evtfd; } EventLoop::EventLoop() : looping_(false) , quit_(false) , callingPendingFunctors_(false) , threadId_(CurrentThread::tid()) , poller_(Poller::newDefaultPoller(this)) , 
wakeupFd_(createEventfd()) , wakeupChannel_(new Channel(this, wakeupFd_)) { LOG_DEBUG("EventLoop created %p in thread %d\n", this, threadId_); if (t_loopInThisThread) { LOG_FATAL("Another EventLoop %p exists in this thread %d\n", t_loopInThisThread, threadId_); } else { t_loopInThisThread = this; } wakeupChannel_->setReadCallback( std::bind(&EventLoop::handleRead, this)); // 设置wakeupfd的事件类型以及发生事件后的回调操作 wakeupChannel_->enableReading(); // 每一个EventLoop都将监听wakeupChannel_的EPOLL读事件了 } EventLoop::~EventLoop() { wakeupChannel_->disableAll(); // 给Channel移除所有感兴趣的事件 wakeupChannel_->remove(); // 把Channel从EventLoop上删除掉 ::close(wakeupFd_); t_loopInThisThread = nullptr; } // 开启事件循环 void EventLoop::loop() { looping_ = true; quit_ = false; LOG_INFO("EventLoop %p start looping\n", this); while (!quit_) { activeChannels_.clear(); pollRetureTime_ = poller_->poll(kPollTimeMs, &activeChannels_); for (Channel *channel : activeChannels_) { // Poller监听哪些channel发生了事件 然后上报给EventLoop 通知channel处理相应的事件 channel->handleEvent(pollRetureTime_); } /** * 执行当前EventLoop事件循环需要处理的回调操作 对于线程数 >=2 的情况 IO线程 mainloop(mainReactor) 主要工作: * accept接收连接 => 将accept返回的connfd打包为Channel => TcpServer::newConnection通过轮询将TcpConnection对象分配给subloop处理 * * mainloop调用queueInLoop将回调加入subloop(该回调需要subloop执行 但subloop还在poller_->poll处阻塞) queueInLoop通过wakeup将subloop唤醒 **/ doPendingFunctors(); } LOG_INFO("EventLoop %p stop looping.\n", this); looping_ = false; } /** * 退出事件循环 * 1. 如果loop在自己的线程中调用quit成功了 说明当前线程已经执行完毕了loop()函数的poller_->poll并退出 * 2. 如果不是当前EventLoop所属线程中调用quit退出EventLoop 需要唤醒EventLoop所属线程的epoll_wait * * 比如在一个subloop(worker)中调用mainloop(IO)的quit时 需要唤醒mainloop(IO)的poller_->poll 让其执行完loop()函数 * * !!! 注意: 正常情况下 mainloop负责请求连接 将回调写入subloop中 通过生产者消费者模型即可实现线程安全的队列 * !!! 
但是muduo通过wakeup()机制 使用eventfd创建的wakeupFd_ notify 使得mainloop和subloop之间能够进行通信 **/ void EventLoop::quit() { quit_ = true; if (!isInLoopThread()) { wakeup(); } } // 在当前loop中执行cb void EventLoop::runInLoop(Functor cb) { if (isInLoopThread()) // 当前EventLoop中执行回调 { cb(); } else // 在非当前EventLoop线程中执行cb,就需要唤醒EventLoop所在线程执行cb { queueInLoop(cb); } } // 把cb放入队列中 唤醒loop所在的线程执行cb void EventLoop::queueInLoop(Functor cb) { { std::unique_lock lock(mutex_); pendingFunctors_.emplace_back(cb); } /** * || callingPendingFunctors的意思是 当前loop正在执行回调中 但是loop的pendingFunctors_中又加入了新的回调 需要通过wakeup写事件 * 唤醒相应的需要执行上面回调操作的loop的线程 让loop()下一次poller_->poll()不再阻塞(阻塞的话会延迟前一次新加入的回调的执行),然后 * 继续执行pendingFunctors_中的回调函数 **/ if (!isInLoopThread() || callingPendingFunctors_) { wakeup(); // 唤醒loop所在线程 } } void EventLoop::handleRead() { uint64_t one = 1; ssize_t n = read(wakeupFd_, &one, sizeof(one)); if (n != sizeof(one)) { LOG_ERROR("EventLoop::handleRead() reads %lu bytes instead of 8\n", n); } } // 用来唤醒loop所在线程 向wakeupFd_写一个数据 wakeupChannel就发生读事件 当前loop线程就会被唤醒 void EventLoop::wakeup() { uint64_t one = 1; ssize_t n = write(wakeupFd_, &one, sizeof(one)); if (n != sizeof(one)) { LOG_ERROR("EventLoop::wakeup() writes %lu bytes instead of 8\n", n); } } // EventLoop的方法 => Poller的方法 void EventLoop::updateChannel(Channel *channel) { poller_->updateChannel(channel); } void EventLoop::removeChannel(Channel *channel) { poller_->removeChannel(channel); } bool EventLoop::hasChannel(Channel *channel) { return poller_->hasChannel(channel); } void EventLoop::doPendingFunctors() { std::vector functors; callingPendingFunctors_ = true; { std::unique_lock lock(mutex_); functors.swap(pendingFunctors_); // 交换的方式减少了锁的临界区范围 提升效率 同时避免了死锁 如果执行functor()在临界区内 且functor()中调用queueInLoop()就会产生死锁 } for (const Functor &functor : functors) { functor(); // 执行当前loop需要执行的回调操作 } callingPendingFunctors_ = false; } ================================================ FILE: EventLoop.h ================================================ #pragma once 
#include #include #include #include #include #include "noncopyable.h" #include "Timestamp.h" #include "CurrentThread.h" class Channel; class Poller; // 事件循环类 主要包含了两个大模块 Channel Poller(epoll的抽象) class EventLoop : noncopyable { public: using Functor = std::function; EventLoop(); ~EventLoop(); // 开启事件循环 void loop(); // 退出事件循环 void quit(); Timestamp pollReturnTime() const { pollRetureTime_; } // 在当前loop中执行 void runInLoop(Functor cb); // 把上层注册的回调函数cb放入队列中 唤醒loop所在的线程执行cb void queueInLoop(Functor cb); // 通过eventfd唤醒loop所在的线程 void wakeup(); // EventLoop的方法 => Poller的方法 void updateChannel(Channel *channel); void removeChannel(Channel *channel); bool hasChannel(Channel *channel); // 判断EventLoop对象是否在自己的线程里 bool isInLoopThread() const { return threadId_ == CurrentThread::tid(); } // threadId_为EventLoop创建时的线程id CurrentThread::tid()为当前线程id private: void handleRead(); // 给eventfd返回的文件描述符wakeupFd_绑定的事件回调 当wakeup()时 即有事件发生时 调用handleRead()读wakeupFd_的8字节 同时唤醒阻塞的epoll_wait void doPendingFunctors(); // 执行上层回调 using ChannelList = std::vector; std::atomic_bool looping_; // 原子操作 底层通过CAS实现 std::atomic_bool quit_; // 标识退出loop循环 const pid_t threadId_; // 记录当前EventLoop是被哪个线程id创建的 即标识了当前EventLoop的所属线程id Timestamp pollRetureTime_; // Poller返回发生事件的Channels的时间点 std::unique_ptr poller_; int wakeupFd_; // 作用:当mainLoop获取一个新用户的Channel 需通过轮询算法选择一个subLoop 通过该成员唤醒subLoop处理Channel std::unique_ptr wakeupChannel_; ChannelList activeChannels_; // 返回Poller检测到当前有事件发生的所有Channel列表 std::atomic_bool callingPendingFunctors_; // 标识当前loop是否有需要执行的回调操作 std::vector pendingFunctors_; // 存储loop需要执行的所有回调操作 std::mutex mutex_; // 互斥锁 用来保护上面vector容器的线程安全操作 }; ================================================ FILE: EventLoopThread.cc ================================================ #include "EventLoopThread.h" #include "EventLoop.h" EventLoopThread::EventLoopThread(const ThreadInitCallback &cb, const std::string &name) : loop_(nullptr) , exiting_(false) , thread_(std::bind(&EventLoopThread::threadFunc, this), name) , mutex_() 
, cond_() , callback_(cb) { } EventLoopThread::~EventLoopThread() { exiting_ = true; if (loop_ != nullptr) { loop_->quit(); thread_.join(); } } EventLoop *EventLoopThread::startLoop() { thread_.start(); // 启用底层线程Thread类对象thread_中通过start()创建的线程 EventLoop *loop = nullptr; { std::unique_lock lock(mutex_); while(loop_ == nullptr) { cond_.wait(lock); } loop = loop_; } return loop; } // 下面这个方法 是在单独的新线程里运行的 void EventLoopThread::threadFunc() { EventLoop loop; // 创建一个独立的EventLoop对象 和上面的线程是一一对应的 级one loop per thread if (callback_) { callback_(&loop); } { std::unique_lock lock(mutex_); loop_ = &loop; cond_.notify_one(); } loop.loop(); // 执行EventLoop的loop() 开启了底层的Poller的poll() std::unique_lock lock(mutex_); loop_ = nullptr; } ================================================ FILE: EventLoopThread.h ================================================ #pragma once #include #include #include #include #include "noncopyable.h" #include "Thread.h" class EventLoop; class EventLoopThread : noncopyable { public: using ThreadInitCallback = std::function; EventLoopThread(const ThreadInitCallback &cb = ThreadInitCallback(), const std::string &name = std::string()); ~EventLoopThread(); EventLoop *startLoop(); private: void threadFunc(); EventLoop *loop_; bool exiting_; Thread thread_; std::mutex mutex_; // 互斥锁 std::condition_variable cond_; // 条件变量 ThreadInitCallback callback_; }; ================================================ FILE: EventLoopThreadPool.cc ================================================ #include #include "EventLoopThreadPool.h" #include "EventLoopThread.h" EventLoopThreadPool::EventLoopThreadPool(EventLoop *baseLoop, const std::string &nameArg) : baseLoop_(baseLoop) , name_(nameArg) , started_(false) , numThreads_(0) , next_(0) { } EventLoopThreadPool::~EventLoopThreadPool() { // Don't delete loop, it's stack variable } void EventLoopThreadPool::start(const ThreadInitCallback &cb) { started_ = true; for(int i = 0; i < numThreads_; ++i) { char buf[name_.size() + 32]; 
snprintf(buf, sizeof buf, "%s%d", name_.c_str(), i); EventLoopThread *t = new EventLoopThread(cb, buf); threads_.push_back(std::unique_ptr(t)); loops_.push_back(t->startLoop()); // 底层创建线程 绑定一个新的EventLoop 并返回该loop的地址 } if(numThreads_ == 0 && cb) // 整个服务端只有一个线程运行baseLoop { cb(baseLoop_); } } // 如果工作在多线程中,baseLoop_(mainLoop)会默认以轮询的方式分配Channel给subLoop EventLoop *EventLoopThreadPool::getNextLoop() { EventLoop *loop = baseLoop_; // 如果只设置一个线程 也就是只有一个mainReactor 无subReactor 那么轮询只有一个线程 getNextLoop()每次都返回当前的baseLoop_ if(!loops_.empty()) // 通过轮询获取下一个处理事件的loop { loop = loops_[next_]; ++next_; if(next_ >= loops_.size()) { next_ = 0; } } return loop; } std::vector EventLoopThreadPool::getAllLoops() { if(loops_.empty()) { return std::vector(1, baseLoop_); } else { return loops_; } } ================================================ FILE: EventLoopThreadPool.h ================================================ #pragma once #include #include #include #include #include "noncopyable.h" class EventLoop; class EventLoopThread; class EventLoopThreadPool : noncopyable { public: using ThreadInitCallback = std::function; EventLoopThreadPool(EventLoop *baseLoop, const std::string &nameArg); ~EventLoopThreadPool(); void setThreadNum(int numThreads) { numThreads_ = numThreads; } void start(const ThreadInitCallback &cb = ThreadInitCallback()); // 如果工作在多线程中,baseLoop_(mainLoop)会默认以轮询的方式分配Channel给subLoop EventLoop *getNextLoop(); std::vector getAllLoops(); bool started() const { return started_; } const std::string name() const { return name_; } private: EventLoop *baseLoop_; // 用户使用muduo创建的loop 如果线程数为1 那直接使用用户创建的loop 否则创建多EventLoop std::string name_; bool started_; int numThreads_; int next_; // 轮询的下标 std::vector> threads_; std::vector loops_; }; ================================================ FILE: InetAddress.cc ================================================ #include #include #include "InetAddress.h" InetAddress::InetAddress(uint16_t port, std::string ip) { ::memset(&addr_, 0, sizeof(addr_)); 
addr_.sin_family = AF_INET; addr_.sin_port = ::htons(port); // 本地字节序转为网络字节序 addr_.sin_addr.s_addr = ::inet_addr(ip.c_str()); } std::string InetAddress::toIp() const { // addr_ char buf[64] = {0}; ::inet_ntop(AF_INET, &addr_.sin_addr, buf, sizeof buf); return buf; } std::string InetAddress::toIpPort() const { // ip:port char buf[64] = {0}; ::inet_ntop(AF_INET, &addr_.sin_addr, buf, sizeof buf); size_t end = ::strlen(buf); uint16_t port = ::ntohs(addr_.sin_port); sprintf(buf+end, ":%u", port); return buf; } uint16_t InetAddress::toPort() const { return ::ntohs(addr_.sin_port); } #if 0 #include int main() { InetAddress addr(8080); std::cout << addr.toIpPort() << std::endl; } #endif ================================================ FILE: InetAddress.h ================================================ #pragma once #include #include #include // 封装socket地址类型 class InetAddress { public: explicit InetAddress(uint16_t port = 0, std::string ip = "127.0.0.1"); explicit InetAddress(const sockaddr_in &addr) : addr_(addr) { } std::string toIp() const; std::string toIpPort() const; uint16_t toPort() const; const sockaddr_in *getSockAddr() const { return &addr_; } void setSockAddr(const sockaddr_in &addr) { addr_ = addr; } private: sockaddr_in addr_; }; ================================================ FILE: Logger.cc ================================================ #include #include "Logger.h" #include "Timestamp.h" // 获取日志唯一的实例对象 单例 Logger &Logger::instance() { static Logger logger; return logger; } // 设置日志级别 void Logger::setLogLevel(int level) { logLevel_ = level; } // 写日志 [级别信息] time : msg void Logger::log(std::string msg) { std::string pre = ""; switch (logLevel_) { case INFO: pre = "[INFO]"; break; case ERROR: pre = "[ERROR]"; break; case FATAL: pre = "[FATAL]"; break; case DEBUG: pre = "[DEBUG]"; break; default: break; } // 打印时间和msg std::cout << pre + Timestamp::now().toString() << " : " << msg << std::endl; } ================================================ FILE: Logger.h 
================================================ #pragma once #include #include "noncopyable.h" // LOG_INFO("%s %d", arg1, arg2) #define LOG_INFO(logmsgFormat, ...) \ do \ { \ Logger &logger = Logger::instance(); \ logger.setLogLevel(INFO); \ char buf[1024] = {0}; \ snprintf(buf, 1024, logmsgFormat, ##__VA_ARGS__); \ logger.log(buf); \ } while (0) #define LOG_ERROR(logmsgFormat, ...) \ do \ { \ Logger &logger = Logger::instance(); \ logger.setLogLevel(ERROR); \ char buf[1024] = {0}; \ snprintf(buf, 1024, logmsgFormat, ##__VA_ARGS__); \ logger.log(buf); \ } while (0) #define LOG_FATAL(logmsgFormat, ...) \ do \ { \ Logger &logger = Logger::instance(); \ logger.setLogLevel(FATAL); \ char buf[1024] = {0}; \ snprintf(buf, 1024, logmsgFormat, ##__VA_ARGS__); \ logger.log(buf); \ exit(-1); \ } while (0) #ifdef MUDEBUG #define LOG_DEBUG(logmsgFormat, ...) \ do \ { \ Logger &logger = Logger::instance(); \ logger.setLogLevel(DEBUG); \ char buf[1024] = {0}; \ snprintf(buf, 1024, logmsgFormat, ##__VA_ARGS__); \ logger.log(buf); \ } while (0) #else #define LOG_DEBUG(logmsgFormat, ...) 
#endif

// Log levels: INFO ERROR FATAL DEBUG
enum LogLevel
{
    INFO,  // normal information
    ERROR, // error information
    FATAL, // unrecoverable error (process exits)
    DEBUG, // debug information (only when MUDEBUG is defined)
};

// Singleton logger class.
class Logger : noncopyable
{
public:
    // Returns the unique Logger instance.
    static Logger &instance();
    // Sets the level tag for the next log() call.
    void setLogLevel(int level);
    // Writes one log line.
    void log(std::string msg);

private:
    int logLevel_;  // current level tag, one of LogLevel
};
================================================ FILE: Poller.cc ================================================
#include "Poller.h"
#include "Channel.h"

Poller::Poller(EventLoop *loop)
    : ownerLoop_(loop)
{
}

// True iff channel is registered in this Poller under its own fd.
bool Poller::hasChannel(Channel *channel) const
{
    auto it = channels_.find(channel->fd());
    return it != channels_.end() && it->second == channel;
}
================================================ FILE: Poller.h ================================================
#pragma once

#include   // NOTE(review): targets stripped by extraction — presumably <vector>
#include   // and <unordered_map>; verify against repo

#include "noncopyable.h"
#include "Timestamp.h"

class Channel;
class EventLoop;

// Core IO-multiplexing abstraction of the event dispatcher.
class Poller
{
public:
    // NOTE(review): template argument stripped by extraction — presumably
    // std::vector<Channel*>; verify against repo.
    using ChannelList = std::vector;

    Poller(EventLoop *loop);
    virtual ~Poller() = default;

    // Uniform interface for all IO-multiplexing backends.
    virtual Timestamp poll(int timeoutMs, ChannelList *activeChannels) = 0;
    virtual void updateChannel(Channel *channel) = 0;
    virtual void removeChannel(Channel *channel) = 0;

    // True iff channel is registered in this Poller.
    bool hasChannel(Channel *channel) const;

    // Factory: lets EventLoop obtain the default backend implementation.
    static Poller *newDefaultPoller(EventLoop *loop);

protected:
    // key: sockfd  value: the Channel owning that sockfd
    // NOTE(review): template arguments stripped by extraction — presumably
    // std::unordered_map<int, Channel*>; verify against repo.
    using ChannelMap = std::unordered_map;
    ChannelMap channels_;

private:
    EventLoop *ownerLoop_; // the EventLoop this Poller belongs to
};
================================================ FILE: README.md ================================================
# C++11 Muduo

![流程图](./img/a.png)

## 开发环境

* linux kernel version 4.4.0 (ubuntu 16.04 Server)
* gcc version 5.4.0
* cmake version 3.5.1

项目编译执行`./build.sh`即可,测试用例进入`example/`文件夹,`make`即可生成服务器测试用例

## 功能介绍

头文件生成至目录`/usr/include/mymuduo/`,`.so`库文件生成至目录`/usr/lib/`。

1.
`EventLoop.*`、`Channel.*`、`Poller.*`、`EPollPoller.*`等主要用于事件轮询检测,并实现了事件分发处理的底层实现方法。`EventLoop`负责轮询执行`Poller`,要进行读、写、错误、关闭等事件时需执行哪些回调函数,均绑定至`Channel`中,只需从中调用即可,事件发生后进行相应的回调处理即可 2. `Thread.*`、`EventLoopThread.*`、`EventLoopThreadPool.*`等将线程和`EventLoop`事件轮询绑定在一起,实现真正意义上的`one loop per thread` 3. `TcpServer.*`、`TcpConnection.*`、`Acceptor.*`、`Socket.*`等是`mainloop`对网络连接的响应并轮询分发至各个`subloop`的实现,其中注册大量回调函数 4. `Buffer.*`为`muduo`网络库自行设计的自动扩容的缓冲区,保证数据有序性到达 ## 技术亮点 1. `EventLoop`中使用了`eventfd`来调用`wakeup()`,让`mainloop`唤醒`subloop`的`epoll_wait`阻塞 2. 在`EventLoop`中注册回调`cb`至`pendingFunctors_`,并在`doPendingFunctors`中通过`swap()`的方式,快速换出注册的回调,只在`swap()`时加锁,减少代码临界区长度,提升效率。(若不通过`swap()`的方式去处理,而是加锁执行`pendingFunctors`中的回调,然后解锁,会出现什么问题呢?1. 临界区过大,锁降低了服务器响应效率 2. 若执行的回调中执行`queueInLoop`需要抢占锁时,会发生死锁) 3. `Logger`可以设置日志等级,调试代码时可以开启`DEBUG`打印日志;若启动服务器,由于日志会影响服务器性能,可适当关闭`DEBUG`相关日志输出 4. 在`Thread`中通过`C++lambda`表达式以及信号量机制保证线程创建时的有序性,只有当线程获取到了其自己的`tid`后,才算启动线程完毕 5. `TcpConnection`继承自`enable_shared_from_this`,`TcpConnection`对象可以调用`shared_from_this()`方法给其内部回调函数,相当于创建了一个带引用计数的`shared_ptr`,可参考链接 [link](https://blog.csdn.net/gc348342215/article/details/123215888),同时`muduo`通过`tie()`方式解决了`TcpConnection`对象生命周期先于`Channel`结束的情况 6. 
`muduo`采用`Reactor`模型和多线程结合的方式,实现了高并发非阻塞网络库

## 视频介绍

* [muduo源码剖析(1)-简介](https://www.bilibili.com/video/BV1nu411Q7Gq)
* [muduo源码剖析(2)-muduo编写回射服务器实例](https://www.bilibili.com/video/BV1CY411g7AE)
* [muduo源码剖析(3)-Timestamp类日志类](https://www.bilibili.com/video/BV1dF411x7A8)
* [muduo源码剖析(4)-Channel类](https://www.bilibili.com/video/BV14a411h7JW)
* [muduo源码剖析(5)-Poller类、EPollPoller类等相关](https://www.bilibili.com/video/BV1VL4y1u714)
* [muduo源码剖析(6)-EventLoop类介绍1](https://www.bilibili.com/video/BV1aY411g7As)
* [muduo源码剖析(7)-EventLoop类介绍2](https://www.bilibili.com/video/BV1kS4y1S7DC)
* [muduo源码剖析(8)-Thread类、EventLoopThread类](https://www.bilibili.com/video/BV1GL411P73C)
* [muduo源码剖析(9)-EventLoopThreadPool类](https://www.bilibili.com/video/BV1yS4y1S7FY)
* [muduo源码剖析(10)-InetAddress类、Socket类](https://www.bilibili.com/video/BV1UU4y1o7BT)
* [muduo源码剖析(11)-Acceptor类1](https://www.bilibili.com/video/BV1q3411W79d)
* [muduo源码剖析(12)-Acceptor类2](https://www.bilibili.com/video/BV1Ua411b7aV)
* [muduo源码剖析(13)-TcpConnection类、Buffer类](https://www.bilibili.com/video/BV1hS4y137Eg)
* [muduo源码剖析(14)-TcpConnection类、Buffer类2](https://www.bilibili.com/video/BV1PS4y1D74z)
* [muduo源码剖析(15)-TcpConnection类](https://www.bilibili.com/video/BV1L3411p7jy)
* [muduo源码剖析(16)-TcpServer类](https://www.bilibili.com/video/BV13Y411u74h)

持续更新..
================================================ FILE: Socket.cc ================================================
// NOTE(review): the six include targets below were stripped by extraction —
// the visible code needs unistd.h, sys/types.h, sys/socket.h, netinet/tcp.h,
// netinet/in.h and string.h; verify against repo.
#include
#include
#include
#include
#include
#include

#include "Socket.h"
#include "Logger.h"
#include "InetAddress.h"

// Closes the owned fd — RAII over the socket descriptor.
Socket::~Socket()
{
    ::close(sockfd_);
}

// Binds sockfd_ to localaddr; a failure is fatal for the server.
void Socket::bindAddress(const InetAddress &localaddr)
{
    if (0 != ::bind(sockfd_, (sockaddr *)localaddr.getSockAddr(), sizeof(sockaddr_in)))
    {
        LOG_FATAL("bind sockfd:%d fail\n", sockfd_);
    }
}

// Starts listening with a backlog of 1024; a failure is fatal.
void Socket::listen()
{
    if (0 != ::listen(sockfd_, 1024))
    {
        LOG_FATAL("listen sockfd:%d fail\n", sockfd_);
    }
}

// Accepts one pending connection; on success stores the peer address into
// *peeraddr and returns the new fd, otherwise returns a negative value.
int Socket::accept(InetAddress *peeraddr)
{
    /**
     * Problems with the naive version:
     * 1. invalid arguments were passed to accept
     * 2. the returned connfd was not set non-blocking
     *    (Reactor model: one loop per thread, poller + non-blocking IO)
     **/
    sockaddr_in addr;
    socklen_t len = sizeof(addr);
    ::memset(&addr, 0, sizeof(addr));
    // fixed : int connfd = ::accept(sockfd_, (sockaddr *)&addr, &len);
    // accept4 atomically sets non-blocking + close-on-exec on the new fd
    int connfd = ::accept4(sockfd_, (sockaddr *)&addr, &len, SOCK_NONBLOCK | SOCK_CLOEXEC);
    if (connfd >= 0)
    {
        peeraddr->setSockAddr(addr);
    }
    return connfd;
}

// Half-closes the write side (graceful shutdown: peer still may send).
void Socket::shutdownWrite()
{
    if (::shutdown(sockfd_, SHUT_WR) < 0)
    {
        LOG_ERROR("shutdownWrite error");
    }
}

// Enables/disables Nagle's algorithm (TCP_NODELAY needs <netinet/tcp.h>).
// NOTE(review): the setsockopt return value is ignored here and in the three
// setters below — deliberate best-effort? confirm.
void Socket::setTcpNoDelay(bool on)
{
    int optval = on ? 1 : 0;
    ::setsockopt(sockfd_, IPPROTO_TCP, TCP_NODELAY, &optval, sizeof(optval));
}

// Allows rebinding a local address in TIME_WAIT (SO_REUSEADDR).
void Socket::setReuseAddr(bool on)
{
    int optval = on ? 1 : 0;
    ::setsockopt(sockfd_, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));
}

// Allows multiple sockets to bind the same port (SO_REUSEPORT).
void Socket::setReusePort(bool on)
{
    int optval = on ? 1 : 0;
    ::setsockopt(sockfd_, SOL_SOCKET, SO_REUSEPORT, &optval, sizeof(optval));
}

// Enables TCP keep-alive probes (SO_KEEPALIVE).
void Socket::setKeepAlive(bool on)
{
    int optval = on ? 1 : 0;
    ::setsockopt(sockfd_, SOL_SOCKET, SO_KEEPALIVE, &optval, sizeof(optval));
}
================================================ FILE: Socket.h ================================================
#pragma once

#include "noncopyable.h"

class InetAddress;

// RAII wrapper around a socket fd.
class Socket : noncopyable
{
public:
    explicit Socket(int sockfd)
        : sockfd_(sockfd)
    {
    }
    ~Socket();  // closes sockfd_

    int fd() const { return sockfd_; }
    void bindAddress(const InetAddress &localaddr);
    void listen();
    int accept(InetAddress *peeraddr);

    void shutdownWrite();

    void setTcpNoDelay(bool on);
    void setReuseAddr(bool on);
    void setReusePort(bool on);
    void setKeepAlive(bool on);

private:
    const int sockfd_;
};
================================================ FILE: TcpConnection.cc ================================================
// NOTE(review): the seven include targets below were stripped by extraction;
// verify against repo (functional, errno.h, sys/types.h, sys/socket.h, ...).
#include
#include
#include
#include
#include
#include
#include

#include "TcpConnection.h"
#include "Logger.h"
#include "Socket.h"
#include "Channel.h"
#include "EventLoop.h"

// Dies loudly if a TcpConnection is constructed without a loop.
static EventLoop *CheckLoopNotNull(EventLoop *loop)
{
    if (loop == nullptr)
    {
        LOG_FATAL("%s:%s:%d mainLoop is null!\n", __FILE__, __FUNCTION__, __LINE__);
    }
    return loop;
}

TcpConnection::TcpConnection(EventLoop *loop,
                             const std::string &nameArg,
                             int sockfd,
                             const InetAddress &localAddr,
                             const InetAddress &peerAddr)
    : loop_(CheckLoopNotNull(loop))
    , name_(nameArg)
    , state_(kConnecting)
    , reading_(true)
    , socket_(new Socket(sockfd))
    , channel_(new Channel(loop, sockfd))
    , localAddr_(localAddr)
    , peerAddr_(peerAddr)
    , highWaterMark_(64 * 1024 * 1024) // 64M
{
    // Wire this connection's handlers into the channel: when the poller
    // reports an interesting event the channel calls back into these methods.
    channel_->setReadCallback(
        std::bind(&TcpConnection::handleRead, this, std::placeholders::_1));
    channel_->setWriteCallback(
        std::bind(&TcpConnection::handleWrite, this));
    channel_->setCloseCallback(
        std::bind(&TcpConnection::handleClose, this));
    channel_->setErrorCallback(
        std::bind(&TcpConnection::handleError, this));

    LOG_INFO("TcpConnection::ctor[%s] at fd=%d\n", name_.c_str(),
sockfd); socket_->setKeepAlive(true); } TcpConnection::~TcpConnection() { LOG_INFO("TcpConnection::dtor[%s] at fd=%d state=%d\n", name_.c_str(), channel_->fd(), (int)state_); } void TcpConnection::send(const std::string &buf) { if (state_ == kConnected) { if (loop_->isInLoopThread()) // 这种是对于单个reactor的情况 用户调用conn->send时 loop_即为当前线程 { sendInLoop(buf.c_str(), buf.size()); } else { loop_->runInLoop( std::bind(&TcpConnection::sendInLoop, this, buf.c_str(), buf.size())); } } } /** * 发送数据 应用写的快 而内核发送数据慢 需要把待发送数据写入缓冲区,而且设置了水位回调 **/ void TcpConnection::sendInLoop(const void *data, size_t len) { ssize_t nwrote = 0; size_t remaining = len; bool faultError = false; if (state_ == kDisconnected) // 之前调用过该connection的shutdown 不能再进行发送了 { LOG_ERROR("disconnected, give up writing"); } // 表示channel_第一次开始写数据或者缓冲区没有待发送数据 if (!channel_->isWriting() && outputBuffer_.readableBytes() == 0) { nwrote = ::write(channel_->fd(), data, len); if (nwrote >= 0) { remaining = len - nwrote; if (remaining == 0 && writeCompleteCallback_) { // 既然在这里数据全部发送完成,就不用再给channel设置epollout事件了 loop_->queueInLoop( std::bind(writeCompleteCallback_, shared_from_this())); } } else // nwrote < 0 { nwrote = 0; if (errno != EWOULDBLOCK) // EWOULDBLOCK表示非阻塞情况下没有数据后的正常返回 等同于EAGAIN { LOG_ERROR("TcpConnection::sendInLoop"); if (errno == EPIPE || errno == ECONNRESET) // SIGPIPE RESET { faultError = true; } } } } /** * 说明当前这一次write并没有把数据全部发送出去 剩余的数据需要保存到缓冲区当中 * 然后给channel注册EPOLLOUT事件,Poller发现tcp的发送缓冲区有空间后会通知 * 相应的sock->channel,调用channel对应注册的writeCallback_回调方法, * channel的writeCallback_实际上就是TcpConnection设置的handleWrite回调, * 把发送缓冲区outputBuffer_的内容全部发送完成 **/ if (!faultError && remaining > 0) { // 目前发送缓冲区剩余的待发送的数据的长度 size_t oldLen = outputBuffer_.readableBytes(); if (oldLen + remaining >= highWaterMark_ && oldLen < highWaterMark_ && highWaterMarkCallback_) { loop_->queueInLoop( std::bind(highWaterMarkCallback_, shared_from_this(), oldLen + remaining)); } outputBuffer_.append((char *)data + nwrote, remaining); if 
(!channel_->isWriting())
        {
            // Must register the write event, or the poller will never report
            // EPOLLOUT and the buffered data would sit forever.
            channel_->enableWriting();
        }
    }
}

// Initiates a graceful close: stop sending once the output buffer drains.
void TcpConnection::shutdown()
{
    if (state_ == kConnected)
    {
        setState(kDisconnecting);
        loop_->runInLoop(
            std::bind(&TcpConnection::shutdownInLoop, this));
    }
}

void TcpConnection::shutdownInLoop()
{
    if (!channel_->isWriting()) // outputBuffer_ is fully flushed
    {
        socket_->shutdownWrite(); // half-close the write side
    }
}

// Called once on the owning loop thread when the connection is set up.
void TcpConnection::connectEstablished()
{
    setState(kConnected);
    channel_->tie(shared_from_this()); // keep this alive while channel handles events
    channel_->enableReading();         // register EPOLLIN with the poller

    // new connection established — run the user callback
    connectionCallback_(shared_from_this());
}

// Called once when the connection is torn down.
void TcpConnection::connectDestroyed()
{
    if (state_ == kConnected)
    {
        setState(kDisconnected);
        channel_->disableAll(); // drop all interest from the poller
        connectionCallback_(shared_from_this());
    }
    channel_->remove(); // unregister the channel from the poller entirely
}

// "Read" is from the server's point of view: peer data arrived, the poller
// reported EPOLLIN, and this handler drains the fd into inputBuffer_.
void TcpConnection::handleRead(Timestamp receiveTime)
{
    int savedErrno = 0;
    ssize_t n = inputBuffer_.readFd(channel_->fd(), &savedErrno);
    if (n > 0) // data arrived
    {
        // Hand the readable data to the user's onMessage callback;
        // shared_from_this passes a ref-counted pointer to this connection.
        messageCallback_(shared_from_this(), &inputBuffer_, receiveTime);
    }
    else if (n == 0) // peer closed
    {
        handleClose();
    }
    else // error
    {
        errno = savedErrno;
        LOG_ERROR("TcpConnection::handleRead");
        handleError();
    }
}

// EPOLLOUT handler: flush outputBuffer_ to the fd.
void TcpConnection::handleWrite()
{
    if (channel_->isWriting())
    {
        int savedErrno = 0;
        ssize_t n = outputBuffer_.writeFd(channel_->fd(), &savedErrno);
        if (n > 0)
        {
            outputBuffer_.retrieve(n);
            if (outputBuffer_.readableBytes() == 0)
            {
                channel_->disableWriting(); // nothing left — stop watching EPOLLOUT
                if (writeCompleteCallback_)
                {
                    // Queue the callback on this connection's own subloop.
                    loop_->queueInLoop(
                        std::bind(writeCompleteCallback_, shared_from_this()));
                }
                if (state_ == kDisconnecting)
                {
                    shutdownInLoop(); // a shutdown was pending — finish it now
                }
            }
        }
        else
        {
            LOG_ERROR("TcpConnection::handleWrite");
        }
    }
    else
    {
        LOG_ERROR("TcpConnection fd=%d is down, no more writing", channel_->fd());
    }
}

void TcpConnection::handleClose()
{
    LOG_INFO("TcpConnection::handleClose fd=%d state=%d\n", channel_->fd(), (int)state_);
    setState(kDisconnected);
    channel_->disableAll();

    TcpConnectionPtr connPtr(shared_from_this());
    connectionCallback_(connPtr); // user's connection callback (now "down")
    closeCallback_(connPtr);      // TcpServer::removeConnection — must be the last line
}

// Fetches and logs the pending socket error (SO_ERROR).
void TcpConnection::handleError()
{
    int optval;
    socklen_t optlen = sizeof optval;
    int err = 0;
    if (::getsockopt(channel_->fd(), SOL_SOCKET, SO_ERROR, &optval, &optlen) < 0)
    {
        err = errno;
    }
    else
    {
        err = optval;
    }
    LOG_ERROR("TcpConnection::handleError name:%s - SO_ERROR:%d\n", name_.c_str(), err);
}
================================================ FILE: TcpConnection.h ================================================
#pragma once

#include   // NOTE(review): targets stripped by extraction — presumably <memory>,
#include   // <string> and <atomic>; verify against repo
#include

#include "noncopyable.h"
#include "InetAddress.h"
#include "Callbacks.h"
#include "Buffer.h"
#include "Timestamp.h"

class Channel;
class EventLoop;
class Socket;

/**
 * TcpServer => Acceptor => new connection, accept() yields connfd
 * => TcpConnection sets callbacks => into Channel => Poller => Channel fires
 **/
// NOTE(review): template argument stripped — presumably
// std::enable_shared_from_this<TcpConnection>; verify against repo.
class TcpConnection : noncopyable, public std::enable_shared_from_this
{
public:
    TcpConnection(EventLoop *loop,
                  const std::string &nameArg,
                  int sockfd,
                  const InetAddress &localAddr,
                  const InetAddress &peerAddr);
    ~TcpConnection();

    EventLoop *getLoop() const { return loop_; }
    const std::string &name() const { return name_; }
    const InetAddress &localAddress() const { return localAddr_; }
    const InetAddress &peerAddress() const { return peerAddr_; }

    bool connected() const { return state_ == kConnected; }

    // send data
    void send(const std::string &buf);
    // close the connection (graceful: write side first)
    void shutdown();

    void setConnectionCallback(const ConnectionCallback &cb) { connectionCallback_ = cb; }
    void setMessageCallback(const MessageCallback &cb) { messageCallback_ = cb; }
    void setWriteCompleteCallback(const WriteCompleteCallback &cb) { writeCompleteCallback_ = cb; }
    void setCloseCallback(const CloseCallback &cb) { closeCallback_ = cb; }
    void setHighWaterMarkCallback(const HighWaterMarkCallback &cb, size_t highWaterMark)
    {
        highWaterMarkCallback_ = cb;
        highWaterMark_ = highWaterMark;
    }

    // connection established (run on owning loop)
    void connectEstablished();
    // connection destroyed
    void connectDestroyed();

private:
    enum StateE
    {
        kDisconnected,  // fully disconnected
        kConnecting,    // connecting
        kConnected,     // connected
        kDisconnecting  // disconnect in progress
    };
    void setState(StateE state) { state_ = state; }

    void handleRead(Timestamp receiveTime);
    void handleWrite();
    void handleClose();
    void handleError();

    void sendInLoop(const void *data, size_t len);
    void shutdownInLoop();

    // Points at a subloop (multi-reactor) or the baseloop (single reactor),
    // depending on how many threads TcpServer created.
    EventLoop *loop_;
    const std::string name_;
    std::atomic_int state_;
    bool reading_;

    // Socket/Channel pair — analogous to Acceptor, but Acceptor lives in the
    // mainloop while TcpConnection lives in a subloop.
    // NOTE(review): template arguments stripped — presumably
    // std::unique_ptr<Socket> / std::unique_ptr<Channel>; verify against repo.
    std::unique_ptr socket_;
    std::unique_ptr channel_;

    const InetAddress localAddr_;
    const InetAddress peerAddr_;

    // User callbacks, registered on TcpServer and forwarded here; the Channel
    // itself is bound to the four handleXxx methods above.
    ConnectionCallback connectionCallback_;       // on new connection / disconnect
    MessageCallback messageCallback_;             // on readable data
    WriteCompleteCallback writeCompleteCallback_; // after a full send completes
    HighWaterMarkCallback highWaterMarkCallback_;
    CloseCallback closeCallback_;
    size_t highWaterMark_;

    // data buffers
    Buffer inputBuffer_;  // receive buffer
    Buffer outputBuffer_; // send buffer — user send() feeds it
};
================================================ FILE: TcpServer.cc ================================================
#include   // NOTE(review): two include targets stripped by extraction; verify against repo
#include

#include "TcpServer.h"
#include "Logger.h"
#include "TcpConnection.h"

// Dies loudly if the server is constructed without a loop.
static EventLoop *CheckLoopNotNull(EventLoop *loop)
{
    if (loop == nullptr)
    {
        LOG_FATAL("%s:%s:%d mainLoop is null!\n", __FILE__, __FUNCTION__, __LINE__);
    }
    return loop;
}

TcpServer::TcpServer(EventLoop *loop,
                     const InetAddress &listenAddr,
                     const std::string &nameArg,
                     Option option)
    : loop_(CheckLoopNotNull(loop))
    , ipPort_(listenAddr.toIpPort())
    , name_(nameArg)
    , acceptor_(new Acceptor(loop, listenAddr, option == kReusePort))
    , threadPool_(new EventLoopThreadPool(loop, name_))
    , connectionCallback_()
    , messageCallback_()
    , nextConnId_(1)
    , started_(0)
{
    // When a new client connects, the Acceptor's acceptChannel_ becomes
    // readable, handleRead() runs, and it invokes TcpServer::newConnection.
    acceptor_->setNewConnectionCallback(
        std::bind(&TcpServer::newConnection, this, std::placeholders::_1, std::placeholders::_2));
}

TcpServer::~TcpServer()
{
    for (auto &item : connections_)
    {
        TcpConnectionPtr conn(item.second);
        // Reset the map's pointer so the stack-local conn is the last owner;
        // when conn leaves scope the TcpConnection object can be released.
        item.second.reset();
        // destroy the connection on its own loop thread
        conn->getLoop()->runInLoop(
            std::bind(&TcpConnection::connectDestroyed, conn));
    }
}

// Sets the number of subloops (worker event-loop threads).
void TcpServer::setThreadNum(int numThreads)
{
    threadPool_->setThreadNum(numThreads);
}

// Starts listening; idempotent thanks to the started_ guard.
void TcpServer::start()
{
    if (started_++ == 0) // protect against start() being called more than once
    {
        threadPool_->start(threadInitCallback_); // spin up the subloop threads
        loop_->runInLoop(std::bind(&Acceptor::listen, acceptor_.get()));
    }
}

// Acceptor callback for a fresh connection: runs on the mainLoop and hands the
// new connfd (wrapped in a TcpConnection) to a subLoop chosen round-robin.
void TcpServer::newConnection(int sockfd, const InetAddress &peerAddr)
{
    // round-robin: pick the subLoop that will own this connfd's channel
    EventLoop *ioLoop = threadPool_->getNextLoop();
    char buf[64] = {0};
    snprintf(buf, sizeof buf, "-%s#%d", ipPort_.c_str(), nextConnId_);
    // nextConnId_ needs no atomic — it is only ever touched on the mainloop
    ++nextConnId_;
    std::string connName = name_ + buf;

    LOG_INFO("TcpServer::newConnection [%s] - new connection [%s] from %s\n",
             name_.c_str(), connName.c_str(), peerAddr.toIpPort().c_str());

    // recover the local ip/port actually bound to this sockfd
    sockaddr_in local;
    ::memset(&local, 0, sizeof(local));
    socklen_t addrlen = sizeof(local);
    if (::getsockname(sockfd, (sockaddr *)&local, &addrlen) < 0)
    {
        LOG_ERROR("sockets::getLocalAddr");
    }

    InetAddress localAddr(local);
    TcpConnectionPtr conn(new TcpConnection(ioLoop,
                                            connName,
                                            sockfd,
                                            localAddr,
                                            peerAddr));
    connections_[connName] = conn;
    // The callbacks below were given to TcpServer by the user and are now
    // forwarded to the TcpConnection; the Channel itself is bound to the four
    // TcpConnection::handleXxx methods, which invoke these.
    conn->setConnectionCallback(connectionCallback_);
    conn->setMessageCallback(messageCallback_);
    conn->setWriteCompleteCallback(writeCompleteCallback_);

    // how to tear the connection down
    conn->setCloseCallback(
        std::bind(&TcpServer::removeConnection, this, std::placeholders::_1));
    ioLoop->runInLoop(
        std::bind(&TcpConnection::connectEstablished, conn));
}

// Close path entry: hop back to the mainloop before touching connections_.
void TcpServer::removeConnection(const TcpConnectionPtr &conn)
{
    loop_->runInLoop(
        std::bind(&TcpServer::removeConnectionInLoop, this, conn));
}

void TcpServer::removeConnectionInLoop(const TcpConnectionPtr &conn)
{
    LOG_INFO("TcpServer::removeConnectionInLoop [%s] - connection %s\n",
             name_.c_str(), conn->name().c_str());

    connections_.erase(conn->name());
    EventLoop *ioLoop = conn->getLoop();
    // final destruction happens on the connection's own subloop
    ioLoop->queueInLoop(
        std::bind(&TcpConnection::connectDestroyed, conn));
}
================================================ FILE: TcpServer.h ================================================
#pragma once

/**
 * The user-facing class for writing servers with muduo.
 **/
#include   // NOTE(review): five include targets stripped by extraction —
#include   // presumably <functional>, <string>, <memory>, <atomic> and
#include   // <unordered_map>; verify against repo
#include
#include

#include "EventLoop.h"
#include "Acceptor.h"
#include "InetAddress.h"
#include "noncopyable.h"
#include "EventLoopThreadPool.h"
#include "Callbacks.h"
#include "TcpConnection.h"
#include "Buffer.h"

// Public server class.
class TcpServer
{
public:
    // NOTE(review): template argument stripped — presumably
    // std::function<void(EventLoop*)>; verify against repo.
    using ThreadInitCallback = std::function;

    enum Option
    {
        kNoReusePort,
        kReusePort,
    };

    TcpServer(EventLoop *loop,
              const InetAddress &listenAddr,
              const std::string &nameArg,
              Option option = kNoReusePort);
    ~TcpServer();

    void setThreadInitCallback(const ThreadInitCallback &cb) { threadInitCallback_ = cb; }
    void setConnectionCallback(const ConnectionCallback &cb) { connectionCallback_ = cb; }
    void setMessageCallback(const MessageCallback &cb) { messageCallback_ = cb; }
    void setWriteCompleteCallback(const WriteCompleteCallback &cb) { writeCompleteCallback_ = cb; }

    // number of subloops (worker threads)
    void setThreadNum(int numThreads);

    // start listening
    void start();

private:
    void
newConnection(int sockfd, const InetAddress &peerAddr); void removeConnection(const TcpConnectionPtr &conn); void removeConnectionInLoop(const TcpConnectionPtr &conn); using ConnectionMap = std::unordered_map; EventLoop *loop_; // baseloop 用户自定义的loop const std::string ipPort_; const std::string name_; std::unique_ptr acceptor_; // 运行在mainloop 任务就是监听新连接事件 std::shared_ptr threadPool_; // one loop per thread ConnectionCallback connectionCallback_; //有新连接时的回调 MessageCallback messageCallback_; // 有读写事件发生时的回调 WriteCompleteCallback writeCompleteCallback_; // 消息发送完成后的回调 ThreadInitCallback threadInitCallback_; // loop线程初始化的回调 std::atomic_int started_; int nextConnId_; ConnectionMap connections_; // 保存所有的连接 }; ================================================ FILE: Thread.cc ================================================ #include "Thread.h" #include "CurrentThread.h" #include std::atomic_int Thread::numCreated_(0); Thread::Thread(ThreadFunc func, const std::string &name) : started_(false) , joined_(false) , tid_(0) , func_(std::move(func)) , name_(name) { setDefaultName(); } Thread::~Thread() { if (started_ && !joined_) { thread_->detach(); // thread类提供了设置分离线程的方法 线程运行后自动销毁(非阻塞) } } void Thread::start() // 一个Thread对象 记录的就是一个新线程的详细信息 { started_ = true; sem_t sem; sem_init(&sem, false, 0); // false指的是 不设置进程间共享 // 开启线程 thread_ = std::shared_ptr(new std::thread([&]() { tid_ = CurrentThread::tid(); // 获取线程的tid值 sem_post(&sem); func_(); // 开启一个新线程 专门执行该线程函数 })); // 这里必须等待获取上面新创建的线程的tid值 sem_wait(&sem); } // C++ std::thread 中join()和detach()的区别:https://blog.nowcoder.net/n/8fcd9bb6e2e94d9596cf0a45c8e5858a void Thread::join() { joined_ = true; thread_->join(); } void Thread::setDefaultName() { int num = ++numCreated_; if (name_.empty()) { char buf[32] = {0}; snprintf(buf, sizeof buf, "Thread%d", num); name_ = buf; } } ================================================ FILE: Thread.h ================================================ #pragma once #include #include #include #include 
#include #include #include "noncopyable.h" class Thread : noncopyable { public: using ThreadFunc = std::function; explicit Thread(ThreadFunc, const std::string &name = std::string()); ~Thread(); void start(); void join(); bool started() { return started_; } pid_t tid() const { return tid_; } const std::string &name() const { return name_; } static int numCreated() { return numCreated_; } private: void setDefaultName(); bool started_; bool joined_; std::shared_ptr thread_; pid_t tid_; // 在线程创建时再绑定 ThreadFunc func_; // 线程回调函数 std::string name_; static std::atomic_int numCreated_; }; ================================================ FILE: Timestamp.cc ================================================ #include #include "Timestamp.h" Timestamp::Timestamp() : microSecondsSinceEpoch_(0) { } Timestamp::Timestamp(int64_t microSecondsSinceEpoch) : microSecondsSinceEpoch_(microSecondsSinceEpoch) { } Timestamp Timestamp::now() { return Timestamp(time(NULL)); } std::string Timestamp::toString() const { char buf[128] = {0}; tm *tm_time = localtime(µSecondsSinceEpoch_); snprintf(buf, 128, "%4d/%02d/%02d %02d:%02d:%02d", tm_time->tm_year + 1900, tm_time->tm_mon + 1, tm_time->tm_mday, tm_time->tm_hour, tm_time->tm_min, tm_time->tm_sec); return buf; } // #include // int main() { // std::cout << Timestamp::now().toString() << std::endl; // return 0; // } ================================================ FILE: Timestamp.h ================================================ #pragma once #include #include class Timestamp { public: Timestamp(); explicit Timestamp(int64_t microSecondsSinceEpoch); static Timestamp now(); std::string toString() const; private: int64_t microSecondsSinceEpoch_; }; ================================================ FILE: build.sh ================================================ #!/bin/bash set -e # 如果没有build目录 创建该目录 if [ ! -d `pwd`/build ]; then mkdir `pwd`/build fi rm -fr `pwd`/build/* cd `pwd`/build && cmake .. && make # 回到项目根目录 cd .. 
# copy headers to /usr/include/mymuduo and the .so to /usr/lib
if [ ! -d /usr/include/mymuduo ]; then
    mkdir /usr/include/mymuduo
fi

for header in `ls *.h`
do
    cp $header /usr/include/mymuduo
done

cp `pwd`/lib/libmymuduo.so /usr/lib

ldconfig
================================================ FILE: example/Makefile ================================================
testserver :
	g++ -g -o testserver testserver.cc -lmymuduo -lpthread -std=c++11

clean :
	rm -f testserver
================================================ FILE: example/testserver.cc ================================================
// NOTE(review): the three include targets below were stripped by extraction —
// presumably <mymuduo/TcpServer.h>, <mymuduo/Logger.h> and <string>; verify.
#include
#include
#include

// Echo server built on the installed mymuduo library: every received message
// is written straight back to the peer.
class EchoServer
{
public:
    EchoServer(EventLoop *loop, const InetAddress &addr, const std::string &name)
        : server_(loop, addr, name)
        , loop_(loop)
    {
        // register the user callbacks
        server_.setConnectionCallback(
            std::bind(&EchoServer::onConnection, this, std::placeholders::_1));
        server_.setMessageCallback(
            std::bind(&EchoServer::onMessage, this,
                      std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));
        // choose a suitable number of subloop threads
        server_.setThreadNum(3);
    }
    void start()
    {
        server_.start();
    }

private:
    // connection established / torn down
    void onConnection(const TcpConnectionPtr &conn)
    {
        if (conn->connected())
        {
            LOG_INFO("Connection UP : %s", conn->peerAddress().toIpPort().c_str());
        }
        else
        {
            LOG_INFO("Connection DOWN : %s", conn->peerAddress().toIpPort().c_str());
        }
    }

    // readable-data callback: echo everything back
    void onMessage(const TcpConnectionPtr &conn, Buffer *buf, Timestamp time)
    {
        std::string msg = buf->retrieveAllAsString();
        conn->send(msg);
        // conn->shutdown(); // close the write side; EPOLLHUP then triggers closeCallback_
    }

    EventLoop *loop_;
    TcpServer server_;
};

int main()
{
    EventLoop loop;
    InetAddress addr(8002);
    EchoServer server(&loop, addr, "EchoServer");
    server.start();
    loop.loop();
    return 0;
}
================================================ FILE: noncopyable.h ================================================
#pragma once // guard against repeated inclusion

/**
 * Deriving from noncopyable leaves construction and destruction of the
 * derived class intact, but deletes its copy construction and copy assignment.
 **/
class noncopyable
{
public:
    noncopyable(const noncopyable &) = delete;
    noncopyable &operator=(const noncopyable &) = delete;
    // void operator=(const noncopyable &) = delete; // upstream muduo returns void here, which is also fine

protected:
    noncopyable() = default;
    ~noncopyable() = default;
};