Guard stats/latency access in info command
commit b4c89eada9
parent abe75e40a2
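Every hunk below applies the same pattern: the counters and latency monitors in ConnectConnectionPool and Handler are updated only while holding a new per-object mutex (mStatsMtx), and the INFO path reads value snapshots instead of touching another handler's live members. A minimal self-contained sketch of that pattern follows; the names are illustrative, only the std::mutex / std::lock_guard usage mirrors the diff.

#include <mutex>

// Sketch only: writers bump counters under a lock, readers copy the whole
// struct out under the same lock, so a reporting thread never observes a
// half-updated value. All names here are hypothetical.
struct Stats {
    long requests = 0;
    long responses = 0;
};

class GuardedStats {
public:
    void incrRequests()
    {
        std::lock_guard<std::mutex> lck(mMtx);  // serialize writers
        ++mStats.requests;
    }
    Stats snapshot()
    {
        std::lock_guard<std::mutex> lck(mMtx);  // readers get a consistent copy
        return mStats;
    }
private:
    Stats mStats;
    std::mutex mMtx;
};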
src/ConnectConnectionPool.cpp
@@ -22,6 +22,60 @@ ConnectConnectionPool::~ConnectConnectionPool()
 {
 }
+
+void ConnectConnectionPool::incrRequests()
+{
+    std::lock_guard<std::mutex> lck(mStatsMtx);
+    ++mStats.requests;
+}
+
+void ConnectConnectionPool::incrResponses()
+{
+    std::lock_guard<std::mutex> lck(mStatsMtx);
+    ++mStats.responses;
+}
+
+void ConnectConnectionPool::addSendBytes(long num)
+{
+    std::lock_guard<std::mutex> lck(mStatsMtx);
+    mStats.sendBytes += num;
+}
+
+void ConnectConnectionPool::addRecvBytes(long num)
+{
+    std::lock_guard<std::mutex> lck(mStatsMtx);
+    mStats.recvBytes += num;
+}
+
+void ConnectConnectionPool::addLatency(size_t i, long elapsed)
+{
+    std::lock_guard<std::mutex> lck(mStatsMtx);
+    mLatencyMonitors[i].add(elapsed);
+}
+
+void ConnectConnectionPool::addLatency(size_t i, long elapsed, int idx)
+{
+    std::lock_guard<std::mutex> lck(mStatsMtx);
+    mLatencyMonitors[i].add(elapsed, idx);
+}
+
+ServerStats ConnectConnectionPool::snapshotStats() const
+{
+    std::lock_guard<std::mutex> lck(mStatsMtx);
+    return mStats;
+}
+
+LatencyMonitor ConnectConnectionPool::snapshotLatency(size_t i) const
+{
+    std::lock_guard<std::mutex> lck(mStatsMtx);
+    return mLatencyMonitors[i];
+}
+
+size_t ConnectConnectionPool::latencyMonitorCount() const
+{
+    std::lock_guard<std::mutex> lck(mStatsMtx);
+    return mLatencyMonitors.size();
+}
 
 ConnectConnection* ConnectConnectionPool::getShareConnection(int db)
 {
     FuncCallTimer();
@@ -35,7 +89,10 @@ ConnectConnection* ConnectConnectionPool::getShareConnection(int db)
     if (!c) {
         c = ConnectConnectionAlloc::create(mServ, true);
         c->setDb(db);
-        ++mStats.connections;
+        {
+            std::lock_guard<std::mutex> lck(mStatsMtx);
+            ++mStats.connections;
+        }
         mShareConns[db] = c;
         needInit = true;
         logNotice("h %d create server connection %s %d",
@@ -76,7 +133,10 @@ ConnectConnection* ConnectConnectionPool::getPrivateConnection(int db)
     }
     c = ConnectConnectionAlloc::create(mServ, false);
     c->setDb(db);
-    ++mStats.connections;
+    {
+        std::lock_guard<std::mutex> lck(mStatsMtx);
+        ++mStats.connections;
+    }
     needInit = true;
     logNotice("h %d create private server connection %s %d",
             mHandler->id(), c->peer(), c->fd());
@@ -133,7 +193,10 @@ bool ConnectConnectionPool::init(ConnectConnection* c)
                 mHandler->id(), c->peer(), c->fd());
         return false;
     }
-    ++mStats.connect;
+    {
+        std::lock_guard<std::mutex> lck(mStatsMtx);
+        ++mStats.connect;
+    }
     if (!c->connect()) {
         logWarn("h %d s %s %d connect fail",
                 mHandler->id(), c->peer(), c->fd());
src/ConnectConnectionPool.h
@@ -7,6 +7,7 @@
 #ifndef _PREDIXY_CONNECT_CONNECTION_POOL_H_
 #define _PREDIXY_CONNECT_CONNECTION_POOL_H_
 
+#include <mutex>
 #include <vector>
 #include "ConnectConnection.h"
 #include "Server.h"
@@ -41,6 +42,15 @@ public:
     {
         return --mPendRequests;
     }
+    void incrRequests();
+    void incrResponses();
+    void addSendBytes(long num);
+    void addRecvBytes(long num);
+    void addLatency(size_t i, long elapsed);
+    void addLatency(size_t i, long elapsed, int idx);
+    ServerStats snapshotStats() const;
+    LatencyMonitor snapshotLatency(size_t i) const;
+    size_t latencyMonitorCount() const;
     ServerStats& stats()
     {
         return mStats;
@@ -59,6 +69,7 @@ public:
     }
     void resetStats()
     {
+        std::lock_guard<std::mutex> lck(mStatsMtx);
         mStats.reset();
         for (auto& m : mLatencyMonitors) {
             m.reset();
@@ -74,6 +85,7 @@ private:
     std::vector<ConnectConnectionList> mPrivateConns;
     ServerStats mStats;
     std::vector<LatencyMonitor> mLatencyMonitors;
+    mutable std::mutex mStatsMtx;
 };
 
 
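A detail worth noting in the .cpp hunks above, and in the Handler changes below: each guarded update sits in its own brace block, so the lock is held only for the single increment and is released before any call into another object that takes its own mutex. A self-contained sketch of that scoping, with hypothetical names:

#include <mutex>

// Sketch only: keep the first lock inside a nested block so a second
// object's mutex is never acquired while the first is still held.
struct Counter {
    void incr()
    {
        std::lock_guard<std::mutex> lck(mtx);
        ++value;
    }
    long value = 0;
    std::mutex mtx;
};

void record(Counter& own, Counter& pool)
{
    {
        std::lock_guard<std::mutex> lck(own.mtx);  // held only inside this block
        ++own.value;
    }                                              // released here...
    pool.incr();                                   // ...before taking the pool's lock
}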
src/Handler.cpp
@@ -81,6 +81,24 @@ void Handler::stop()
     mStop.store(true);
 }
 
+HandlerStats Handler::snapshotStats() const
+{
+    std::lock_guard<std::mutex> lck(mStatsMtx);
+    return mStats;
+}
+
+LatencyMonitor Handler::snapshotLatency(size_t i) const
+{
+    std::lock_guard<std::mutex> lck(mStatsMtx);
+    return mLatencyMonitors[i];
+}
+
+size_t Handler::latencyMonitorCount() const
+{
+    std::lock_guard<std::mutex> lck(mStatsMtx);
+    return mLatencyMonitors.size();
+}
+
 void Handler::refreshServerPool()
 {
     FuncCallTimer();
@@ -221,7 +239,10 @@ void Handler::postAcceptConnectionEvent()
             mAcceptConns.remove(c);
             c->unref();
             c->close();
+            {
+                std::lock_guard<std::mutex> lck(mStatsMtx);
                 --mStats.clientConnections;
+            }
         }
         mPostAcceptConns.pop_front();
     }
@@ -296,7 +317,10 @@ void Handler::handleListenEvent(ListenSocket* s, int evts)
         socklen_t len = sizeof(addr);
         int fd = s->accept((sockaddr*)&addr, &len);
         if (fd >= 0) {
+            {
+                std::lock_guard<std::mutex> lck(mStatsMtx);
                 ++mStats.accept;
+            }
             addAcceptSocket(fd, (sockaddr*)&addr, len);
         } else {
             break;
@@ -355,7 +379,10 @@ void Handler::addAcceptSocket(int fd, sockaddr* addr, socklen_t len)
     if (mEventLoop->addSocket(c)) {
         c->setLastActiveTime(Util::elapsedUSec());
         mAcceptConns.push_back(c);
+        {
+            std::lock_guard<std::mutex> lck(mStatsMtx);
             ++mStats.clientConnections;
+        }
     } else {
         fail = true;
     }
@@ -381,7 +408,10 @@ void Handler::handleAcceptConnectionEvent(AcceptConnection* c, int evts)
     if (c->lastActiveTime() < 0) {
         c->setLastActiveTime(Util::elapsedUSec());
         mAcceptConns.push_back(c);
+        {
+            std::lock_guard<std::mutex> lck(mStatsMtx);
             ++mStats.clientConnections;
+        }
     }
     if (evts & Multiplexor::ErrorEvent) {
         c->setStatus(AcceptConnection::EventError);
@@ -457,7 +487,7 @@ ConnectConnection* Handler::getConnectConnection(Request* req, Server* serv)
         p = new ConnectConnectionPool(this, serv, serv->pool()->dbNum());
         mConnPool[sid] = p;
     }
-    p->stats().requests++;
+    p->incrRequests();
     int db = 0;
     auto c = req->connection();
     if (c) {
@@ -505,7 +535,10 @@ void Handler::handleRequest(Request* req)
     if (c && (c->isBlockRequest() || c->isCloseASAP())) {
         return;
     }
+    {
+        std::lock_guard<std::mutex> lck(mStatsMtx);
         ++mStats.requests;
+    }
     req->setDelivered();
     SegmentStr<Const::MaxKeyLen> key(req->key());
     logDebug("h %d c %s %d handle req %ld %s %.*s",
@@ -793,8 +826,11 @@ void Handler::handleRequest(Request* req, ConnectConnection* s)
     }
     s->send(this, req);
     addPostEvent(s, Multiplexor::WriteEvent);
-    mStats.requests++;
-    mConnPool[s->server()->id()]->stats().requests++;
+    {
+        std::lock_guard<std::mutex> lck(mStatsMtx);
+        ++mStats.requests;
+    }
+    mConnPool[s->server()->id()]->incrRequests();
     if (s->isShared()) {
         mConnPool[s->server()->id()]->incrPendRequests();
     }
@@ -837,9 +873,12 @@ void Handler::handleResponse(ConnectConnection* s, Request* req, Response* res)
             id(), (s ? s->peer() : "None"), (s ? s->fd() : -1),
             req->id(), req->cmd(), key.length(), key.data(),
             res->id(), res->typeStr());
-    mStats.responses++;
+    {
+        std::lock_guard<std::mutex> lck(mStatsMtx);
+        ++mStats.responses;
+    }
     if (s) {
-        mConnPool[s->server()->id()]->stats().responses++;
+        mConnPool[s->server()->id()]->incrResponses();
         if (s->isShared()) {
             mConnPool[s->server()->id()]->decrPendRequests();
         }
@@ -899,16 +938,24 @@ void Handler::handleResponse(ConnectConnection* s, Request* req, Response* res)
         addPostEvent(c, Multiplexor::WriteEvent);
     }
     long elapsed = Util::elapsedUSec() - req->createTime();
-    if (auto cp = s ? mConnPool[s->server()->id()] : nullptr) {
-        for (auto i : mProxy->latencyMonitorSet().cmdIndex(req->type())) {
-            int idx = mLatencyMonitors[i].add(elapsed);
-            if (idx >= 0) {
-                cp->latencyMonitors()[i].add(elapsed, idx);
-            }
-        }
-    } else {
-        for (auto i : mProxy->latencyMonitorSet().cmdIndex(req->type())) {
-            mLatencyMonitors[i].add(elapsed);
-        }
-    }
+    int cmdType = static_cast<int>(req->type());
+    if (cmdType >= 0 && cmdType < Command::AvailableCommands) {
+        if (auto cp = s ? mConnPool[s->server()->id()] : nullptr) {
+            for (auto i : mProxy->latencyMonitorSet().cmdIndex(req->type())) {
+                int idx = -1;
+                {
+                    std::lock_guard<std::mutex> lck(mStatsMtx);
+                    idx = mLatencyMonitors[i].add(elapsed);
+                }
+                if (idx >= 0) {
+                    cp->addLatency(i, elapsed, idx);
+                }
+            }
+        } else {
+            for (auto i : mProxy->latencyMonitorSet().cmdIndex(req->type())) {
+                std::lock_guard<std::mutex> lck(mStatsMtx);
+                mLatencyMonitors[i].add(elapsed);
+            }
+        }
+    }
     logInfo("RESP h %d c %s %d req %ld %s %.*s s %s %d res %ld %s t %ld",
@@ -1032,12 +1079,12 @@ void Handler::infoRequest(Request* req, const String& key)
     }
 
     if (Scope(all, empty, "Stats")) {
-        HandlerStats st(mStats);
+        HandlerStats st = snapshotStats();
         for (auto h : mProxy->handlers()) {
             if (h == this) {
                 continue;
             }
-            st += h->mStats;
+            st += h->snapshotStats();
         }
         buf = buf->fappend("Accept:%ld\n", st.accept);
         buf = buf->fappend("ClientConnections:%ld\n", st.clientConnections);
@@ -1057,7 +1104,7 @@ void Handler::infoRequest(Request* req, const String& key)
         ServerStats st;
         for (auto h : mProxy->handlers()) {
             if (auto cp = h->getConnectConnectionPool(serv->id())) {
-                st += cp->stats();
+                st += cp->snapshotStats();
             }
         }
         buf = buf->fappend("Server:%s\n", serv->addr().data());
@@ -1079,15 +1126,21 @@ void Handler::infoRequest(Request* req, const String& key)
 
     if (Scope(all, empty, "LatencyMonitor")) {
         LatencyMonitor lm;
-        for (size_t i = 0; i < mLatencyMonitors.size(); ++i) {
-            lm = mLatencyMonitors[i];
+        size_t count = latencyMonitorCount();
+        for (size_t i = 0; i < count; ++i) {
+            lm = snapshotLatency(i);
             for (auto h : mProxy->handlers()) {
                 if (h == this) {
                     continue;
                 }
-                lm += h->mLatencyMonitors[i];
+                if (i < h->latencyMonitorCount()) {
+                    lm += h->snapshotLatency(i);
+                }
             }
-            buf = buf->fappend("LatencyMonitorName:%s\n", lm.name().data());
+            const char* lmName = lm.name().data();
+            int lmNameLen = lm.name().length();
+            buf = buf->fappend("LatencyMonitorName:%.*s\n",
+                    lmNameLen, lmName ? lmName : "");
             buf = lm.output(buf);
             buf = buf->fappend("\n");
         }
@@ -1123,14 +1176,19 @@ void Handler::infoLatencyRequest(Request* req)
     }
 
     BufferPtr buf = body.fset(nullptr, "# LatencyMonitor\n");
-    LatencyMonitor lm = mLatencyMonitors[i];
+    LatencyMonitor lm = snapshotLatency(i);
     for (auto h : mProxy->handlers()) {
         if (h == this) {
             continue;
         }
-        lm += h->mLatencyMonitors[i];
+        if (i < h->latencyMonitorCount()) {
+            lm += h->snapshotLatency(i);
+        }
     }
-    buf = buf->fappend("LatencyMonitorName:%s\n", lm.name().data());
+    const char* lmName = lm.name().data();
+    int lmNameLen = lm.name().length();
+    buf = buf->fappend("LatencyMonitorName:%.*s\n",
+            lmNameLen, lmName ? lmName : "");
     buf = lm.output(buf);
     buf = buf->fappend("\n");
 
@@ -1138,15 +1196,19 @@ void Handler::infoLatencyRequest(Request* req)
     auto sp = mProxy->serverPool();
     int servCursor = 0;
     while (Server* serv = sp->iter(servCursor)) {
-        lm = mLatencyMonitors[i];
+        lm = snapshotLatency(i);
         lm.reset();
         for (auto h : mProxy->handlers()) {
             if (auto cp = h->getConnectConnectionPool(serv->id())) {
-                lm += cp->latencyMonitors()[i];
+                if (i < cp->latencyMonitorCount()) {
+                    lm += cp->snapshotLatency(i);
+                }
             }
         }
-        buf = buf->fappend("ServerLatencyMonitorName:%s %s\n",
-                serv->addr().data(), lm.name().data());
+        const char* lmName = lm.name().data();
+        int lmNameLen = lm.name().length();
+        buf = buf->fappend("ServerLatencyMonitorName:%s %.*s\n",
+                serv->addr().data(), lmNameLen, lmName ? lmName : "");
         buf = lm.output(buf);
         buf = buf->fappend("\n");
     }
@@ -1197,11 +1259,13 @@ void Handler::infoServerLatencyRequest(Request* req)
         handleResponse(nullptr, req, res);
         return;
     }
-    LatencyMonitor lm = mLatencyMonitors[i];
+    LatencyMonitor lm = snapshotLatency(i);
     lm.reset();
     for (auto h : mProxy->handlers()) {
         if (auto cp = h->getConnectConnectionPool(serv->id())) {
-            lm += cp->latencyMonitors()[i];
+            if (i < cp->latencyMonitorCount()) {
+                lm += cp->snapshotLatency(i);
+            }
         }
     }
     buf = buf->fappend("ServerLatencyMonitorName:%s %s\n",
@@ -1209,16 +1273,21 @@ void Handler::infoServerLatencyRequest(Request* req)
         buf = lm.output(buf);
         buf = buf->fappend("\n");
     } else {
-        for (size_t i = 0; i < mLatencyMonitors.size(); ++i) {
-            LatencyMonitor lm = mLatencyMonitors[i];
+        size_t count = latencyMonitorCount();
+        for (size_t i = 0; i < count; ++i) {
+            LatencyMonitor lm = snapshotLatency(i);
             lm.reset();
             for (auto h : mProxy->handlers()) {
                 if (auto cp = h->getConnectConnectionPool(serv->id())) {
-                    lm += cp->latencyMonitors()[i];
+                    if (i < cp->latencyMonitorCount()) {
+                        lm += cp->snapshotLatency(i);
+                    }
                 }
             }
-            buf = buf->fappend("ServerLatencyMonitorName:%s %s\n",
-                    serv->addr().data(), lm.name().data());
+            const char* lmName = lm.name().data();
+            int lmNameLen = lm.name().length();
+            buf = buf->fappend("ServerLatencyMonitorName:%s %.*s\n",
+                    serv->addr().data(), lmNameLen, lmName ? lmName : "");
             buf = lm.output(buf);
             buf = buf->fappend("\n");
         }
@@ -1236,9 +1305,12 @@ void Handler::infoServerLatencyRequest(Request* req)
 
 void Handler::resetStats()
 {
-    mStats.reset();
-    for (auto& m : mLatencyMonitors) {
-        m.reset();
+    {
+        std::lock_guard<std::mutex> lck(mStatsMtx);
+        mStats.reset();
+        for (auto& m : mLatencyMonitors) {
+            m.reset();
+        }
     }
     for (auto cp : mConnPool) {
         if (cp) {
src/Handler.h
@@ -7,6 +7,7 @@
 #ifndef _PREDIXY_HANDLER_H_
 #define _PREDIXY_HANDLER_H_
 
+#include <mutex>
 #include <vector>
 #include "Predixy.h"
 #include "Multiplexor.h"
@@ -52,6 +53,9 @@ public:
     {
         return mLatencyMonitors;
     }
+    HandlerStats snapshotStats() const;
+    LatencyMonitor snapshotLatency(size_t i) const;
+    size_t latencyMonitorCount() const;
     ConnectConnectionPool* getConnectConnectionPool(int id) const
     {
         return id < (int)mConnPool.size() ? mConnPool[id] : nullptr;
@@ -65,13 +69,19 @@ public:
     }
     void addServerReadStats(Server* serv, int num)
     {
-        mStats.recvServerBytes += num;
-        mConnPool[serv->id()]->stats().recvBytes += num;
+        {
+            std::lock_guard<std::mutex> lck(mStatsMtx);
+            mStats.recvServerBytes += num;
+        }
+        mConnPool[serv->id()]->addRecvBytes(num);
     }
     void addServerWriteStats(Server* serv, int num)
     {
-        mStats.sendServerBytes += num;
-        mConnPool[serv->id()]->stats().sendBytes += num;
+        {
+            std::lock_guard<std::mutex> lck(mStatsMtx);
+            mStats.sendServerBytes += num;
+        }
+        mConnPool[serv->id()]->addSendBytes(num);
     }
     IDUnique& idUnique()
     {
@@ -126,6 +136,7 @@ private:
     long mStatsVer;
     HandlerStats mStats;
     std::vector<LatencyMonitor> mLatencyMonitors;
+    mutable std::mutex mStatsMtx;
     IDUnique mIDUnique;
     unsigned int mRandSeed;
 };
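Both headers declare the new member as mutable std::mutex mStatsMtx; that keyword is what lets the const accessors (snapshotStats, snapshotLatency, latencyMonitorCount) lock it. A small self-contained sketch of the same idea, with illustrative names:

#include <mutex>
#include <vector>

class LatencyTable {
public:
    // const member function: callers holding a const reference can still read
    // a consistent copy, because locking a mutable mutex is allowed here.
    std::vector<long> snapshot() const
    {
        std::lock_guard<std::mutex> lck(mMtx);
        return mSamples;               // copy made while the lock is held
    }
    void add(long v)
    {
        std::lock_guard<std::mutex> lck(mMtx);
        mSamples.push_back(v);
    }
private:
    std::vector<long> mSamples;
    mutable std::mutex mMtx;           // without 'mutable', snapshot() could not lock
};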
test/info_concurrent.py (new file)
@@ -0,0 +1,62 @@
+#!/usr/bin/env python3
+#
+# Exercise INFO while other commands are running to catch race regressions.
+#
+
+import threading
+import time
+import redis
+from test_util import parse_args, get_host_port, exit_with_result
+
+
+def run_load(client, stop_event, errors):
+    i = 0
+    while not stop_event.is_set():
+        try:
+            key = "info_concurrent:%d" % i
+            client.set(key, "v")
+            client.get(key)
+            i += 1
+        except Exception as exc:
+            errors.append(("load", str(exc)))
+            return
+
+
+def run_info(client, stop_event, errors):
+    while not stop_event.is_set():
+        try:
+            client.info()
+        except Exception as exc:
+            errors.append(("info", str(exc)))
+            return
+
+
+def run_test(host, port):
+    client = redis.StrictRedis(host=host, port=port)
+    stop_event = threading.Event()
+    errors = []
+
+    threads = [
+        threading.Thread(target=run_load, args=(client, stop_event, errors)),
+        threading.Thread(target=run_load, args=(client, stop_event, errors)),
+        threading.Thread(target=run_info, args=(client, stop_event, errors)),
+    ]
+    for t in threads:
+        t.start()
+
+    time.sleep(1.5)
+    stop_event.set()
+    for t in threads:
+        t.join()
+
+    if errors:
+        print("FAIL: concurrent INFO errors", errors[0])
+        return False
+    return True
+
+
+if __name__ == "__main__":
+    args = parse_args("INFO concurrent test")
+    host, port = get_host_port(args)
+    success = run_test(host, port)
+    exit_with_result(success, "info concurrent", "info concurrent")
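The script above is a stress check rather than a deterministic race detector: two threads drive SET/GET traffic while a third issues INFO for about 1.5 seconds, and any client-visible failure is recorded and fails the test. A proxy crash would typically surface as a connection error in one of the worker threads, which is the signal this test relies on.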
@@ -152,6 +152,7 @@ TESTS=(
     "test/string_to_int.py"
     "test/handler_stop_atomic.py"
     "test/logger_stop_atomic.py"
+    "test/info_concurrent.py"
     "test/pubsub_long_name.py"
     "test/pubsub_large_message.py"
     "test/transaction_forbid.py"