Because Thrift uses a dedicated thread for each connection, you can use the thread id to tie the two together. As far as I know, Thrift itself cannot do this; in my opinion it would only be possible if Thrift passed a context argument down to each handler function.
Here is an example that uses the thread id:
#include <atomic>
#include <cstdint>
#include <cstdio>
#include <iostream>
#include <map>
#include <thread>

#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/server/TThreadedServer.h>
#include <thrift/transport/TBufferTransports.h>
#include <thrift/transport/TServerSocket.h>

// #include "gen-cpp/service_rpc.h" // the generated header for your service
                                     // (service_rpcIf / yourRpcProcessor below are placeholders)

using namespace apache::thrift;
using namespace apache::thrift::protocol;
using namespace apache::thrift::server;
using namespace apache::thrift::transport;
using std::shared_ptr; // older Thrift releases use boost::shared_ptr instead

struct ConnectionContext {
    // you would put your connection specific variables here
};

// You could reverse these types if you had more than one
// context per thrift connection, e.g. your service involved opening
// or connecting to more than one thing per thrift connection.
// Note: every connection thread touches this map, so production code
// should guard it with a mutex (see the sketch after the example).
std::map<std::thread::id, ConnectionContext> threadContextMap;

class OurEventHandler : public server::TServerEventHandler {
public:
    OurEventHandler() : NumClients_(0) {}

    // Called before the server begins serving.
    // virtual void preServe() {}

    // createContext may return a user-defined context to aid in cleaning
    // up client connections upon disconnection. This example dispenses
    // with that mechanism, keys the context off the connection's thread id
    // instead, and returns NULL.
    virtual void* createContext(shared_ptr<protocol::TProtocol> input,
                                shared_ptr<protocol::TProtocol> output)
    {
        printf("Client connected (total %u)\n", ++NumClients_);
        auto this_id = std::this_thread::get_id();
        std::cout << "connected thread " << this_id << std::endl;
        // Create the per-connection context for this thread.
        threadContextMap[this_id] = ConnectionContext();
        return NULL;
    }

    // Called when a client has disconnected, either naturally or by error.
    virtual void deleteContext(void* serverContext,
                               shared_ptr<protocol::TProtocol> input,
                               shared_ptr<protocol::TProtocol> output)
    {
        printf("Client disconnected (total %u)\n", --NumClients_);
        auto this_id = std::this_thread::get_id();
        std::cout << "disconnected thread " << this_id << std::endl;
        auto context = threadContextMap[this_id];
        // TODO: Perform your context specific cleanup code here
        threadContextMap.erase(this_id);
    }

protected:
    std::atomic<uint32_t> NumClients_; // updated from multiple connection threads
};

class yourRpcHandler : virtual public service_rpcIf {
public:
    yourRpcHandler()
    {
        // Your initialization goes here
    }

    void SomeMethod()
    {
        // Look up the context that belongs to this connection's thread.
        auto context = threadContextMap[std::this_thread::get_id()];
        // TODO: use the context as you see fit
    }
};

int main(int argc, char **argv)
{
    int port = 9090;
    printf("Listening on port %d\n", port);

    shared_ptr<yourRpcHandler> handler(new yourRpcHandler());
    shared_ptr<TProcessor> processor(new yourRpcProcessor(handler));
    shared_ptr<TServerTransport> serverTransport(new TServerSocket(port));
    shared_ptr<TTransportFactory> transportFactory(new TBufferedTransportFactory());
    shared_ptr<TProtocolFactory> protocolFactory(new TBinaryProtocolFactory());

    TThreadedServer server(processor, serverTransport, transportFactory, protocolFactory);
    shared_ptr<OurEventHandler> EventHandler(new OurEventHandler());
    server.setServerEventHandler(EventHandler);
    server.serve();
    return 0;
}
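
Since every connection thread reads and writes threadContextMap, the bare std::map above is fine as an illustration but is not safe against concurrent inserts and erases. Below is a minimal sketch of how you might guard it with a mutex; the helper names (registerCurrentContext, getCurrentContext, unregisterCurrentContext) are my own and not part of Thrift.

#include <map>
#include <mutex>
#include <thread>

// Same idea as ConnectionContext in the example above.
struct ConnectionContext {
    // per-connection state goes here
};

namespace {
    std::map<std::thread::id, ConnectionContext> contexts;
    std::mutex contextsMutex;
}

// Call from createContext(): add an entry for the current connection thread.
void registerCurrentContext() {
    std::lock_guard<std::mutex> lock(contextsMutex);
    contexts[std::this_thread::get_id()] = ConnectionContext();
}

// Call from your RPC methods: copy out this connection's context.
ConnectionContext getCurrentContext() {
    std::lock_guard<std::mutex> lock(contextsMutex);
    return contexts[std::this_thread::get_id()];
}

// Call from deleteContext(): remove the entry when the connection closes.
void unregisterCurrentContext() {
    std::lock_guard<std::mutex> lock(contextsMutex);
    contexts.erase(std::this_thread::get_id());
}

If your handler methods need to mutate the context rather than just read a copy, you would either hold the lock while working on the entry or add an update helper alongside these.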