Add support for listening gRPC over UNIX socket by thevilledev · Pull Request #1159 · tensorflow/serving · GitHub

Add support for listening gRPC over UNIX socket #1159

Merged

4 changes: 4 additions & 0 deletions tensorflow_serving/model_servers/main.cc
@@ -59,6 +59,10 @@ int main(int argc, char** argv) {
std::vector<tensorflow::Flag> flag_list = {
tensorflow::Flag("port", &options.grpc_port,
"Port to listen on for gRPC API"),
tensorflow::Flag("grpc_socket_path", &options.grpc_socket_path,
"If non-empty, listen to a UNIX socket for gRPC API "
"on the given path. Can be either relative or absolute "
"path."),
tensorflow::Flag("rest_api_port", &options.http_port,
"Port to listen on for HTTP/REST API. If set to zero "
"HTTP/REST API will not be exported. This port must be "
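The new flag is additive: the server keeps listening on the TCP port given by --port, and the UNIX socket becomes an extra gRPC listener. A minimal launch sketch in Python, assuming a tensorflow_model_server binary on the PATH and a hypothetical half_plus_two export directory (both illustrative, not from this PR):

import subprocess

# Hypothetical paths for illustration; adjust to your environment.
SOCKET_PATH = '/tmp/tf-serving.sock'
MODEL_BASE_PATH = '/models/half_plus_two'

# --grpc_socket_path adds a UNIX-socket listener alongside the TCP port.
command = [
    'tensorflow_model_server',
    '--port=8500',
    '--grpc_socket_path=' + SOCKET_PATH,
    '--model_name=half_plus_two',
    '--model_base_path=' + MODEL_BASE_PATH,
]
server = subprocess.Popen(command)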
11 changes: 11 additions & 0 deletions tensorflow_serving/model_servers/server.cc
@@ -263,6 +263,13 @@ Status Server::BuildAndStart(const Options& server_options) {
builder.AddListeningPort(
server_address,
BuildServerCredentialsFromSSLConfigFile(server_options.ssl_config_file));
// If defined, listen to a UNIX socket for gRPC.
if (!server_options.grpc_socket_path.empty()) {
const string grpc_socket_uri = "unix:" + server_options.grpc_socket_path;
builder.AddListeningPort(
grpc_socket_uri,
BuildServerCredentialsFromSSLConfigFile(server_options.ssl_config_file));
}
builder.RegisterService(model_service_.get());
builder.RegisterService(prediction_service_.get());
builder.SetMaxMessageSize(tensorflow::kint32max);
@@ -284,6 +291,10 @@ Status Server::BuildAndStart(const Options& server_options) {
return errors::InvalidArgument("Failed to BuildAndStart gRPC server");
}
LOG(INFO) << "Running gRPC ModelServer at " << server_address << " ...";
if (!server_options.grpc_socket_path.empty()) {
LOG(INFO) << "Running gRPC ModelServer at UNIX socket "
<< server_options.grpc_socket_path << " ...";
}

if (server_options.http_port != 0) {
if (server_options.http_port != server_options.grpc_port) {
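Because the socket is registered via AddListeningPort with a "unix:" prefix, clients reach it through gRPC's standard unix: name-resolution scheme with no server-specific glue. A minimal client sketch, assuming the Python tensorflow-serving-api package and a hypothetical model named half_plus_two with an input tensor 'x':

import grpc
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc

# 'unix:' is the same prefix BuildAndStart() prepends above, and gRPC's
# standard URI scheme for UNIX domain sockets.
channel = grpc.insecure_channel('unix:/tmp/tf-serving.sock')
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

request = predict_pb2.PredictRequest()
request.model_spec.name = 'half_plus_two'  # hypothetical model name
request.inputs['x'].CopyFrom(tf.make_tensor_proto(2.0, shape=[1]))
print(stub.Predict(request, 5.0))  # 5-second deadline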
1 change: 1 addition & 0 deletions tensorflow_serving/model_servers/server.h
@@ -39,6 +39,7 @@ class Server {
//
tensorflow::int32 grpc_port = 8500;
tensorflow::string grpc_channel_arguments;
tensorflow::string grpc_socket_path;

//
// HTTP Server options.
14 changes: 14 additions & 0 deletions tensorflow_serving/model_servers/tensorflow_model_server_test.py
@@ -48,6 +48,7 @@
HTTP_REST_TIMEOUT_MS = 5000
CHANNEL_WAIT_TIMEOUT = 5.0
WAIT_FOR_SERVER_READY_INT_SECS = 60
GRPC_SOCKET_PATH = "/tmp/tf-serving.sock"


def PickUnusedPort():
@@ -158,6 +159,7 @@ def RunServer(model_name,
command += ' --port=' + str(port)
command += ' --rest_api_port=' + str(rest_api_port)
command += ' --rest_api_timeout_in_ms=' + str(HTTP_REST_TIMEOUT_MS)
command += ' --grpc_socket_path=' + GRPC_SOCKET_PATH

if model_config_file:
command += ' --model_config_file=' + model_config_file
@@ -672,6 +674,18 @@ def testPrometheusEndpoint(self):
# Verify that there should be some metric type information.
self.assertIn('# TYPE', resp_data)

def testPredictUDS(self):
"""Test saved model prediction over a Unix domain socket."""
_ = TensorflowModelServerTest.RunServer(
'default',
self._GetSavedModelBundlePath())
model_server_address = "unix:%s" % GRPC_SOCKET_PATH
self.VerifyPredictRequest(
model_server_address,
expected_output=3.0,
specify_output=False,
expected_version=self._GetModelVersion(
self._GetSavedModelHalfPlusThreePath()))

if __name__ == '__main__':
tf.test.main()
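Note that testPredictUDS passes the unix: URI straight into VerifyPredictRequest, so the existing gRPC client path in the test harness needs no changes. For local debugging, one quick sanity check (a sketch, assuming the server is already running) is that the flag's path really is a socket file:

import os
import stat

st = os.stat('/tmp/tf-serving.sock')  # GRPC_SOCKET_PATH from the test
assert stat.S_ISSOCK(st.st_mode), 'expected a UNIX domain socket'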