diff --git a/router-tests/connectrpc/connectrpc_client_test.go b/router-tests/connectrpc/connectrpc_client_test.go new file mode 100644 index 0000000000..67a7ac2435 --- /dev/null +++ b/router-tests/connectrpc/connectrpc_client_test.go @@ -0,0 +1,305 @@ +package integration + +import ( + "context" + "crypto/tls" + "encoding/json" + "net" + "net/http" + "testing" + + "connectrpc.com/connect" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + employeev1 "github.com/wundergraph/cosmo/router-tests/testdata/connectrpc/client/employee.v1" + "github.com/wundergraph/cosmo/router-tests/testdata/connectrpc/client/employee.v1/employeev1connect" + "golang.org/x/net/http2" +) + +// TestConnectRPC_ClientProtocols tests all three RPC protocols (Connect, gRPC, gRPC-Web) +// using generated client code to ensure proper multi-protocol support +func TestConnectRPC_ClientProtocols(t *testing.T) { + t.Parallel() + + // Use shared helper for employee GraphQL handler + ts := NewTestConnectRPCServer(t, ConnectRPCServerOptions{ + GraphQLHandler: EmployeeGraphQLHandler(), + }) + defer ts.Close() + + err := ts.Start() + require.NoError(t, err) + + baseURL := "http://" + ts.Addr().String() + + expectedEmployee := `{ + "id": 1, + "tag": "employee-1", + "details": { + "forename": "John", + "surname": "Doe", + "pets": [{"name": "Fluffy"}], + "location": {"key": {"name": "San Francisco"}} + } + }` + + t.Run("Connect protocol", func(t *testing.T) { + client := employeev1connect.NewEmployeeServiceClient( + http.DefaultClient, + baseURL, + // Connect protocol is the default + ) + + req := connect.NewRequest(&employeev1.GetEmployeeByIdRequest{ + EmployeeId: 1, + }) + + resp, err := client.GetEmployeeById(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp.Msg.Employee) + + employeeJSON, err := json.Marshal(resp.Msg.Employee) + require.NoError(t, err) + require.JSONEq(t, expectedEmployee, string(employeeJSON)) + }) + + t.Run("gRPC protocol", func(t *testing.T) { + // Create HTTP client with h2c support for gRPC over HTTP/1.1 + // This mimics what grpcurl does with -plaintext flag + h2cClient := &http.Client{ + Transport: &http2.Transport{ + // Allow HTTP/2 without TLS (h2c) + AllowHTTP: true, + // Use a custom dialer that doesn't require TLS + DialTLSContext: func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, network, addr) + }, + }, + } + + client := employeev1connect.NewEmployeeServiceClient( + h2cClient, + baseURL, + connect.WithGRPC(), + ) + + req := connect.NewRequest(&employeev1.GetEmployeeByIdRequest{ + EmployeeId: 1, + }) + + resp, err := client.GetEmployeeById(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp.Msg.Employee) + + employeeJSON, err := json.Marshal(resp.Msg.Employee) + require.NoError(t, err) + require.JSONEq(t, expectedEmployee, string(employeeJSON)) + }) + + t.Run("gRPC-Web protocol", func(t *testing.T) { + client := employeev1connect.NewEmployeeServiceClient( + http.DefaultClient, + baseURL, + connect.WithGRPCWeb(), + ) + + req := connect.NewRequest(&employeev1.GetEmployeeByIdRequest{ + EmployeeId: 1, + }) + + resp, err := client.GetEmployeeById(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp.Msg.Employee) + + employeeJSON, err := json.Marshal(resp.Msg.Employee) + require.NoError(t, err) + require.JSONEq(t, expectedEmployee, string(employeeJSON)) + }) +} + +// TestConnectRPC_ClientErrorHandling tests error 
scenarios with generated client +func TestConnectRPC_ClientErrorHandling(t *testing.T) { + t.Parallel() + + t.Run("GraphQL error with no data returns error", func(t *testing.T) { + ts := NewTestConnectRPCServer(t, ConnectRPCServerOptions{ + GraphQLHandler: ErrorGraphQLHandler("Employee not found"), + }) + + err := ts.Start() + require.NoError(t, err) + + client := employeev1connect.NewEmployeeServiceClient( + http.DefaultClient, + "http://"+ts.Addr().String(), + ) + + req := connect.NewRequest(&employeev1.GetEmployeeByIdRequest{ + EmployeeId: 999, + }) + + _, err = client.GetEmployeeById(context.Background(), req) + require.Error(t, err) + + var connectErr *connect.Error + require.ErrorAs(t, err, &connectErr) + // GraphQL errors use CodeUnknown (not CodeInternal which implies server bugs) + assert.Equal(t, connect.CodeUnknown, connectErr.Code()) + assert.Contains(t, connectErr.Message(), "Employee not found") + }) + + t.Run("GraphQL error with partial data returns error", func(t *testing.T) { + // Custom handler for partial data with errors + // Per GraphQL spec, errors at top level indicate a failure even with partial data + handler := func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{ + "data": { + "employee": { + "id": 1, + "tag": "employee-1", + "details": { + "forename": "John", + "surname": "Doe" + } + } + }, + "errors": [{"message": "Could not fetch pets"}] + }`)) + } + + ts := NewTestConnectRPCServer(t, ConnectRPCServerOptions{ + GraphQLHandler: handler, + }) + + err := ts.Start() + require.NoError(t, err) + + client := employeev1connect.NewEmployeeServiceClient( + http.DefaultClient, + "http://"+ts.Addr().String(), + ) + + req := connect.NewRequest(&employeev1.GetEmployeeByIdRequest{ + EmployeeId: 1, + }) + + _, err = client.GetEmployeeById(context.Background(), req) + // Per GraphQL spec, errors at top level should result in an error + require.Error(t, err) + + var connectErr *connect.Error + require.ErrorAs(t, err, &connectErr) + assert.Equal(t, connect.CodeUnknown, connectErr.Code()) + assert.Contains(t, connectErr.Message(), "GraphQL partial success with errors") + }) + + t.Run("HTTP 404 maps to CodeNotFound", func(t *testing.T) { + ts := NewTestConnectRPCServer(t, ConnectRPCServerOptions{ + GraphQLHandler: HTTPErrorHandler(http.StatusNotFound, "Not Found"), + }) + + err := ts.Start() + require.NoError(t, err) + + client := employeev1connect.NewEmployeeServiceClient( + http.DefaultClient, + "http://"+ts.Addr().String(), + ) + + req := connect.NewRequest(&employeev1.GetEmployeeByIdRequest{ + EmployeeId: 1, + }) + + _, err = client.GetEmployeeById(context.Background(), req) + require.Error(t, err) + + var connectErr *connect.Error + require.ErrorAs(t, err, &connectErr) + assert.Equal(t, connect.CodeNotFound, connectErr.Code()) + }) + + t.Run("HTTP 500 maps to CodeInternal", func(t *testing.T) { + ts := NewTestConnectRPCServer(t, ConnectRPCServerOptions{ + GraphQLHandler: HTTPErrorHandler(http.StatusInternalServerError, "Internal Server Error"), + }) + + err := ts.Start() + require.NoError(t, err) + + client := employeev1connect.NewEmployeeServiceClient( + http.DefaultClient, + "http://"+ts.Addr().String(), + ) + + req := connect.NewRequest(&employeev1.GetEmployeeByIdRequest{ + EmployeeId: 1, + }) + + _, err = client.GetEmployeeById(context.Background(), req) + require.Error(t, err) + + var connectErr *connect.Error + require.ErrorAs(t, err, &connectErr) + assert.Equal(t, 
connect.CodeInternal, connectErr.Code()) + }) + + t.Run("multiple GraphQL errors with extension codes", func(t *testing.T) { + // Simulate a GraphQL response with multiple errors containing extension codes + handler := func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{ + "data": null, + "errors": [ + { + "message": "You are not authorized to access this resource", + "path": ["employee"], + "extensions": { + "code": "UNAUTHORIZED", + "statusCode": 401 + } + }, + { + "message": "Rate limit exceeded", + "path": ["employee"], + "extensions": { + "code": "RATE_LIMITED", + "retryAfter": 60 + } + } + ] + }`)) + } + + ts := NewTestConnectRPCServer(t, ConnectRPCServerOptions{ + GraphQLHandler: handler, + }) + + err := ts.Start() + require.NoError(t, err) + + client := employeev1connect.NewEmployeeServiceClient( + http.DefaultClient, + "http://"+ts.Addr().String(), + ) + + req := connect.NewRequest(&employeev1.GetEmployeeByIdRequest{ + EmployeeId: 1, + }) + + _, err = client.GetEmployeeById(context.Background(), req) + require.Error(t, err) + + var connectErr *connect.Error + require.ErrorAs(t, err, &connectErr) + assert.Equal(t, connect.CodeUnknown, connectErr.Code()) + + // The error message contains the first GraphQL error and indicates additional errors + // Format: "GraphQL operation failed: (and N more errors)" + assert.Contains(t, connectErr.Message(), "You are not authorized to access this resource") + assert.Contains(t, connectErr.Message(), "and 1 more errors") + }) +} diff --git a/router-tests/connectrpc/connectrpc_server_lifecycle_test.go b/router-tests/connectrpc/connectrpc_server_lifecycle_test.go new file mode 100644 index 0000000000..5a08d0531c --- /dev/null +++ b/router-tests/connectrpc/connectrpc_server_lifecycle_test.go @@ -0,0 +1,110 @@ +package integration + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/wundergraph/cosmo/router/pkg/connectrpc" + "go.uber.org/zap" +) + +// TestConnectRPC_ServerLifecycle_StartStopReload tests the complete lifecycle of the server +func TestConnectRPC_ServerLifecycle_StartStopReload(t *testing.T) { + t.Parallel() + + t.Run("complete lifecycle: start -> reload -> stop", func(t *testing.T) { + ts := NewTestConnectRPCServer(t, ConnectRPCServerOptions{}) + + // Start the server + err := ts.Start() + require.NoError(t, err) + + // Verify server is running + assert.Greater(t, ts.GetServiceCount(), 0) + + // Reload the server + err = ts.Reload() + require.NoError(t, err) + + // Verify server still works after reload + assert.Greater(t, ts.GetServiceCount(), 0) + + // Stop is handled by cleanup + }) + + t.Run("stop without start returns error", func(t *testing.T) { + server, err := connectrpc.NewServer(connectrpc.ServerConfig{ + ServicesDir: "../../router/pkg/connectrpc/samples/services", + GraphQLEndpoint: "http://localhost:4000/graphql", + Logger: zap.NewNop(), + }) + require.NoError(t, err) + + ctx := context.Background() + err = server.Stop(ctx) + assert.Error(t, err) + assert.Contains(t, err.Error(), "server is not started") + }) +} + +// TestConnectRPC_ServerLifecycle_ErrorScenarios tests various error scenarios +func TestConnectRPC_ServerLifecycle_ErrorScenarios(t *testing.T) { + t.Parallel() + + t.Run("NewServer fails with invalid proto directory", func(t *testing.T) { + _, err := connectrpc.NewServer(connectrpc.ServerConfig{ + ServicesDir: 
"/nonexistent/path", + GraphQLEndpoint: "http://localhost:4000/graphql", + Logger: zap.NewNop(), + }) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to discover services") + }) +} + +// TestConnectRPC_ServerLifecycle_StateTransitions tests state transitions +func TestConnectRPC_ServerLifecycle_StateTransitions(t *testing.T) { + t.Parallel() + + t.Run("service names remain consistent through reload", func(t *testing.T) { + ts := NewTestConnectRPCServer(t, ConnectRPCServerOptions{}) + + err := ts.Start() + require.NoError(t, err) + + namesBeforeReload := ts.GetServiceNames() + require.NotEmpty(t, namesBeforeReload) + + err = ts.Reload() + require.NoError(t, err) + + namesAfterReload := ts.GetServiceNames() + assert.ElementsMatch(t, namesBeforeReload, namesAfterReload, "service names should remain consistent") + }) +} + +// TestConnectRPC_ServerLifecycle_GracefulShutdown tests graceful shutdown behavior +func TestConnectRPC_ServerLifecycle_GracefulShutdown(t *testing.T) { + t.Parallel() + + t.Run("stop respects context deadline", func(t *testing.T) { + ts := NewTestConnectRPCServer(t, ConnectRPCServerOptions{}) + + err := ts.Start() + require.NoError(t, err) + + // Use a reasonable timeout + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + startTime := time.Now() + err = ts.Server.Stop(ctx) + duration := time.Since(startTime) + + assert.NoError(t, err) + assert.Less(t, duration, 10*time.Second, "stop should complete within timeout") + }) +} \ No newline at end of file diff --git a/router-tests/connectrpc/connectrpc_test.go b/router-tests/connectrpc/connectrpc_test.go new file mode 100644 index 0000000000..0f5de24c06 --- /dev/null +++ b/router-tests/connectrpc/connectrpc_test.go @@ -0,0 +1,37 @@ +package integration + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestConnectRPC_ServiceDiscovery tests service discovery functionality +func TestConnectRPC_ServiceDiscovery(t *testing.T) { + t.Parallel() + + t.Run("discovers services from proto files", func(t *testing.T) { + ts := NewTestConnectRPCServer(t, ConnectRPCServerOptions{}) + + err := ts.Start() + require.NoError(t, err) + + // Verify services are discovered + serviceCount := ts.GetServiceCount() + assert.Greater(t, serviceCount, 0, "should discover at least one service") + + serviceNames := ts.GetServiceNames() + assert.NotEmpty(t, serviceNames, "should have service names") + assert.Contains(t, serviceNames, "employee.v1.EmployeeService") + + // Log discovered services and operations + t.Logf("Discovered %d service(s):", serviceCount) + for _, serviceName := range serviceNames { + t.Logf(" - Service: %s", serviceName) + } + + operationCount := ts.GetOperationCount() + t.Logf("Discovered %d operation(s)", operationCount) + }) +} \ No newline at end of file diff --git a/router-tests/connectrpc/connectrpc_test_helpers.go b/router-tests/connectrpc/connectrpc_test_helpers.go new file mode 100644 index 0000000000..e71922eed0 --- /dev/null +++ b/router-tests/connectrpc/connectrpc_test_helpers.go @@ -0,0 +1,244 @@ +package integration + +import ( + "bytes" + "context" + "fmt" + "io" + "net" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/wundergraph/cosmo/router/pkg/connectrpc" + "go.uber.org/zap" +) + +// MockGraphQLServer is a test HTTP server that mocks GraphQL responses +type MockGraphQLServer struct { + server *http.Server + handler http.HandlerFunc + URL string +} + +// 
NewMockGraphQLServer creates a new mock GraphQL server with the given handler +func NewMockGraphQLServer(handler http.HandlerFunc) *MockGraphQLServer { + m := &MockGraphQLServer{ + handler: handler, + } + + mux := http.NewServeMux() + mux.HandleFunc("/graphql", func(w http.ResponseWriter, r *http.Request) { + // Log the incoming request for debugging + body, _ := io.ReadAll(r.Body) + _ = r.Body.Close() + r.Body = io.NopCloser(bytes.NewBuffer(body)) + + if m.handler != nil { + m.handler(w, r) + } + }) + + // Also handle root path for simpler tests + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + if m.handler != nil { + m.handler(w, r) + } + }) + + m.server = &http.Server{ + Handler: mux, + Addr: "127.0.0.1:0", + } + + listener, err := net.Listen("tcp", m.server.Addr) + if err != nil { + panic(err) + } + + m.URL = "http://" + listener.Addr().String() + go m.server.Serve(listener) //nolint:errcheck // test server + + return m +} + +// Close shuts down the mock server +func (m *MockGraphQLServer) Close() { + if m.server != nil { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + _ = m.server.Shutdown(ctx) + } +} + +// ConnectRPCServerOptions configures a test ConnectRPC server +type ConnectRPCServerOptions struct { + ServicesDir string + GraphQLEndpoint string + GraphQLHandler http.HandlerFunc // Custom GraphQL handler (creates mock server if provided) + ListenAddr string + Logger *zap.Logger +} + +// TestConnectRPCServer wraps a ConnectRPC server for testing +type TestConnectRPCServer struct { + Server *connectrpc.Server + GraphQLServer *MockGraphQLServer + t *testing.T + cleanupDone bool +} + +// NewTestConnectRPCServer creates a new test ConnectRPC server with automatic cleanup +func NewTestConnectRPCServer(t *testing.T, opts ConnectRPCServerOptions) *TestConnectRPCServer { + t.Helper() + + // Set defaults + if opts.ServicesDir == "" { + opts.ServicesDir = "../../router/pkg/connectrpc/samples/services" + } + if opts.ListenAddr == "" { + opts.ListenAddr = "localhost:0" + } + if opts.Logger == nil { + opts.Logger = zap.NewNop() + } + + // Create mock GraphQL server if endpoint not provided + var graphqlServer *MockGraphQLServer + if opts.GraphQLEndpoint == "" { + // Use custom handler if provided, otherwise use default + handler := opts.GraphQLHandler + if handler == nil { + handler = func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{"data":{}}`)) + } + } + graphqlServer = NewMockGraphQLServer(handler) + opts.GraphQLEndpoint = graphqlServer.URL + "/graphql" + } + + server, err := connectrpc.NewServer(connectrpc.ServerConfig{ + ServicesDir: opts.ServicesDir, + GraphQLEndpoint: opts.GraphQLEndpoint, + ListenAddr: opts.ListenAddr, + Logger: opts.Logger, + }) + require.NoError(t, err) + + ts := &TestConnectRPCServer{ + Server: server, + GraphQLServer: graphqlServer, + t: t, + } + + // Register cleanup + t.Cleanup(func() { + ts.Close() + }) + + return ts +} + +// Start starts the ConnectRPC server +func (ts *TestConnectRPCServer) Start() error { + return ts.Server.Start() +} + +// Reload reloads the ConnectRPC server +func (ts *TestConnectRPCServer) Reload() error { + return ts.Server.Reload() +} + +// Close stops the server and cleans up resources +func (ts *TestConnectRPCServer) Close() { + if ts.cleanupDone { + return + } + ts.cleanupDone = true + + if ts.Server != nil { + ctx, cancel := 
context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + _ = ts.Server.Stop(ctx) + } + + if ts.GraphQLServer != nil { + ts.GraphQLServer.Close() + } +} + +// Addr returns the server's listen address +func (ts *TestConnectRPCServer) Addr() net.Addr { + return ts.Server.Addr() +} + +// GetServiceCount returns the number of discovered services +func (ts *TestConnectRPCServer) GetServiceCount() int { + return ts.Server.GetServiceCount() +} + +// GetServiceNames returns the names of discovered services +func (ts *TestConnectRPCServer) GetServiceNames() []string { + return ts.Server.GetServiceNames() +} + +// GetOperationCount returns the number of loaded operations +func (ts *TestConnectRPCServer) GetOperationCount() int { + return ts.Server.GetOperationCount() +} + +// DefaultGraphQLResponse returns a standard test GraphQL response +func DefaultGraphQLResponse() string { + return `{ + "data": { + "employee": { + "id": 1, + "tag": "employee-1", + "details": { + "forename": "John", + "surname": "Doe", + "pets": [{"name": "Fluffy"}], + "location": {"key": {"name": "San Francisco"}} + } + } + } + }` +} + +// SimpleGraphQLHandler returns a handler that responds with a simple success response +func SimpleGraphQLHandler() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{"data":{}}`)) + } +} + +// EmployeeGraphQLHandler returns a handler that responds with employee data +func EmployeeGraphQLHandler() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(DefaultGraphQLResponse())) + } +} + +// ErrorGraphQLHandler returns a handler that responds with a GraphQL error +func ErrorGraphQLHandler(message string) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, _ = fmt.Fprintf(w, `{"errors": [{"message": "%s"}]}`, message) + } +} + +// HTTPErrorHandler returns a handler that responds with an HTTP error +func HTTPErrorHandler(statusCode int, message string) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(statusCode) + _, _ = w.Write([]byte(message)) + } +} diff --git a/router-tests/go.mod b/router-tests/go.mod index 3de9ec5fcf..aec2e6bdd2 100644 --- a/router-tests/go.mod +++ b/router-tests/go.mod @@ -3,6 +3,7 @@ module github.com/wundergraph/cosmo/router-tests go 1.25 require ( + connectrpc.com/connect v1.19.1 github.com/MicahParks/jwkset v0.11.0 github.com/buger/jsonparser v1.1.1 github.com/cloudflare/backoff v0.0.0-20240920015135-e46b80a3a7d0 @@ -42,7 +43,7 @@ require ( ) require ( - connectrpc.com/connect v1.16.2 // indirect + connectrpc.com/vanguard v0.3.0 // indirect github.com/99designs/gqlgen v0.17.76 // indirect github.com/KimMachineGun/automemlimit v0.6.1 // indirect github.com/MicahParks/keyfunc/v3 v3.6.2 // indirect diff --git a/router-tests/go.sum b/router-tests/go.sum index dc73f635fd..8bbb073a33 100644 --- a/router-tests/go.sum +++ b/router-tests/go.sum @@ -1,5 +1,7 @@ -connectrpc.com/connect v1.16.2 h1:ybd6y+ls7GOlb7Bh5C8+ghA6SvCBajHwxssO2CGFjqE= -connectrpc.com/connect v1.16.2/go.mod h1:n2kgwskMHXC+lVqb18wngEpF95ldBHXjZYJussz5FRc= +connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14= +connectrpc.com/connect v1.19.1/go.mod 
h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w= +connectrpc.com/vanguard v0.3.0 h1:prUKFm8rYDwvpvnOSoqdUowPMK0tRA0pbSrQoMd6Zng= +connectrpc.com/vanguard v0.3.0/go.mod h1:nxQ7+N6qhBiQczqGwdTw4oCqx1rDryIt20cEdECqToM= github.com/99designs/gqlgen v0.17.76 h1:YsJBcfACWmXWU2t1yCjoGdOmqcTfOFpjbLAE443fmYI= github.com/99designs/gqlgen v0.17.76/go.mod h1:miiU+PkAnTIDKMQ1BseUOIVeQHoiwYDZGCswoxl7xec= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -172,8 +174,8 @@ github.com/jensneuse/byte-template v0.0.0-20231025215717-69252eb3ed56 h1:wo26fh6 github.com/jensneuse/byte-template v0.0.0-20231025215717-69252eb3ed56/go.mod h1:0D5r/VSW6D/o65rKLL9xk7sZxL2+oku2HvFPYeIMFr4= github.com/jensneuse/diffview v1.0.0 h1:4b6FQJ7y3295JUHU3tRko6euyEboL825ZsXeZZM47Z4= github.com/jensneuse/diffview v1.0.0/go.mod h1:i6IacuD8LnEaPuiyzMHA+Wfz5mAuycMOf3R/orUY9y4= -github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= -github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= +github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94= +github.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= diff --git a/router-tests/testdata/connectrpc/README.md b/router-tests/testdata/connectrpc/README.md new file mode 100644 index 0000000000..0901217717 --- /dev/null +++ b/router-tests/testdata/connectrpc/README.md @@ -0,0 +1,33 @@ +# ConnectRPC Test Data + +This directory contains Protocol Buffer definitions and GraphQL operations for ConnectRPC integration tests. + +## Writing New Tests + +To add a new service for testing: + +1. Create a new directory under `services/` (e.g., `services/myservice.v1/`) +2. Add your `.proto` file with service definitions - or generate it with `wgc grpc-service generate` +3. Add corresponding `.graphql` files (GraphQL Executable Operations) for each RPC method +4. The ConnectRPC server will automatically discover and load them + +### Example Structure + +```text +services/ +└── myservice.v1/ + ├── service.proto # Proto service definition + ├── QueryGetItem.graphql # GraphQL query operation + └── MutationCreateItem.graphql # GraphQL mutation operation +``` + +## Regenerating Client Code + +The `client/` directory contains generated client code used by E2E tests. This code is **committed to the repository**. 
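+To regenerate it, a minimal sketch (assuming the `buf` CLI is installed and the local `protoc-gen-go` / `protoc-gen-connect-go` plugins referenced by `buf.gen.yaml` are on `PATH`):
+
+```sh
+# install the code generators that buf.gen.yaml invokes as local plugins
+go install google.golang.org/protobuf/cmd/protoc-gen-go@latest
+go install connectrpc.com/connect/cmd/protoc-gen-connect-go@latest
+
+# run from this directory (router-tests/testdata/connectrpc), where buf.yaml and buf.gen.yaml live
+buf generate
+```
+
+If byte-identical output matters, pin the plugin versions to those recorded in the headers of the generated files instead of `@latest`.
+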
+ +### When to Regenerate + +Regenerate when: +- Proto service definitions are modified (`services/*/service.proto`) +- GraphQL operations are added, removed, or modified (`services/*/*.graphql`) +- Message types are changed in proto files diff --git a/router-tests/testdata/connectrpc/buf.gen.yaml b/router-tests/testdata/connectrpc/buf.gen.yaml new file mode 100644 index 0000000000..d6931ddf73 --- /dev/null +++ b/router-tests/testdata/connectrpc/buf.gen.yaml @@ -0,0 +1,15 @@ +version: v2 +managed: + enabled: true + override: + - file_option: go_package_prefix + value: github.com/wundergraph/cosmo/router-tests/testdata/connectrpc/client +plugins: + - local: protoc-gen-go + out: client + opt: + - paths=source_relative + - local: protoc-gen-connect-go + out: client + opt: + - paths=source_relative diff --git a/router-tests/testdata/connectrpc/buf.yaml b/router-tests/testdata/connectrpc/buf.yaml new file mode 100644 index 0000000000..07d7fc1d44 --- /dev/null +++ b/router-tests/testdata/connectrpc/buf.yaml @@ -0,0 +1,9 @@ +version: v2 +modules: + - path: services +lint: + use: + - STANDARD +breaking: + use: + - FILE \ No newline at end of file diff --git a/router-tests/testdata/connectrpc/client/employee.v1/employeev1connect/service.connect.go b/router-tests/testdata/connectrpc/client/employee.v1/employeev1connect/service.connect.go new file mode 100644 index 0000000000..714da42fed --- /dev/null +++ b/router-tests/testdata/connectrpc/client/employee.v1/employeev1connect/service.connect.go @@ -0,0 +1,307 @@ +// Code generated by protoc-gen-connect-go. DO NOT EDIT. +// +// Source: employee.v1/service.proto + +package employeev1connect + +import ( + connect "connectrpc.com/connect" + context "context" + errors "errors" + employee_v1 "github.com/wundergraph/cosmo/router-tests/testdata/connectrpc/client/employee.v1" + http "net/http" + strings "strings" +) + +// This is a compile-time assertion to ensure that this generated file and the connect package are +// compatible. If you get a compiler error that this constant is not defined, this code was +// generated with a version of connect newer than the one compiled into your binary. You can fix the +// problem by either regenerating this code with an older version of connect or updating the connect +// version compiled into your binary. +const _ = connect.IsAtLeastVersion1_13_0 + +const ( + // EmployeeServiceName is the fully-qualified name of the EmployeeService service. + EmployeeServiceName = "employee.v1.EmployeeService" +) + +// These constants are the fully-qualified names of the RPCs defined in this package. They're +// exposed at runtime as Spec.Procedure and as the final two segments of the HTTP route. +// +// Note that these are different from the fully-qualified method names used by +// google.golang.org/protobuf/reflect/protoreflect. To convert from these constants to +// reflection-formatted method names, remove the leading slash and convert the remaining slash to a +// period. +const ( + // EmployeeServiceFindEmployeesByPetsProcedure is the fully-qualified name of the EmployeeService's + // FindEmployeesByPets RPC. + EmployeeServiceFindEmployeesByPetsProcedure = "/employee.v1.EmployeeService/FindEmployeesByPets" + // EmployeeServiceFindEmployeesByPetsInlineFragmentProcedure is the fully-qualified name of the + // EmployeeService's FindEmployeesByPetsInlineFragment RPC. 
+ EmployeeServiceFindEmployeesByPetsInlineFragmentProcedure = "/employee.v1.EmployeeService/FindEmployeesByPetsInlineFragment" + // EmployeeServiceFindEmployeesByPetsNamedFragmentProcedure is the fully-qualified name of the + // EmployeeService's FindEmployeesByPetsNamedFragment RPC. + EmployeeServiceFindEmployeesByPetsNamedFragmentProcedure = "/employee.v1.EmployeeService/FindEmployeesByPetsNamedFragment" + // EmployeeServiceGetEmployeeByIdProcedure is the fully-qualified name of the EmployeeService's + // GetEmployeeById RPC. + EmployeeServiceGetEmployeeByIdProcedure = "/employee.v1.EmployeeService/GetEmployeeById" + // EmployeeServiceGetEmployeesProcedure is the fully-qualified name of the EmployeeService's + // GetEmployees RPC. + EmployeeServiceGetEmployeesProcedure = "/employee.v1.EmployeeService/GetEmployees" + // EmployeeServiceGetEmployeesWithMoodProcedure is the fully-qualified name of the EmployeeService's + // GetEmployeesWithMood RPC. + EmployeeServiceGetEmployeesWithMoodProcedure = "/employee.v1.EmployeeService/GetEmployeesWithMood" + // EmployeeServiceUpdateEmployeeMoodProcedure is the fully-qualified name of the EmployeeService's + // UpdateEmployeeMood RPC. + EmployeeServiceUpdateEmployeeMoodProcedure = "/employee.v1.EmployeeService/UpdateEmployeeMood" +) + +// These variables are the protoreflect.Descriptor objects for the RPCs defined in this package. +var ( + employeeServiceServiceDescriptor = employee_v1.File_employee_v1_service_proto.Services().ByName("EmployeeService") + employeeServiceFindEmployeesByPetsMethodDescriptor = employeeServiceServiceDescriptor.Methods().ByName("FindEmployeesByPets") + employeeServiceFindEmployeesByPetsInlineFragmentMethodDescriptor = employeeServiceServiceDescriptor.Methods().ByName("FindEmployeesByPetsInlineFragment") + employeeServiceFindEmployeesByPetsNamedFragmentMethodDescriptor = employeeServiceServiceDescriptor.Methods().ByName("FindEmployeesByPetsNamedFragment") + employeeServiceGetEmployeeByIdMethodDescriptor = employeeServiceServiceDescriptor.Methods().ByName("GetEmployeeById") + employeeServiceGetEmployeesMethodDescriptor = employeeServiceServiceDescriptor.Methods().ByName("GetEmployees") + employeeServiceGetEmployeesWithMoodMethodDescriptor = employeeServiceServiceDescriptor.Methods().ByName("GetEmployeesWithMood") + employeeServiceUpdateEmployeeMoodMethodDescriptor = employeeServiceServiceDescriptor.Methods().ByName("UpdateEmployeeMood") +) + +// EmployeeServiceClient is a client for the employee.v1.EmployeeService service. 
+type EmployeeServiceClient interface { + FindEmployeesByPets(context.Context, *connect.Request[employee_v1.FindEmployeesByPetsRequest]) (*connect.Response[employee_v1.FindEmployeesByPetsResponse], error) + FindEmployeesByPetsInlineFragment(context.Context, *connect.Request[employee_v1.FindEmployeesByPetsInlineFragmentRequest]) (*connect.Response[employee_v1.FindEmployeesByPetsInlineFragmentResponse], error) + FindEmployeesByPetsNamedFragment(context.Context, *connect.Request[employee_v1.FindEmployeesByPetsNamedFragmentRequest]) (*connect.Response[employee_v1.FindEmployeesByPetsNamedFragmentResponse], error) + GetEmployeeById(context.Context, *connect.Request[employee_v1.GetEmployeeByIdRequest]) (*connect.Response[employee_v1.GetEmployeeByIdResponse], error) + GetEmployees(context.Context, *connect.Request[employee_v1.GetEmployeesRequest]) (*connect.Response[employee_v1.GetEmployeesResponse], error) + GetEmployeesWithMood(context.Context, *connect.Request[employee_v1.GetEmployeesWithMoodRequest]) (*connect.Response[employee_v1.GetEmployeesWithMoodResponse], error) + UpdateEmployeeMood(context.Context, *connect.Request[employee_v1.UpdateEmployeeMoodRequest]) (*connect.Response[employee_v1.UpdateEmployeeMoodResponse], error) +} + +// NewEmployeeServiceClient constructs a client for the employee.v1.EmployeeService service. By +// default, it uses the Connect protocol with the binary Protobuf Codec, asks for gzipped responses, +// and sends uncompressed requests. To use the gRPC or gRPC-Web protocols, supply the +// connect.WithGRPC() or connect.WithGRPCWeb() options. +// +// The URL supplied here should be the base URL for the Connect or gRPC server (for example, +// http://api.acme.com or https://acme.com/grpc). +func NewEmployeeServiceClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) EmployeeServiceClient { + baseURL = strings.TrimRight(baseURL, "/") + return &employeeServiceClient{ + findEmployeesByPets: connect.NewClient[employee_v1.FindEmployeesByPetsRequest, employee_v1.FindEmployeesByPetsResponse]( + httpClient, + baseURL+EmployeeServiceFindEmployeesByPetsProcedure, + connect.WithSchema(employeeServiceFindEmployeesByPetsMethodDescriptor), + connect.WithIdempotency(connect.IdempotencyNoSideEffects), + connect.WithClientOptions(opts...), + ), + findEmployeesByPetsInlineFragment: connect.NewClient[employee_v1.FindEmployeesByPetsInlineFragmentRequest, employee_v1.FindEmployeesByPetsInlineFragmentResponse]( + httpClient, + baseURL+EmployeeServiceFindEmployeesByPetsInlineFragmentProcedure, + connect.WithSchema(employeeServiceFindEmployeesByPetsInlineFragmentMethodDescriptor), + connect.WithIdempotency(connect.IdempotencyNoSideEffects), + connect.WithClientOptions(opts...), + ), + findEmployeesByPetsNamedFragment: connect.NewClient[employee_v1.FindEmployeesByPetsNamedFragmentRequest, employee_v1.FindEmployeesByPetsNamedFragmentResponse]( + httpClient, + baseURL+EmployeeServiceFindEmployeesByPetsNamedFragmentProcedure, + connect.WithSchema(employeeServiceFindEmployeesByPetsNamedFragmentMethodDescriptor), + connect.WithIdempotency(connect.IdempotencyNoSideEffects), + connect.WithClientOptions(opts...), + ), + getEmployeeById: connect.NewClient[employee_v1.GetEmployeeByIdRequest, employee_v1.GetEmployeeByIdResponse]( + httpClient, + baseURL+EmployeeServiceGetEmployeeByIdProcedure, + connect.WithSchema(employeeServiceGetEmployeeByIdMethodDescriptor), + connect.WithIdempotency(connect.IdempotencyNoSideEffects), + connect.WithClientOptions(opts...), + ), + 
getEmployees: connect.NewClient[employee_v1.GetEmployeesRequest, employee_v1.GetEmployeesResponse]( + httpClient, + baseURL+EmployeeServiceGetEmployeesProcedure, + connect.WithSchema(employeeServiceGetEmployeesMethodDescriptor), + connect.WithIdempotency(connect.IdempotencyNoSideEffects), + connect.WithClientOptions(opts...), + ), + getEmployeesWithMood: connect.NewClient[employee_v1.GetEmployeesWithMoodRequest, employee_v1.GetEmployeesWithMoodResponse]( + httpClient, + baseURL+EmployeeServiceGetEmployeesWithMoodProcedure, + connect.WithSchema(employeeServiceGetEmployeesWithMoodMethodDescriptor), + connect.WithIdempotency(connect.IdempotencyNoSideEffects), + connect.WithClientOptions(opts...), + ), + updateEmployeeMood: connect.NewClient[employee_v1.UpdateEmployeeMoodRequest, employee_v1.UpdateEmployeeMoodResponse]( + httpClient, + baseURL+EmployeeServiceUpdateEmployeeMoodProcedure, + connect.WithSchema(employeeServiceUpdateEmployeeMoodMethodDescriptor), + connect.WithClientOptions(opts...), + ), + } +} + +// employeeServiceClient implements EmployeeServiceClient. +type employeeServiceClient struct { + findEmployeesByPets *connect.Client[employee_v1.FindEmployeesByPetsRequest, employee_v1.FindEmployeesByPetsResponse] + findEmployeesByPetsInlineFragment *connect.Client[employee_v1.FindEmployeesByPetsInlineFragmentRequest, employee_v1.FindEmployeesByPetsInlineFragmentResponse] + findEmployeesByPetsNamedFragment *connect.Client[employee_v1.FindEmployeesByPetsNamedFragmentRequest, employee_v1.FindEmployeesByPetsNamedFragmentResponse] + getEmployeeById *connect.Client[employee_v1.GetEmployeeByIdRequest, employee_v1.GetEmployeeByIdResponse] + getEmployees *connect.Client[employee_v1.GetEmployeesRequest, employee_v1.GetEmployeesResponse] + getEmployeesWithMood *connect.Client[employee_v1.GetEmployeesWithMoodRequest, employee_v1.GetEmployeesWithMoodResponse] + updateEmployeeMood *connect.Client[employee_v1.UpdateEmployeeMoodRequest, employee_v1.UpdateEmployeeMoodResponse] +} + +// FindEmployeesByPets calls employee.v1.EmployeeService.FindEmployeesByPets. +func (c *employeeServiceClient) FindEmployeesByPets(ctx context.Context, req *connect.Request[employee_v1.FindEmployeesByPetsRequest]) (*connect.Response[employee_v1.FindEmployeesByPetsResponse], error) { + return c.findEmployeesByPets.CallUnary(ctx, req) +} + +// FindEmployeesByPetsInlineFragment calls +// employee.v1.EmployeeService.FindEmployeesByPetsInlineFragment. +func (c *employeeServiceClient) FindEmployeesByPetsInlineFragment(ctx context.Context, req *connect.Request[employee_v1.FindEmployeesByPetsInlineFragmentRequest]) (*connect.Response[employee_v1.FindEmployeesByPetsInlineFragmentResponse], error) { + return c.findEmployeesByPetsInlineFragment.CallUnary(ctx, req) +} + +// FindEmployeesByPetsNamedFragment calls +// employee.v1.EmployeeService.FindEmployeesByPetsNamedFragment. +func (c *employeeServiceClient) FindEmployeesByPetsNamedFragment(ctx context.Context, req *connect.Request[employee_v1.FindEmployeesByPetsNamedFragmentRequest]) (*connect.Response[employee_v1.FindEmployeesByPetsNamedFragmentResponse], error) { + return c.findEmployeesByPetsNamedFragment.CallUnary(ctx, req) +} + +// GetEmployeeById calls employee.v1.EmployeeService.GetEmployeeById. 
+func (c *employeeServiceClient) GetEmployeeById(ctx context.Context, req *connect.Request[employee_v1.GetEmployeeByIdRequest]) (*connect.Response[employee_v1.GetEmployeeByIdResponse], error) { + return c.getEmployeeById.CallUnary(ctx, req) +} + +// GetEmployees calls employee.v1.EmployeeService.GetEmployees. +func (c *employeeServiceClient) GetEmployees(ctx context.Context, req *connect.Request[employee_v1.GetEmployeesRequest]) (*connect.Response[employee_v1.GetEmployeesResponse], error) { + return c.getEmployees.CallUnary(ctx, req) +} + +// GetEmployeesWithMood calls employee.v1.EmployeeService.GetEmployeesWithMood. +func (c *employeeServiceClient) GetEmployeesWithMood(ctx context.Context, req *connect.Request[employee_v1.GetEmployeesWithMoodRequest]) (*connect.Response[employee_v1.GetEmployeesWithMoodResponse], error) { + return c.getEmployeesWithMood.CallUnary(ctx, req) +} + +// UpdateEmployeeMood calls employee.v1.EmployeeService.UpdateEmployeeMood. +func (c *employeeServiceClient) UpdateEmployeeMood(ctx context.Context, req *connect.Request[employee_v1.UpdateEmployeeMoodRequest]) (*connect.Response[employee_v1.UpdateEmployeeMoodResponse], error) { + return c.updateEmployeeMood.CallUnary(ctx, req) +} + +// EmployeeServiceHandler is an implementation of the employee.v1.EmployeeService service. +type EmployeeServiceHandler interface { + FindEmployeesByPets(context.Context, *connect.Request[employee_v1.FindEmployeesByPetsRequest]) (*connect.Response[employee_v1.FindEmployeesByPetsResponse], error) + FindEmployeesByPetsInlineFragment(context.Context, *connect.Request[employee_v1.FindEmployeesByPetsInlineFragmentRequest]) (*connect.Response[employee_v1.FindEmployeesByPetsInlineFragmentResponse], error) + FindEmployeesByPetsNamedFragment(context.Context, *connect.Request[employee_v1.FindEmployeesByPetsNamedFragmentRequest]) (*connect.Response[employee_v1.FindEmployeesByPetsNamedFragmentResponse], error) + GetEmployeeById(context.Context, *connect.Request[employee_v1.GetEmployeeByIdRequest]) (*connect.Response[employee_v1.GetEmployeeByIdResponse], error) + GetEmployees(context.Context, *connect.Request[employee_v1.GetEmployeesRequest]) (*connect.Response[employee_v1.GetEmployeesResponse], error) + GetEmployeesWithMood(context.Context, *connect.Request[employee_v1.GetEmployeesWithMoodRequest]) (*connect.Response[employee_v1.GetEmployeesWithMoodResponse], error) + UpdateEmployeeMood(context.Context, *connect.Request[employee_v1.UpdateEmployeeMoodRequest]) (*connect.Response[employee_v1.UpdateEmployeeMoodResponse], error) +} + +// NewEmployeeServiceHandler builds an HTTP handler from the service implementation. It returns the +// path on which to mount the handler and the handler itself. +// +// By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf +// and JSON codecs. They also support gzip compression. 
+func NewEmployeeServiceHandler(svc EmployeeServiceHandler, opts ...connect.HandlerOption) (string, http.Handler) { + employeeServiceFindEmployeesByPetsHandler := connect.NewUnaryHandler( + EmployeeServiceFindEmployeesByPetsProcedure, + svc.FindEmployeesByPets, + connect.WithSchema(employeeServiceFindEmployeesByPetsMethodDescriptor), + connect.WithIdempotency(connect.IdempotencyNoSideEffects), + connect.WithHandlerOptions(opts...), + ) + employeeServiceFindEmployeesByPetsInlineFragmentHandler := connect.NewUnaryHandler( + EmployeeServiceFindEmployeesByPetsInlineFragmentProcedure, + svc.FindEmployeesByPetsInlineFragment, + connect.WithSchema(employeeServiceFindEmployeesByPetsInlineFragmentMethodDescriptor), + connect.WithIdempotency(connect.IdempotencyNoSideEffects), + connect.WithHandlerOptions(opts...), + ) + employeeServiceFindEmployeesByPetsNamedFragmentHandler := connect.NewUnaryHandler( + EmployeeServiceFindEmployeesByPetsNamedFragmentProcedure, + svc.FindEmployeesByPetsNamedFragment, + connect.WithSchema(employeeServiceFindEmployeesByPetsNamedFragmentMethodDescriptor), + connect.WithIdempotency(connect.IdempotencyNoSideEffects), + connect.WithHandlerOptions(opts...), + ) + employeeServiceGetEmployeeByIdHandler := connect.NewUnaryHandler( + EmployeeServiceGetEmployeeByIdProcedure, + svc.GetEmployeeById, + connect.WithSchema(employeeServiceGetEmployeeByIdMethodDescriptor), + connect.WithIdempotency(connect.IdempotencyNoSideEffects), + connect.WithHandlerOptions(opts...), + ) + employeeServiceGetEmployeesHandler := connect.NewUnaryHandler( + EmployeeServiceGetEmployeesProcedure, + svc.GetEmployees, + connect.WithSchema(employeeServiceGetEmployeesMethodDescriptor), + connect.WithIdempotency(connect.IdempotencyNoSideEffects), + connect.WithHandlerOptions(opts...), + ) + employeeServiceGetEmployeesWithMoodHandler := connect.NewUnaryHandler( + EmployeeServiceGetEmployeesWithMoodProcedure, + svc.GetEmployeesWithMood, + connect.WithSchema(employeeServiceGetEmployeesWithMoodMethodDescriptor), + connect.WithIdempotency(connect.IdempotencyNoSideEffects), + connect.WithHandlerOptions(opts...), + ) + employeeServiceUpdateEmployeeMoodHandler := connect.NewUnaryHandler( + EmployeeServiceUpdateEmployeeMoodProcedure, + svc.UpdateEmployeeMood, + connect.WithSchema(employeeServiceUpdateEmployeeMoodMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + return "/employee.v1.EmployeeService/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case EmployeeServiceFindEmployeesByPetsProcedure: + employeeServiceFindEmployeesByPetsHandler.ServeHTTP(w, r) + case EmployeeServiceFindEmployeesByPetsInlineFragmentProcedure: + employeeServiceFindEmployeesByPetsInlineFragmentHandler.ServeHTTP(w, r) + case EmployeeServiceFindEmployeesByPetsNamedFragmentProcedure: + employeeServiceFindEmployeesByPetsNamedFragmentHandler.ServeHTTP(w, r) + case EmployeeServiceGetEmployeeByIdProcedure: + employeeServiceGetEmployeeByIdHandler.ServeHTTP(w, r) + case EmployeeServiceGetEmployeesProcedure: + employeeServiceGetEmployeesHandler.ServeHTTP(w, r) + case EmployeeServiceGetEmployeesWithMoodProcedure: + employeeServiceGetEmployeesWithMoodHandler.ServeHTTP(w, r) + case EmployeeServiceUpdateEmployeeMoodProcedure: + employeeServiceUpdateEmployeeMoodHandler.ServeHTTP(w, r) + default: + http.NotFound(w, r) + } + }) +} + +// UnimplementedEmployeeServiceHandler returns CodeUnimplemented from all methods. 
+type UnimplementedEmployeeServiceHandler struct{} + +func (UnimplementedEmployeeServiceHandler) FindEmployeesByPets(context.Context, *connect.Request[employee_v1.FindEmployeesByPetsRequest]) (*connect.Response[employee_v1.FindEmployeesByPetsResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("employee.v1.EmployeeService.FindEmployeesByPets is not implemented")) +} + +func (UnimplementedEmployeeServiceHandler) FindEmployeesByPetsInlineFragment(context.Context, *connect.Request[employee_v1.FindEmployeesByPetsInlineFragmentRequest]) (*connect.Response[employee_v1.FindEmployeesByPetsInlineFragmentResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("employee.v1.EmployeeService.FindEmployeesByPetsInlineFragment is not implemented")) +} + +func (UnimplementedEmployeeServiceHandler) FindEmployeesByPetsNamedFragment(context.Context, *connect.Request[employee_v1.FindEmployeesByPetsNamedFragmentRequest]) (*connect.Response[employee_v1.FindEmployeesByPetsNamedFragmentResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("employee.v1.EmployeeService.FindEmployeesByPetsNamedFragment is not implemented")) +} + +func (UnimplementedEmployeeServiceHandler) GetEmployeeById(context.Context, *connect.Request[employee_v1.GetEmployeeByIdRequest]) (*connect.Response[employee_v1.GetEmployeeByIdResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("employee.v1.EmployeeService.GetEmployeeById is not implemented")) +} + +func (UnimplementedEmployeeServiceHandler) GetEmployees(context.Context, *connect.Request[employee_v1.GetEmployeesRequest]) (*connect.Response[employee_v1.GetEmployeesResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("employee.v1.EmployeeService.GetEmployees is not implemented")) +} + +func (UnimplementedEmployeeServiceHandler) GetEmployeesWithMood(context.Context, *connect.Request[employee_v1.GetEmployeesWithMoodRequest]) (*connect.Response[employee_v1.GetEmployeesWithMoodResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("employee.v1.EmployeeService.GetEmployeesWithMood is not implemented")) +} + +func (UnimplementedEmployeeServiceHandler) UpdateEmployeeMood(context.Context, *connect.Request[employee_v1.UpdateEmployeeMoodRequest]) (*connect.Response[employee_v1.UpdateEmployeeMoodResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("employee.v1.EmployeeService.UpdateEmployeeMood is not implemented")) +} diff --git a/router-tests/testdata/connectrpc/client/employee.v1/service.pb.go b/router-tests/testdata/connectrpc/client/employee.v1/service.pb.go new file mode 100644 index 0000000000..c7d7710246 --- /dev/null +++ b/router-tests/testdata/connectrpc/client/employee.v1/service.pb.go @@ -0,0 +1,2811 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc (unknown) +// source: employee.v1/service.proto + +package employeev1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Mood int32 + +const ( + Mood_MOOD_UNSPECIFIED Mood = 0 + Mood_MOOD_HAPPY Mood = 1 + Mood_MOOD_SAD Mood = 2 +) + +// Enum value maps for Mood. +var ( + Mood_name = map[int32]string{ + 0: "MOOD_UNSPECIFIED", + 1: "MOOD_HAPPY", + 2: "MOOD_SAD", + } + Mood_value = map[string]int32{ + "MOOD_UNSPECIFIED": 0, + "MOOD_HAPPY": 1, + "MOOD_SAD": 2, + } +) + +func (x Mood) Enum() *Mood { + p := new(Mood) + *p = x + return p +} + +func (x Mood) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Mood) Descriptor() protoreflect.EnumDescriptor { + return file_employee_v1_service_proto_enumTypes[0].Descriptor() +} + +func (Mood) Type() protoreflect.EnumType { + return &file_employee_v1_service_proto_enumTypes[0] +} + +func (x Mood) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Mood.Descriptor instead. +func (Mood) EnumDescriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{0} +} + +type Gender int32 + +const ( + Gender_GENDER_UNSPECIFIED Gender = 0 + Gender_GENDER_FEMALE Gender = 1 + Gender_GENDER_MALE Gender = 2 + Gender_GENDER_UNKNOWN Gender = 3 +) + +// Enum value maps for Gender. +var ( + Gender_name = map[int32]string{ + 0: "GENDER_UNSPECIFIED", + 1: "GENDER_FEMALE", + 2: "GENDER_MALE", + 3: "GENDER_UNKNOWN", + } + Gender_value = map[string]int32{ + "GENDER_UNSPECIFIED": 0, + "GENDER_FEMALE": 1, + "GENDER_MALE": 2, + "GENDER_UNKNOWN": 3, + } +) + +func (x Gender) Enum() *Gender { + p := new(Gender) + *p = x + return p +} + +func (x Gender) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Gender) Descriptor() protoreflect.EnumDescriptor { + return file_employee_v1_service_proto_enumTypes[1].Descriptor() +} + +func (Gender) Type() protoreflect.EnumType { + return &file_employee_v1_service_proto_enumTypes[1] +} + +func (x Gender) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Gender.Descriptor instead. +func (Gender) EnumDescriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{1} +} + +type UpdateEmployeeMoodRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + EmployeeId int32 `protobuf:"varint,1,opt,name=employee_id,json=employeeId,proto3" json:"employee_id,omitempty"` + Mood Mood `protobuf:"varint,2,opt,name=mood,proto3,enum=employee.v1.Mood" json:"mood,omitempty"` +} + +func (x *UpdateEmployeeMoodRequest) Reset() { + *x = UpdateEmployeeMoodRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateEmployeeMoodRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateEmployeeMoodRequest) ProtoMessage() {} + +func (x *UpdateEmployeeMoodRequest) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateEmployeeMoodRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateEmployeeMoodRequest) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{0} +} + +func (x *UpdateEmployeeMoodRequest) GetEmployeeId() int32 { + if x != nil { + return x.EmployeeId + } + return 0 +} + +func (x *UpdateEmployeeMoodRequest) GetMood() Mood { + if x != nil { + return x.Mood + } + return Mood_MOOD_UNSPECIFIED +} + +type UpdateEmployeeMoodResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This mutation update the mood of an employee. + UpdateMood *UpdateEmployeeMoodResponse_UpdateMood `protobuf:"bytes,1,opt,name=update_mood,json=updateMood,proto3" json:"update_mood,omitempty"` +} + +func (x *UpdateEmployeeMoodResponse) Reset() { + *x = UpdateEmployeeMoodResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateEmployeeMoodResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateEmployeeMoodResponse) ProtoMessage() {} + +func (x *UpdateEmployeeMoodResponse) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateEmployeeMoodResponse.ProtoReflect.Descriptor instead. +func (*UpdateEmployeeMoodResponse) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{1} +} + +func (x *UpdateEmployeeMoodResponse) GetUpdateMood() *UpdateEmployeeMoodResponse_UpdateMood { + if x != nil { + return x.UpdateMood + } + return nil +} + +type GetEmployeeByIdRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + EmployeeId int32 `protobuf:"varint,1,opt,name=employee_id,json=employeeId,proto3" json:"employee_id,omitempty"` +} + +func (x *GetEmployeeByIdRequest) Reset() { + *x = GetEmployeeByIdRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEmployeeByIdRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEmployeeByIdRequest) ProtoMessage() {} + +func (x *GetEmployeeByIdRequest) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEmployeeByIdRequest.ProtoReflect.Descriptor instead. 
+func (*GetEmployeeByIdRequest) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{2} +} + +func (x *GetEmployeeByIdRequest) GetEmployeeId() int32 { + if x != nil { + return x.EmployeeId + } + return 0 +} + +type GetEmployeeByIdResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Employee *GetEmployeeByIdResponse_Employee `protobuf:"bytes,1,opt,name=employee,proto3" json:"employee,omitempty"` +} + +func (x *GetEmployeeByIdResponse) Reset() { + *x = GetEmployeeByIdResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEmployeeByIdResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEmployeeByIdResponse) ProtoMessage() {} + +func (x *GetEmployeeByIdResponse) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEmployeeByIdResponse.ProtoReflect.Descriptor instead. +func (*GetEmployeeByIdResponse) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{3} +} + +func (x *GetEmployeeByIdResponse) GetEmployee() *GetEmployeeByIdResponse_Employee { + if x != nil { + return x.Employee + } + return nil +} + +type FindEmployeesByPetsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + HasPets bool `protobuf:"varint,1,opt,name=has_pets,json=hasPets,proto3" json:"has_pets,omitempty"` +} + +func (x *FindEmployeesByPetsRequest) Reset() { + *x = FindEmployeesByPetsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindEmployeesByPetsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindEmployeesByPetsRequest) ProtoMessage() {} + +func (x *FindEmployeesByPetsRequest) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindEmployeesByPetsRequest.ProtoReflect.Descriptor instead. +func (*FindEmployeesByPetsRequest) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{4} +} + +func (x *FindEmployeesByPetsRequest) GetHasPets() bool { + if x != nil { + return x.HasPets + } + return false +} + +type FindEmployeesByPetsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is a GraphQL query that retrieves a list of employees. 
+ FindEmployees []*FindEmployeesByPetsResponse_FindEmployees `protobuf:"bytes,1,rep,name=find_employees,json=findEmployees,proto3" json:"find_employees,omitempty"` +} + +func (x *FindEmployeesByPetsResponse) Reset() { + *x = FindEmployeesByPetsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindEmployeesByPetsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindEmployeesByPetsResponse) ProtoMessage() {} + +func (x *FindEmployeesByPetsResponse) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindEmployeesByPetsResponse.ProtoReflect.Descriptor instead. +func (*FindEmployeesByPetsResponse) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{5} +} + +func (x *FindEmployeesByPetsResponse) GetFindEmployees() []*FindEmployeesByPetsResponse_FindEmployees { + if x != nil { + return x.FindEmployees + } + return nil +} + +type GetEmployeesWithMoodRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetEmployeesWithMoodRequest) Reset() { + *x = GetEmployeesWithMoodRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEmployeesWithMoodRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEmployeesWithMoodRequest) ProtoMessage() {} + +func (x *GetEmployeesWithMoodRequest) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEmployeesWithMoodRequest.ProtoReflect.Descriptor instead. 
+func (*GetEmployeesWithMoodRequest) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{6} +} + +type GetEmployeesWithMoodResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Employees []*GetEmployeesWithMoodResponse_Employees `protobuf:"bytes,1,rep,name=employees,proto3" json:"employees,omitempty"` +} + +func (x *GetEmployeesWithMoodResponse) Reset() { + *x = GetEmployeesWithMoodResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEmployeesWithMoodResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEmployeesWithMoodResponse) ProtoMessage() {} + +func (x *GetEmployeesWithMoodResponse) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEmployeesWithMoodResponse.ProtoReflect.Descriptor instead. +func (*GetEmployeesWithMoodResponse) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{7} +} + +func (x *GetEmployeesWithMoodResponse) GetEmployees() []*GetEmployeesWithMoodResponse_Employees { + if x != nil { + return x.Employees + } + return nil +} + +type GetEmployeesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetEmployeesRequest) Reset() { + *x = GetEmployeesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEmployeesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEmployeesRequest) ProtoMessage() {} + +func (x *GetEmployeesRequest) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEmployeesRequest.ProtoReflect.Descriptor instead. 
+func (*GetEmployeesRequest) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{8} +} + +type GetEmployeesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Employees []*GetEmployeesResponse_Employees `protobuf:"bytes,1,rep,name=employees,proto3" json:"employees,omitempty"` +} + +func (x *GetEmployeesResponse) Reset() { + *x = GetEmployeesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEmployeesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEmployeesResponse) ProtoMessage() {} + +func (x *GetEmployeesResponse) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEmployeesResponse.ProtoReflect.Descriptor instead. +func (*GetEmployeesResponse) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{9} +} + +func (x *GetEmployeesResponse) GetEmployees() []*GetEmployeesResponse_Employees { + if x != nil { + return x.Employees + } + return nil +} + +type FindEmployeesByPetsInlineFragmentRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + HasPets bool `protobuf:"varint,1,opt,name=has_pets,json=hasPets,proto3" json:"has_pets,omitempty"` +} + +func (x *FindEmployeesByPetsInlineFragmentRequest) Reset() { + *x = FindEmployeesByPetsInlineFragmentRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindEmployeesByPetsInlineFragmentRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindEmployeesByPetsInlineFragmentRequest) ProtoMessage() {} + +func (x *FindEmployeesByPetsInlineFragmentRequest) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindEmployeesByPetsInlineFragmentRequest.ProtoReflect.Descriptor instead. +func (*FindEmployeesByPetsInlineFragmentRequest) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{10} +} + +func (x *FindEmployeesByPetsInlineFragmentRequest) GetHasPets() bool { + if x != nil { + return x.HasPets + } + return false +} + +type FindEmployeesByPetsInlineFragmentResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is a GraphQL query that retrieves a list of employees. 
+ FindEmployees []*FindEmployeesByPetsInlineFragmentResponse_FindEmployees `protobuf:"bytes,1,rep,name=find_employees,json=findEmployees,proto3" json:"find_employees,omitempty"` +} + +func (x *FindEmployeesByPetsInlineFragmentResponse) Reset() { + *x = FindEmployeesByPetsInlineFragmentResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindEmployeesByPetsInlineFragmentResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindEmployeesByPetsInlineFragmentResponse) ProtoMessage() {} + +func (x *FindEmployeesByPetsInlineFragmentResponse) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindEmployeesByPetsInlineFragmentResponse.ProtoReflect.Descriptor instead. +func (*FindEmployeesByPetsInlineFragmentResponse) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{11} +} + +func (x *FindEmployeesByPetsInlineFragmentResponse) GetFindEmployees() []*FindEmployeesByPetsInlineFragmentResponse_FindEmployees { + if x != nil { + return x.FindEmployees + } + return nil +} + +type FindEmployeesByPetsNamedFragmentRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + HasPets bool `protobuf:"varint,1,opt,name=has_pets,json=hasPets,proto3" json:"has_pets,omitempty"` +} + +func (x *FindEmployeesByPetsNamedFragmentRequest) Reset() { + *x = FindEmployeesByPetsNamedFragmentRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindEmployeesByPetsNamedFragmentRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindEmployeesByPetsNamedFragmentRequest) ProtoMessage() {} + +func (x *FindEmployeesByPetsNamedFragmentRequest) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindEmployeesByPetsNamedFragmentRequest.ProtoReflect.Descriptor instead. +func (*FindEmployeesByPetsNamedFragmentRequest) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{12} +} + +func (x *FindEmployeesByPetsNamedFragmentRequest) GetHasPets() bool { + if x != nil { + return x.HasPets + } + return false +} + +type FindEmployeesByPetsNamedFragmentResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is a GraphQL query that retrieves a list of employees. 
+ FindEmployees []*FindEmployeesByPetsNamedFragmentResponse_FindEmployees `protobuf:"bytes,1,rep,name=find_employees,json=findEmployees,proto3" json:"find_employees,omitempty"` +} + +func (x *FindEmployeesByPetsNamedFragmentResponse) Reset() { + *x = FindEmployeesByPetsNamedFragmentResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindEmployeesByPetsNamedFragmentResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindEmployeesByPetsNamedFragmentResponse) ProtoMessage() {} + +func (x *FindEmployeesByPetsNamedFragmentResponse) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindEmployeesByPetsNamedFragmentResponse.ProtoReflect.Descriptor instead. +func (*FindEmployeesByPetsNamedFragmentResponse) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{13} +} + +func (x *FindEmployeesByPetsNamedFragmentResponse) GetFindEmployees() []*FindEmployeesByPetsNamedFragmentResponse_FindEmployees { + if x != nil { + return x.FindEmployees + } + return nil +} + +type UpdateEmployeeMoodResponse_UpdateMood struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Details *UpdateEmployeeMoodResponse_UpdateMood_Details `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` + CurrentMood Mood `protobuf:"varint,3,opt,name=current_mood,json=currentMood,proto3,enum=employee.v1.Mood" json:"current_mood,omitempty"` + DerivedMood Mood `protobuf:"varint,4,opt,name=derived_mood,json=derivedMood,proto3,enum=employee.v1.Mood" json:"derived_mood,omitempty"` +} + +func (x *UpdateEmployeeMoodResponse_UpdateMood) Reset() { + *x = UpdateEmployeeMoodResponse_UpdateMood{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateEmployeeMoodResponse_UpdateMood) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateEmployeeMoodResponse_UpdateMood) ProtoMessage() {} + +func (x *UpdateEmployeeMoodResponse_UpdateMood) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateEmployeeMoodResponse_UpdateMood.ProtoReflect.Descriptor instead. 
+func (*UpdateEmployeeMoodResponse_UpdateMood) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *UpdateEmployeeMoodResponse_UpdateMood) GetId() int32 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *UpdateEmployeeMoodResponse_UpdateMood) GetDetails() *UpdateEmployeeMoodResponse_UpdateMood_Details { + if x != nil { + return x.Details + } + return nil +} + +func (x *UpdateEmployeeMoodResponse_UpdateMood) GetCurrentMood() Mood { + if x != nil { + return x.CurrentMood + } + return Mood_MOOD_UNSPECIFIED +} + +func (x *UpdateEmployeeMoodResponse_UpdateMood) GetDerivedMood() Mood { + if x != nil { + return x.DerivedMood + } + return Mood_MOOD_UNSPECIFIED +} + +type UpdateEmployeeMoodResponse_UpdateMood_Details struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Forename string `protobuf:"bytes,5,opt,name=forename,proto3" json:"forename,omitempty"` + Surname string `protobuf:"bytes,6,opt,name=surname,proto3" json:"surname,omitempty"` +} + +func (x *UpdateEmployeeMoodResponse_UpdateMood_Details) Reset() { + *x = UpdateEmployeeMoodResponse_UpdateMood_Details{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateEmployeeMoodResponse_UpdateMood_Details) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateEmployeeMoodResponse_UpdateMood_Details) ProtoMessage() {} + +func (x *UpdateEmployeeMoodResponse_UpdateMood_Details) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateEmployeeMoodResponse_UpdateMood_Details.ProtoReflect.Descriptor instead. 
+func (*UpdateEmployeeMoodResponse_UpdateMood_Details) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{1, 0, 0} +} + +func (x *UpdateEmployeeMoodResponse_UpdateMood_Details) GetForename() string { + if x != nil { + return x.Forename + } + return "" +} + +func (x *UpdateEmployeeMoodResponse_UpdateMood_Details) GetSurname() string { + if x != nil { + return x.Surname + } + return "" +} + +type GetEmployeeByIdResponse_Employee struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Tag string `protobuf:"bytes,2,opt,name=tag,proto3" json:"tag,omitempty"` + Details *GetEmployeeByIdResponse_Employee_Details `protobuf:"bytes,3,opt,name=details,proto3" json:"details,omitempty"` +} + +func (x *GetEmployeeByIdResponse_Employee) Reset() { + *x = GetEmployeeByIdResponse_Employee{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEmployeeByIdResponse_Employee) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEmployeeByIdResponse_Employee) ProtoMessage() {} + +func (x *GetEmployeeByIdResponse_Employee) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEmployeeByIdResponse_Employee.ProtoReflect.Descriptor instead. +func (*GetEmployeeByIdResponse_Employee) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *GetEmployeeByIdResponse_Employee) GetId() int32 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *GetEmployeeByIdResponse_Employee) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +func (x *GetEmployeeByIdResponse_Employee) GetDetails() *GetEmployeeByIdResponse_Employee_Details { + if x != nil { + return x.Details + } + return nil +} + +type GetEmployeeByIdResponse_Employee_Details struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Forename string `protobuf:"bytes,5,opt,name=forename,proto3" json:"forename,omitempty"` + Surname string `protobuf:"bytes,6,opt,name=surname,proto3" json:"surname,omitempty"` + Pets []*GetEmployeeByIdResponse_Employee_Details_Pets `protobuf:"bytes,9,rep,name=pets,proto3" json:"pets,omitempty"` + Location *GetEmployeeByIdResponse_Employee_Details_Location `protobuf:"bytes,10,opt,name=location,proto3" json:"location,omitempty"` +} + +func (x *GetEmployeeByIdResponse_Employee_Details) Reset() { + *x = GetEmployeeByIdResponse_Employee_Details{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEmployeeByIdResponse_Employee_Details) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEmployeeByIdResponse_Employee_Details) ProtoMessage() {} + +func (x *GetEmployeeByIdResponse_Employee_Details) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEmployeeByIdResponse_Employee_Details.ProtoReflect.Descriptor instead. +func (*GetEmployeeByIdResponse_Employee_Details) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{3, 0, 0} +} + +func (x *GetEmployeeByIdResponse_Employee_Details) GetForename() string { + if x != nil { + return x.Forename + } + return "" +} + +func (x *GetEmployeeByIdResponse_Employee_Details) GetSurname() string { + if x != nil { + return x.Surname + } + return "" +} + +func (x *GetEmployeeByIdResponse_Employee_Details) GetPets() []*GetEmployeeByIdResponse_Employee_Details_Pets { + if x != nil { + return x.Pets + } + return nil +} + +func (x *GetEmployeeByIdResponse_Employee_Details) GetLocation() *GetEmployeeByIdResponse_Employee_Details_Location { + if x != nil { + return x.Location + } + return nil +} + +type GetEmployeeByIdResponse_Employee_Details_Pets struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetEmployeeByIdResponse_Employee_Details_Pets) Reset() { + *x = GetEmployeeByIdResponse_Employee_Details_Pets{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEmployeeByIdResponse_Employee_Details_Pets) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEmployeeByIdResponse_Employee_Details_Pets) ProtoMessage() {} + +func (x *GetEmployeeByIdResponse_Employee_Details_Pets) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEmployeeByIdResponse_Employee_Details_Pets.ProtoReflect.Descriptor instead. 
+func (*GetEmployeeByIdResponse_Employee_Details_Pets) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{3, 0, 0, 0} +} + +func (x *GetEmployeeByIdResponse_Employee_Details_Pets) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type GetEmployeeByIdResponse_Employee_Details_Location struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key *GetEmployeeByIdResponse_Employee_Details_Location_Key `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *GetEmployeeByIdResponse_Employee_Details_Location) Reset() { + *x = GetEmployeeByIdResponse_Employee_Details_Location{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEmployeeByIdResponse_Employee_Details_Location) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEmployeeByIdResponse_Employee_Details_Location) ProtoMessage() {} + +func (x *GetEmployeeByIdResponse_Employee_Details_Location) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEmployeeByIdResponse_Employee_Details_Location.ProtoReflect.Descriptor instead. +func (*GetEmployeeByIdResponse_Employee_Details_Location) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{3, 0, 0, 1} +} + +func (x *GetEmployeeByIdResponse_Employee_Details_Location) GetKey() *GetEmployeeByIdResponse_Employee_Details_Location_Key { + if x != nil { + return x.Key + } + return nil +} + +type GetEmployeeByIdResponse_Employee_Details_Location_Key struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetEmployeeByIdResponse_Employee_Details_Location_Key) Reset() { + *x = GetEmployeeByIdResponse_Employee_Details_Location_Key{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEmployeeByIdResponse_Employee_Details_Location_Key) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEmployeeByIdResponse_Employee_Details_Location_Key) ProtoMessage() {} + +func (x *GetEmployeeByIdResponse_Employee_Details_Location_Key) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEmployeeByIdResponse_Employee_Details_Location_Key.ProtoReflect.Descriptor instead. 
+func (*GetEmployeeByIdResponse_Employee_Details_Location_Key) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{3, 0, 0, 1, 0} +} + +func (x *GetEmployeeByIdResponse_Employee_Details_Location_Key) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type FindEmployeesByPetsResponse_FindEmployees struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Details *FindEmployeesByPetsResponse_FindEmployees_Details `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` +} + +func (x *FindEmployeesByPetsResponse_FindEmployees) Reset() { + *x = FindEmployeesByPetsResponse_FindEmployees{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindEmployeesByPetsResponse_FindEmployees) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindEmployeesByPetsResponse_FindEmployees) ProtoMessage() {} + +func (x *FindEmployeesByPetsResponse_FindEmployees) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindEmployeesByPetsResponse_FindEmployees.ProtoReflect.Descriptor instead. +func (*FindEmployeesByPetsResponse_FindEmployees) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *FindEmployeesByPetsResponse_FindEmployees) GetId() int32 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *FindEmployeesByPetsResponse_FindEmployees) GetDetails() *FindEmployeesByPetsResponse_FindEmployees_Details { + if x != nil { + return x.Details + } + return nil +} + +type FindEmployeesByPetsResponse_FindEmployees_Details struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Forename string `protobuf:"bytes,5,opt,name=forename,proto3" json:"forename,omitempty"` + Surname string `protobuf:"bytes,6,opt,name=surname,proto3" json:"surname,omitempty"` + Pets []*FindEmployeesByPetsResponse_FindEmployees_Details_Pets `protobuf:"bytes,9,rep,name=pets,proto3" json:"pets,omitempty"` +} + +func (x *FindEmployeesByPetsResponse_FindEmployees_Details) Reset() { + *x = FindEmployeesByPetsResponse_FindEmployees_Details{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindEmployeesByPetsResponse_FindEmployees_Details) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindEmployeesByPetsResponse_FindEmployees_Details) ProtoMessage() {} + +func (x *FindEmployeesByPetsResponse_FindEmployees_Details) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindEmployeesByPetsResponse_FindEmployees_Details.ProtoReflect.Descriptor instead. 
+func (*FindEmployeesByPetsResponse_FindEmployees_Details) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{5, 0, 0} +} + +func (x *FindEmployeesByPetsResponse_FindEmployees_Details) GetForename() string { + if x != nil { + return x.Forename + } + return "" +} + +func (x *FindEmployeesByPetsResponse_FindEmployees_Details) GetSurname() string { + if x != nil { + return x.Surname + } + return "" +} + +func (x *FindEmployeesByPetsResponse_FindEmployees_Details) GetPets() []*FindEmployeesByPetsResponse_FindEmployees_Details_Pets { + if x != nil { + return x.Pets + } + return nil +} + +type FindEmployeesByPetsResponse_FindEmployees_Details_Pets struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *FindEmployeesByPetsResponse_FindEmployees_Details_Pets) Reset() { + *x = FindEmployeesByPetsResponse_FindEmployees_Details_Pets{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindEmployeesByPetsResponse_FindEmployees_Details_Pets) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindEmployeesByPetsResponse_FindEmployees_Details_Pets) ProtoMessage() {} + +func (x *FindEmployeesByPetsResponse_FindEmployees_Details_Pets) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindEmployeesByPetsResponse_FindEmployees_Details_Pets.ProtoReflect.Descriptor instead. 
+func (*FindEmployeesByPetsResponse_FindEmployees_Details_Pets) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{5, 0, 0, 0} +} + +func (x *FindEmployeesByPetsResponse_FindEmployees_Details_Pets) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type GetEmployeesWithMoodResponse_Employees struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Details *GetEmployeesWithMoodResponse_Employees_Details `protobuf:"bytes,3,opt,name=details,proto3" json:"details,omitempty"` + CurrentMood Mood `protobuf:"varint,4,opt,name=current_mood,json=currentMood,proto3,enum=employee.v1.Mood" json:"current_mood,omitempty"` +} + +func (x *GetEmployeesWithMoodResponse_Employees) Reset() { + *x = GetEmployeesWithMoodResponse_Employees{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEmployeesWithMoodResponse_Employees) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEmployeesWithMoodResponse_Employees) ProtoMessage() {} + +func (x *GetEmployeesWithMoodResponse_Employees) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEmployeesWithMoodResponse_Employees.ProtoReflect.Descriptor instead. +func (*GetEmployeesWithMoodResponse_Employees) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{7, 0} +} + +func (x *GetEmployeesWithMoodResponse_Employees) GetId() int32 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *GetEmployeesWithMoodResponse_Employees) GetDetails() *GetEmployeesWithMoodResponse_Employees_Details { + if x != nil { + return x.Details + } + return nil +} + +func (x *GetEmployeesWithMoodResponse_Employees) GetCurrentMood() Mood { + if x != nil { + return x.CurrentMood + } + return Mood_MOOD_UNSPECIFIED +} + +type GetEmployeesWithMoodResponse_Employees_Details struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Pets []*GetEmployeesWithMoodResponse_Employees_Details_Pets `protobuf:"bytes,9,rep,name=pets,proto3" json:"pets,omitempty"` +} + +func (x *GetEmployeesWithMoodResponse_Employees_Details) Reset() { + *x = GetEmployeesWithMoodResponse_Employees_Details{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEmployeesWithMoodResponse_Employees_Details) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEmployeesWithMoodResponse_Employees_Details) ProtoMessage() {} + +func (x *GetEmployeesWithMoodResponse_Employees_Details) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
GetEmployeesWithMoodResponse_Employees_Details.ProtoReflect.Descriptor instead. +func (*GetEmployeesWithMoodResponse_Employees_Details) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{7, 0, 0} +} + +func (x *GetEmployeesWithMoodResponse_Employees_Details) GetPets() []*GetEmployeesWithMoodResponse_Employees_Details_Pets { + if x != nil { + return x.Pets + } + return nil +} + +type GetEmployeesWithMoodResponse_Employees_Details_Pets struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Gender Gender `protobuf:"varint,3,opt,name=gender,proto3,enum=employee.v1.Gender" json:"gender,omitempty"` +} + +func (x *GetEmployeesWithMoodResponse_Employees_Details_Pets) Reset() { + *x = GetEmployeesWithMoodResponse_Employees_Details_Pets{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEmployeesWithMoodResponse_Employees_Details_Pets) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEmployeesWithMoodResponse_Employees_Details_Pets) ProtoMessage() {} + +func (x *GetEmployeesWithMoodResponse_Employees_Details_Pets) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEmployeesWithMoodResponse_Employees_Details_Pets.ProtoReflect.Descriptor instead. +func (*GetEmployeesWithMoodResponse_Employees_Details_Pets) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{7, 0, 0, 0} +} + +func (x *GetEmployeesWithMoodResponse_Employees_Details_Pets) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *GetEmployeesWithMoodResponse_Employees_Details_Pets) GetGender() Gender { + if x != nil { + return x.Gender + } + return Gender_GENDER_UNSPECIFIED +} + +type GetEmployeesResponse_Employees struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Details *GetEmployeesResponse_Employees_Details `protobuf:"bytes,3,opt,name=details,proto3" json:"details,omitempty"` +} + +func (x *GetEmployeesResponse_Employees) Reset() { + *x = GetEmployeesResponse_Employees{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEmployeesResponse_Employees) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEmployeesResponse_Employees) ProtoMessage() {} + +func (x *GetEmployeesResponse_Employees) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEmployeesResponse_Employees.ProtoReflect.Descriptor instead. 
+func (*GetEmployeesResponse_Employees) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{9, 0} +} + +func (x *GetEmployeesResponse_Employees) GetId() int32 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *GetEmployeesResponse_Employees) GetDetails() *GetEmployeesResponse_Employees_Details { + if x != nil { + return x.Details + } + return nil +} + +type GetEmployeesResponse_Employees_Details struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Forename string `protobuf:"bytes,11,opt,name=forename,proto3" json:"forename,omitempty"` + Surname string `protobuf:"bytes,12,opt,name=surname,proto3" json:"surname,omitempty"` + HasChildren bool `protobuf:"varint,13,opt,name=has_children,json=hasChildren,proto3" json:"has_children,omitempty"` +} + +func (x *GetEmployeesResponse_Employees_Details) Reset() { + *x = GetEmployeesResponse_Employees_Details{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEmployeesResponse_Employees_Details) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEmployeesResponse_Employees_Details) ProtoMessage() {} + +func (x *GetEmployeesResponse_Employees_Details) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEmployeesResponse_Employees_Details.ProtoReflect.Descriptor instead. +func (*GetEmployeesResponse_Employees_Details) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{9, 0, 0} +} + +func (x *GetEmployeesResponse_Employees_Details) GetForename() string { + if x != nil { + return x.Forename + } + return "" +} + +func (x *GetEmployeesResponse_Employees_Details) GetSurname() string { + if x != nil { + return x.Surname + } + return "" +} + +func (x *GetEmployeesResponse_Employees_Details) GetHasChildren() bool { + if x != nil { + return x.HasChildren + } + return false +} + +type FindEmployeesByPetsInlineFragmentResponse_FindEmployees struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Details *FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` +} + +func (x *FindEmployeesByPetsInlineFragmentResponse_FindEmployees) Reset() { + *x = FindEmployeesByPetsInlineFragmentResponse_FindEmployees{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindEmployeesByPetsInlineFragmentResponse_FindEmployees) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindEmployeesByPetsInlineFragmentResponse_FindEmployees) ProtoMessage() {} + +func (x *FindEmployeesByPetsInlineFragmentResponse_FindEmployees) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindEmployeesByPetsInlineFragmentResponse_FindEmployees.ProtoReflect.Descriptor instead. +func (*FindEmployeesByPetsInlineFragmentResponse_FindEmployees) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{11, 0} +} + +func (x *FindEmployeesByPetsInlineFragmentResponse_FindEmployees) GetId() int32 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *FindEmployeesByPetsInlineFragmentResponse_FindEmployees) GetDetails() *FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details { + if x != nil { + return x.Details + } + return nil +} + +type FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Forename string `protobuf:"bytes,11,opt,name=forename,proto3" json:"forename,omitempty"` + Surname string `protobuf:"bytes,12,opt,name=surname,proto3" json:"surname,omitempty"` + Pets []*FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details_Pets `protobuf:"bytes,14,rep,name=pets,proto3" json:"pets,omitempty"` +} + +func (x *FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details) Reset() { + *x = FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details) ProtoMessage() {} + +func (x *FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details.ProtoReflect.Descriptor instead. 
+func (*FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{11, 0, 0} +} + +func (x *FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details) GetForename() string { + if x != nil { + return x.Forename + } + return "" +} + +func (x *FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details) GetSurname() string { + if x != nil { + return x.Surname + } + return "" +} + +func (x *FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details) GetPets() []*FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details_Pets { + if x != nil { + return x.Pets + } + return nil +} + +type FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details_Pets struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details_Pets) Reset() { + *x = FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details_Pets{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details_Pets) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details_Pets) ProtoMessage() {} + +func (x *FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details_Pets) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details_Pets.ProtoReflect.Descriptor instead. 
+func (*FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details_Pets) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{11, 0, 0, 0} +} + +func (x *FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details_Pets) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type FindEmployeesByPetsNamedFragmentResponse_FindEmployees struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Details *FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` +} + +func (x *FindEmployeesByPetsNamedFragmentResponse_FindEmployees) Reset() { + *x = FindEmployeesByPetsNamedFragmentResponse_FindEmployees{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindEmployeesByPetsNamedFragmentResponse_FindEmployees) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindEmployeesByPetsNamedFragmentResponse_FindEmployees) ProtoMessage() {} + +func (x *FindEmployeesByPetsNamedFragmentResponse_FindEmployees) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindEmployeesByPetsNamedFragmentResponse_FindEmployees.ProtoReflect.Descriptor instead. +func (*FindEmployeesByPetsNamedFragmentResponse_FindEmployees) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{13, 0} +} + +func (x *FindEmployeesByPetsNamedFragmentResponse_FindEmployees) GetId() int32 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *FindEmployeesByPetsNamedFragmentResponse_FindEmployees) GetDetails() *FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details { + if x != nil { + return x.Details + } + return nil +} + +type FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Forename string `protobuf:"bytes,11,opt,name=forename,proto3" json:"forename,omitempty"` + Surname string `protobuf:"bytes,12,opt,name=surname,proto3" json:"surname,omitempty"` + Pets []*FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details_Pets `protobuf:"bytes,14,rep,name=pets,proto3" json:"pets,omitempty"` +} + +func (x *FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details) Reset() { + *x = FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details) ProtoMessage() {} + +func (x *FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details.ProtoReflect.Descriptor instead. +func (*FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{13, 0, 0} +} + +func (x *FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details) GetForename() string { + if x != nil { + return x.Forename + } + return "" +} + +func (x *FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details) GetSurname() string { + if x != nil { + return x.Surname + } + return "" +} + +func (x *FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details) GetPets() []*FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details_Pets { + if x != nil { + return x.Pets + } + return nil +} + +type FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details_Pets struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details_Pets) Reset() { + *x = FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details_Pets{} + if protoimpl.UnsafeEnabled { + mi := &file_employee_v1_service_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details_Pets) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details_Pets) ProtoMessage() {} + +func (x *FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details_Pets) ProtoReflect() protoreflect.Message { + mi := &file_employee_v1_service_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details_Pets.ProtoReflect.Descriptor instead. 
+func (*FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details_Pets) Descriptor() ([]byte, []int) { + return file_employee_v1_service_proto_rawDescGZIP(), []int{13, 0, 0, 0} +} + +func (x *FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details_Pets) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +var File_employee_v1_service_proto protoreflect.FileDescriptor + +var file_employee_v1_service_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x65, 0x6d, 0x70, + 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x22, 0x63, 0x0a, 0x19, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x4d, 0x6f, 0x6f, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x65, 0x6d, 0x70, 0x6c, + 0x6f, 0x79, 0x65, 0x65, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x04, 0x6d, 0x6f, 0x6f, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x6f, 0x64, 0x52, 0x04, 0x6d, 0x6f, 0x6f, 0x64, 0x22, 0x93, 0x03, + 0x0a, 0x1a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, + 0x4d, 0x6f, 0x6f, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x0b, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x6f, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x4d, 0x6f, + 0x6f, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4d, 0x6f, 0x6f, 0x64, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x6f, 0x6f, + 0x64, 0x1a, 0x9f, 0x02, 0x0a, 0x0a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x6f, 0x6f, 0x64, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x54, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x4d, 0x6f, + 0x6f, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4d, 0x6f, 0x6f, 0x64, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x07, 0x64, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x34, 0x0a, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, + 0x74, 0x5f, 0x6d, 0x6f, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x65, + 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x6f, 0x64, 0x52, + 0x0b, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x6f, 0x6f, 0x64, 0x12, 0x34, 0x0a, 0x0c, + 0x64, 0x65, 0x72, 0x69, 0x76, 0x65, 0x64, 0x5f, 0x6d, 0x6f, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x4d, 0x6f, 0x6f, 0x64, 0x52, 0x0b, 0x64, 0x65, 0x72, 0x69, 0x76, 0x65, 0x64, 0x4d, 0x6f, + 0x6f, 0x64, 0x1a, 0x3f, 0x0a, 0x07, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x1a, 0x0a, + 0x08, 0x66, 0x6f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 
0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x66, 0x6f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x72, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x72, 0x6e, + 0x61, 0x6d, 0x65, 0x22, 0x39, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, + 0x65, 0x65, 0x42, 0x79, 0x49, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, + 0x0b, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0a, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x49, 0x64, 0x22, 0xeb, + 0x04, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x42, 0x79, + 0x49, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x08, 0x65, 0x6d, + 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, + 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6d, + 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x42, 0x79, 0x49, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x52, 0x08, 0x65, 0x6d, 0x70, + 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x1a, 0x84, 0x04, 0x0a, 0x08, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, + 0x65, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x74, 0x61, 0x67, 0x12, 0x4f, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x42, + 0x79, 0x49, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x6d, 0x70, 0x6c, + 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x07, 0x64, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x84, 0x03, 0x0a, 0x07, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x6f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x6f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x75, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x04, 0x70, 0x65, 0x74, 0x73, 0x18, + 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x42, + 0x79, 0x49, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x6d, 0x70, 0x6c, + 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x50, 0x65, 0x74, + 0x73, 0x52, 0x04, 0x70, 0x65, 0x74, 0x73, 0x12, 0x5a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x65, 0x6d, 0x70, 0x6c, + 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6d, 0x70, 0x6c, 0x6f, + 0x79, 0x65, 0x65, 0x42, 0x79, 0x49, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0x1a, 0x0a, 0x04, 0x50, 0x65, 0x74, 0x73, 0x12, 0x12, 
0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x1a, + 0x7b, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, + 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, + 0x65, 0x65, 0x42, 0x79, 0x49, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, + 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x1a, 0x19, 0x0a, 0x03, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x37, 0x0a, 0x1a, + 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x42, 0x79, 0x50, + 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x68, 0x61, + 0x73, 0x5f, 0x70, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x68, 0x61, + 0x73, 0x50, 0x65, 0x74, 0x73, 0x22, 0xaf, 0x03, 0x0a, 0x1b, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, + 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x42, 0x79, 0x50, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x0e, 0x66, 0x69, 0x6e, 0x64, 0x5f, 0x65, 0x6d, + 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, + 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6e, 0x64, + 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x42, 0x79, 0x50, 0x65, 0x74, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, + 0x6f, 0x79, 0x65, 0x65, 0x73, 0x52, 0x0d, 0x66, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, + 0x79, 0x65, 0x65, 0x73, 0x1a, 0xb0, 0x02, 0x0a, 0x0d, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, + 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x58, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, + 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, + 0x65, 0x65, 0x73, 0x42, 0x79, 0x50, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x2e, + 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x1a, 0xb4, 0x01, 0x0a, 0x07, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x1a, 0x0a, 0x08, + 0x66, 0x6f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x66, 0x6f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x72, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x72, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x04, 0x70, 0x65, 0x74, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x43, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, + 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x42, 0x79, 0x50, 0x65, + 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x45, + 0x6d, 
0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x2e, 0x50, 0x65, 0x74, 0x73, 0x52, 0x04, 0x70, 0x65, 0x74, 0x73, 0x1a, 0x1a, 0x0a, 0x04, 0x50, + 0x65, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x1d, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x45, 0x6d, + 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x6f, 0x6f, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xc7, 0x03, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x45, 0x6d, + 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x6f, 0x6f, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x09, 0x65, 0x6d, 0x70, 0x6c, 0x6f, + 0x79, 0x65, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6d, 0x70, + 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6d, 0x70, 0x6c, + 0x6f, 0x79, 0x65, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x6f, 0x6f, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x52, + 0x09, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x1a, 0xd3, 0x02, 0x0a, 0x09, 0x45, + 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x55, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6d, 0x70, 0x6c, + 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6d, 0x70, 0x6c, 0x6f, + 0x79, 0x65, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x6f, 0x6f, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x2e, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, + 0x34, 0x0a, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x6f, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x6f, 0x64, 0x52, 0x0b, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, + 0x74, 0x4d, 0x6f, 0x6f, 0x64, 0x1a, 0xa8, 0x01, 0x0a, 0x07, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x12, 0x54, 0x0a, 0x04, 0x70, 0x65, 0x74, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x40, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, + 0x74, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x6f, + 0x6f, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x6d, 0x70, 0x6c, 0x6f, + 0x79, 0x65, 0x65, 0x73, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x50, 0x65, 0x74, + 0x73, 0x52, 0x04, 0x70, 0x65, 0x74, 0x73, 0x1a, 0x47, 0x0a, 0x04, 0x50, 0x65, 0x74, 0x73, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x06, 0x67, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x47, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x06, 0x67, 0x65, 0x6e, 0x64, 0x65, 0x72, + 0x22, 0x15, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xb2, 0x02, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x45, + 0x6d, 0x70, 0x6c, 0x6f, 0x79, 
0x65, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x49, 0x0a, 0x09, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, + 0x52, 0x09, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x1a, 0xce, 0x01, 0x0a, 0x09, + 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x64, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6d, 0x70, + 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6d, 0x70, 0x6c, + 0x6f, 0x79, 0x65, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x6d, + 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x62, 0x0a, 0x07, 0x44, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x6f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x6f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x75, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x73, 0x75, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x68, 0x61, 0x73, + 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0b, 0x68, 0x61, 0x73, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x22, 0x45, 0x0a, 0x28, + 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x42, 0x79, 0x50, + 0x65, 0x74, 0x73, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x68, 0x61, 0x73, 0x5f, + 0x70, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x68, 0x61, 0x73, 0x50, + 0x65, 0x74, 0x73, 0x22, 0xe7, 0x03, 0x0a, 0x29, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, + 0x6f, 0x79, 0x65, 0x65, 0x73, 0x42, 0x79, 0x50, 0x65, 0x74, 0x73, 0x49, 0x6e, 0x6c, 0x69, 0x6e, + 0x65, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x6b, 0x0a, 0x0e, 0x66, 0x69, 0x6e, 0x64, 0x5f, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, + 0x65, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x65, 0x6d, 0x70, 0x6c, + 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, + 0x6f, 0x79, 0x65, 0x65, 0x73, 0x42, 0x79, 0x50, 0x65, 0x74, 0x73, 0x49, 0x6e, 0x6c, 0x69, 0x6e, + 0x65, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x52, + 0x0d, 0x66, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x1a, 0xcc, + 0x02, 0x0a, 0x0d, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x66, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x4c, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 
0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x42, 0x79, 0x50, + 0x65, 0x74, 0x73, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, + 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0xc2, 0x01, 0x0a, 0x07, 0x44, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x6f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x6f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x73, 0x75, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x65, 0x0a, 0x04, 0x70, 0x65, + 0x74, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x51, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, + 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, + 0x79, 0x65, 0x65, 0x73, 0x42, 0x79, 0x50, 0x65, 0x74, 0x73, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, + 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x2e, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x50, 0x65, 0x74, 0x73, 0x52, 0x04, 0x70, 0x65, 0x74, + 0x73, 0x1a, 0x1a, 0x0a, 0x04, 0x50, 0x65, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x44, 0x0a, + 0x27, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x42, 0x79, + 0x50, 0x65, 0x74, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x68, 0x61, 0x73, 0x5f, + 0x70, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x68, 0x61, 0x73, 0x50, + 0x65, 0x74, 0x73, 0x22, 0xe3, 0x03, 0x0a, 0x28, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, + 0x6f, 0x79, 0x65, 0x65, 0x73, 0x42, 0x79, 0x50, 0x65, 0x74, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x6a, 0x0a, 0x0e, 0x66, 0x69, 0x6e, 0x64, 0x5f, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, + 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, + 0x79, 0x65, 0x65, 0x73, 0x42, 0x79, 0x50, 0x65, 0x74, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x46, + 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x52, 0x0d, 0x66, + 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x1a, 0xca, 0x02, 0x0a, + 0x0d, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x65, + 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x4b, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, + 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x42, 0x79, 
0x50, 0x65, 0x74, + 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, + 0x79, 0x65, 0x65, 0x73, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x07, 0x64, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0xc1, 0x01, 0x0a, 0x07, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x6f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x6f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x75, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x64, 0x0a, 0x04, 0x70, 0x65, 0x74, 0x73, 0x18, + 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x50, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, + 0x73, 0x42, 0x79, 0x50, 0x65, 0x74, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x46, 0x72, 0x61, 0x67, + 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, 0x6e, + 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x73, 0x2e, 0x50, 0x65, 0x74, 0x73, 0x52, 0x04, 0x70, 0x65, 0x74, 0x73, 0x1a, 0x1a, 0x0a, + 0x04, 0x50, 0x65, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x2a, 0x3a, 0x0a, 0x04, 0x4d, 0x6f, 0x6f, + 0x64, 0x12, 0x14, 0x0a, 0x10, 0x4d, 0x4f, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, 0x4f, 0x4f, 0x44, 0x5f, + 0x48, 0x41, 0x50, 0x50, 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x4d, 0x4f, 0x4f, 0x44, 0x5f, + 0x53, 0x41, 0x44, 0x10, 0x02, 0x2a, 0x58, 0x0a, 0x06, 0x47, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, + 0x16, 0x0a, 0x12, 0x47, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x44, 0x45, + 0x52, 0x5f, 0x46, 0x45, 0x4d, 0x41, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x47, 0x45, + 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x4d, 0x41, 0x4c, 0x45, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x47, + 0x45, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x03, 0x32, + 0xc9, 0x06, 0x0a, 0x0f, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x6d, 0x0a, 0x13, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, + 0x79, 0x65, 0x65, 0x73, 0x42, 0x79, 0x50, 0x65, 0x74, 0x73, 0x12, 0x27, 0x2e, 0x65, 0x6d, 0x70, + 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, + 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x42, 0x79, 0x50, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x42, + 0x79, 0x50, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x03, 0x90, + 0x02, 0x01, 0x12, 0x97, 0x01, 0x0a, 0x21, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, + 0x79, 0x65, 0x65, 0x73, 0x42, 0x79, 0x50, 0x65, 0x74, 0x73, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, + 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x35, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, + 0x79, 
0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, + 0x79, 0x65, 0x65, 0x73, 0x42, 0x79, 0x50, 0x65, 0x74, 0x73, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, + 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x36, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, + 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x42, 0x79, 0x50, 0x65, 0x74, + 0x73, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x03, 0x90, 0x02, 0x01, 0x12, 0x94, 0x01, 0x0a, + 0x20, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x42, 0x79, + 0x50, 0x65, 0x74, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, + 0x74, 0x12, 0x34, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x42, 0x79, 0x50, + 0x65, 0x74, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, + 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, + 0x65, 0x65, 0x73, 0x42, 0x79, 0x50, 0x65, 0x74, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x46, 0x72, + 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x03, + 0x90, 0x02, 0x01, 0x12, 0x61, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, + 0x65, 0x65, 0x42, 0x79, 0x49, 0x64, 0x12, 0x23, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, + 0x42, 0x79, 0x49, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x65, 0x6d, + 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6d, 0x70, + 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x42, 0x79, 0x49, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x03, 0x90, 0x02, 0x01, 0x12, 0x58, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x45, 0x6d, 0x70, + 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x12, 0x20, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, + 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, + 0x65, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x03, 0x90, 0x02, 0x01, + 0x12, 0x70, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, + 0x57, 0x69, 0x74, 0x68, 0x4d, 0x6f, 0x6f, 0x64, 0x12, 0x28, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, + 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, + 0x65, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x6f, 0x6f, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x73, 0x57, 0x69, 0x74, + 0x68, 0x4d, 0x6f, 0x6f, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x03, 0x90, + 0x02, 0x01, 0x12, 0x67, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6d, 0x70, 0x6c, + 0x6f, 0x79, 0x65, 0x65, 0x4d, 
0x6f, 0x6f, 0x64, 0x12, 0x26, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, + 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6d, 0x70, + 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x4d, 0x6f, 0x6f, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x27, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x4d, 0x6f, 0x6f, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0xc9, 0x01, 0x0a, 0x0f, + 0x63, 0x6f, 0x6d, 0x2e, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, 0x31, 0x42, + 0x0c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x5b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x77, 0x75, 0x6e, 0x64, + 0x65, 0x72, 0x67, 0x72, 0x61, 0x70, 0x68, 0x2f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x2f, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x72, 0x2d, 0x74, 0x65, 0x73, 0x74, 0x73, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x64, + 0x61, 0x74, 0x61, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x72, 0x70, 0x63, 0x2f, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x76, + 0x31, 0x3b, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x45, + 0x58, 0x58, 0xaa, 0x02, 0x0b, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x2e, 0x56, 0x31, + 0xca, 0x02, 0x0b, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, + 0x17, 0x45, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0c, 0x45, 0x6d, 0x70, 0x6c, 0x6f, + 0x79, 0x65, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_employee_v1_service_proto_rawDescOnce sync.Once + file_employee_v1_service_proto_rawDescData = file_employee_v1_service_proto_rawDesc +) + +func file_employee_v1_service_proto_rawDescGZIP() []byte { + file_employee_v1_service_proto_rawDescOnce.Do(func() { + file_employee_v1_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_employee_v1_service_proto_rawDescData) + }) + return file_employee_v1_service_proto_rawDescData +} + +var file_employee_v1_service_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_employee_v1_service_proto_msgTypes = make([]protoimpl.MessageInfo, 35) +var file_employee_v1_service_proto_goTypes = []any{ + (Mood)(0), // 0: employee.v1.Mood + (Gender)(0), // 1: employee.v1.Gender + (*UpdateEmployeeMoodRequest)(nil), // 2: employee.v1.UpdateEmployeeMoodRequest + (*UpdateEmployeeMoodResponse)(nil), // 3: employee.v1.UpdateEmployeeMoodResponse + (*GetEmployeeByIdRequest)(nil), // 4: employee.v1.GetEmployeeByIdRequest + (*GetEmployeeByIdResponse)(nil), // 5: employee.v1.GetEmployeeByIdResponse + (*FindEmployeesByPetsRequest)(nil), // 6: employee.v1.FindEmployeesByPetsRequest + (*FindEmployeesByPetsResponse)(nil), // 7: employee.v1.FindEmployeesByPetsResponse + (*GetEmployeesWithMoodRequest)(nil), // 8: employee.v1.GetEmployeesWithMoodRequest + (*GetEmployeesWithMoodResponse)(nil), // 9: employee.v1.GetEmployeesWithMoodResponse + (*GetEmployeesRequest)(nil), // 10: employee.v1.GetEmployeesRequest + (*GetEmployeesResponse)(nil), // 11: employee.v1.GetEmployeesResponse + (*FindEmployeesByPetsInlineFragmentRequest)(nil), // 12: employee.v1.FindEmployeesByPetsInlineFragmentRequest + (*FindEmployeesByPetsInlineFragmentResponse)(nil), // 13: 
employee.v1.FindEmployeesByPetsInlineFragmentResponse + (*FindEmployeesByPetsNamedFragmentRequest)(nil), // 14: employee.v1.FindEmployeesByPetsNamedFragmentRequest + (*FindEmployeesByPetsNamedFragmentResponse)(nil), // 15: employee.v1.FindEmployeesByPetsNamedFragmentResponse + (*UpdateEmployeeMoodResponse_UpdateMood)(nil), // 16: employee.v1.UpdateEmployeeMoodResponse.UpdateMood + (*UpdateEmployeeMoodResponse_UpdateMood_Details)(nil), // 17: employee.v1.UpdateEmployeeMoodResponse.UpdateMood.Details + (*GetEmployeeByIdResponse_Employee)(nil), // 18: employee.v1.GetEmployeeByIdResponse.Employee + (*GetEmployeeByIdResponse_Employee_Details)(nil), // 19: employee.v1.GetEmployeeByIdResponse.Employee.Details + (*GetEmployeeByIdResponse_Employee_Details_Pets)(nil), // 20: employee.v1.GetEmployeeByIdResponse.Employee.Details.Pets + (*GetEmployeeByIdResponse_Employee_Details_Location)(nil), // 21: employee.v1.GetEmployeeByIdResponse.Employee.Details.Location + (*GetEmployeeByIdResponse_Employee_Details_Location_Key)(nil), // 22: employee.v1.GetEmployeeByIdResponse.Employee.Details.Location.Key + (*FindEmployeesByPetsResponse_FindEmployees)(nil), // 23: employee.v1.FindEmployeesByPetsResponse.FindEmployees + (*FindEmployeesByPetsResponse_FindEmployees_Details)(nil), // 24: employee.v1.FindEmployeesByPetsResponse.FindEmployees.Details + (*FindEmployeesByPetsResponse_FindEmployees_Details_Pets)(nil), // 25: employee.v1.FindEmployeesByPetsResponse.FindEmployees.Details.Pets + (*GetEmployeesWithMoodResponse_Employees)(nil), // 26: employee.v1.GetEmployeesWithMoodResponse.Employees + (*GetEmployeesWithMoodResponse_Employees_Details)(nil), // 27: employee.v1.GetEmployeesWithMoodResponse.Employees.Details + (*GetEmployeesWithMoodResponse_Employees_Details_Pets)(nil), // 28: employee.v1.GetEmployeesWithMoodResponse.Employees.Details.Pets + (*GetEmployeesResponse_Employees)(nil), // 29: employee.v1.GetEmployeesResponse.Employees + (*GetEmployeesResponse_Employees_Details)(nil), // 30: employee.v1.GetEmployeesResponse.Employees.Details + (*FindEmployeesByPetsInlineFragmentResponse_FindEmployees)(nil), // 31: employee.v1.FindEmployeesByPetsInlineFragmentResponse.FindEmployees + (*FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details)(nil), // 32: employee.v1.FindEmployeesByPetsInlineFragmentResponse.FindEmployees.Details + (*FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details_Pets)(nil), // 33: employee.v1.FindEmployeesByPetsInlineFragmentResponse.FindEmployees.Details.Pets + (*FindEmployeesByPetsNamedFragmentResponse_FindEmployees)(nil), // 34: employee.v1.FindEmployeesByPetsNamedFragmentResponse.FindEmployees + (*FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details)(nil), // 35: employee.v1.FindEmployeesByPetsNamedFragmentResponse.FindEmployees.Details + (*FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details_Pets)(nil), // 36: employee.v1.FindEmployeesByPetsNamedFragmentResponse.FindEmployees.Details.Pets +} +var file_employee_v1_service_proto_depIdxs = []int32{ + 0, // 0: employee.v1.UpdateEmployeeMoodRequest.mood:type_name -> employee.v1.Mood + 16, // 1: employee.v1.UpdateEmployeeMoodResponse.update_mood:type_name -> employee.v1.UpdateEmployeeMoodResponse.UpdateMood + 18, // 2: employee.v1.GetEmployeeByIdResponse.employee:type_name -> employee.v1.GetEmployeeByIdResponse.Employee + 23, // 3: employee.v1.FindEmployeesByPetsResponse.find_employees:type_name -> employee.v1.FindEmployeesByPetsResponse.FindEmployees + 26, // 4: 
employee.v1.GetEmployeesWithMoodResponse.employees:type_name -> employee.v1.GetEmployeesWithMoodResponse.Employees + 29, // 5: employee.v1.GetEmployeesResponse.employees:type_name -> employee.v1.GetEmployeesResponse.Employees + 31, // 6: employee.v1.FindEmployeesByPetsInlineFragmentResponse.find_employees:type_name -> employee.v1.FindEmployeesByPetsInlineFragmentResponse.FindEmployees + 34, // 7: employee.v1.FindEmployeesByPetsNamedFragmentResponse.find_employees:type_name -> employee.v1.FindEmployeesByPetsNamedFragmentResponse.FindEmployees + 17, // 8: employee.v1.UpdateEmployeeMoodResponse.UpdateMood.details:type_name -> employee.v1.UpdateEmployeeMoodResponse.UpdateMood.Details + 0, // 9: employee.v1.UpdateEmployeeMoodResponse.UpdateMood.current_mood:type_name -> employee.v1.Mood + 0, // 10: employee.v1.UpdateEmployeeMoodResponse.UpdateMood.derived_mood:type_name -> employee.v1.Mood + 19, // 11: employee.v1.GetEmployeeByIdResponse.Employee.details:type_name -> employee.v1.GetEmployeeByIdResponse.Employee.Details + 20, // 12: employee.v1.GetEmployeeByIdResponse.Employee.Details.pets:type_name -> employee.v1.GetEmployeeByIdResponse.Employee.Details.Pets + 21, // 13: employee.v1.GetEmployeeByIdResponse.Employee.Details.location:type_name -> employee.v1.GetEmployeeByIdResponse.Employee.Details.Location + 22, // 14: employee.v1.GetEmployeeByIdResponse.Employee.Details.Location.key:type_name -> employee.v1.GetEmployeeByIdResponse.Employee.Details.Location.Key + 24, // 15: employee.v1.FindEmployeesByPetsResponse.FindEmployees.details:type_name -> employee.v1.FindEmployeesByPetsResponse.FindEmployees.Details + 25, // 16: employee.v1.FindEmployeesByPetsResponse.FindEmployees.Details.pets:type_name -> employee.v1.FindEmployeesByPetsResponse.FindEmployees.Details.Pets + 27, // 17: employee.v1.GetEmployeesWithMoodResponse.Employees.details:type_name -> employee.v1.GetEmployeesWithMoodResponse.Employees.Details + 0, // 18: employee.v1.GetEmployeesWithMoodResponse.Employees.current_mood:type_name -> employee.v1.Mood + 28, // 19: employee.v1.GetEmployeesWithMoodResponse.Employees.Details.pets:type_name -> employee.v1.GetEmployeesWithMoodResponse.Employees.Details.Pets + 1, // 20: employee.v1.GetEmployeesWithMoodResponse.Employees.Details.Pets.gender:type_name -> employee.v1.Gender + 30, // 21: employee.v1.GetEmployeesResponse.Employees.details:type_name -> employee.v1.GetEmployeesResponse.Employees.Details + 32, // 22: employee.v1.FindEmployeesByPetsInlineFragmentResponse.FindEmployees.details:type_name -> employee.v1.FindEmployeesByPetsInlineFragmentResponse.FindEmployees.Details + 33, // 23: employee.v1.FindEmployeesByPetsInlineFragmentResponse.FindEmployees.Details.pets:type_name -> employee.v1.FindEmployeesByPetsInlineFragmentResponse.FindEmployees.Details.Pets + 35, // 24: employee.v1.FindEmployeesByPetsNamedFragmentResponse.FindEmployees.details:type_name -> employee.v1.FindEmployeesByPetsNamedFragmentResponse.FindEmployees.Details + 36, // 25: employee.v1.FindEmployeesByPetsNamedFragmentResponse.FindEmployees.Details.pets:type_name -> employee.v1.FindEmployeesByPetsNamedFragmentResponse.FindEmployees.Details.Pets + 6, // 26: employee.v1.EmployeeService.FindEmployeesByPets:input_type -> employee.v1.FindEmployeesByPetsRequest + 12, // 27: employee.v1.EmployeeService.FindEmployeesByPetsInlineFragment:input_type -> employee.v1.FindEmployeesByPetsInlineFragmentRequest + 14, // 28: employee.v1.EmployeeService.FindEmployeesByPetsNamedFragment:input_type -> 
employee.v1.FindEmployeesByPetsNamedFragmentRequest + 4, // 29: employee.v1.EmployeeService.GetEmployeeById:input_type -> employee.v1.GetEmployeeByIdRequest + 10, // 30: employee.v1.EmployeeService.GetEmployees:input_type -> employee.v1.GetEmployeesRequest + 8, // 31: employee.v1.EmployeeService.GetEmployeesWithMood:input_type -> employee.v1.GetEmployeesWithMoodRequest + 2, // 32: employee.v1.EmployeeService.UpdateEmployeeMood:input_type -> employee.v1.UpdateEmployeeMoodRequest + 7, // 33: employee.v1.EmployeeService.FindEmployeesByPets:output_type -> employee.v1.FindEmployeesByPetsResponse + 13, // 34: employee.v1.EmployeeService.FindEmployeesByPetsInlineFragment:output_type -> employee.v1.FindEmployeesByPetsInlineFragmentResponse + 15, // 35: employee.v1.EmployeeService.FindEmployeesByPetsNamedFragment:output_type -> employee.v1.FindEmployeesByPetsNamedFragmentResponse + 5, // 36: employee.v1.EmployeeService.GetEmployeeById:output_type -> employee.v1.GetEmployeeByIdResponse + 11, // 37: employee.v1.EmployeeService.GetEmployees:output_type -> employee.v1.GetEmployeesResponse + 9, // 38: employee.v1.EmployeeService.GetEmployeesWithMood:output_type -> employee.v1.GetEmployeesWithMoodResponse + 3, // 39: employee.v1.EmployeeService.UpdateEmployeeMood:output_type -> employee.v1.UpdateEmployeeMoodResponse + 33, // [33:40] is the sub-list for method output_type + 26, // [26:33] is the sub-list for method input_type + 26, // [26:26] is the sub-list for extension type_name + 26, // [26:26] is the sub-list for extension extendee + 0, // [0:26] is the sub-list for field type_name +} + +func init() { file_employee_v1_service_proto_init() } +func file_employee_v1_service_proto_init() { + if File_employee_v1_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_employee_v1_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*UpdateEmployeeMoodRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*UpdateEmployeeMoodResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*GetEmployeeByIdRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*GetEmployeeByIdResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*FindEmployeesByPetsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*FindEmployeesByPetsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*GetEmployeesWithMoodRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + 
return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*GetEmployeesWithMoodResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*GetEmployeesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*GetEmployeesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*FindEmployeesByPetsInlineFragmentRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*FindEmployeesByPetsInlineFragmentResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*FindEmployeesByPetsNamedFragmentRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*FindEmployeesByPetsNamedFragmentResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[14].Exporter = func(v any, i int) any { + switch v := v.(*UpdateEmployeeMoodResponse_UpdateMood); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*UpdateEmployeeMoodResponse_UpdateMood_Details); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[16].Exporter = func(v any, i int) any { + switch v := v.(*GetEmployeeByIdResponse_Employee); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[17].Exporter = func(v any, i int) any { + switch v := v.(*GetEmployeeByIdResponse_Employee_Details); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[18].Exporter = func(v any, i int) any { + switch v := v.(*GetEmployeeByIdResponse_Employee_Details_Pets); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[19].Exporter = func(v any, i int) any { + switch v := v.(*GetEmployeeByIdResponse_Employee_Details_Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[20].Exporter = func(v any, i int) any { + switch v := v.(*GetEmployeeByIdResponse_Employee_Details_Location_Key); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[21].Exporter = func(v any, i int) any { + switch v := v.(*FindEmployeesByPetsResponse_FindEmployees); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[22].Exporter = func(v any, i int) any { + switch v := v.(*FindEmployeesByPetsResponse_FindEmployees_Details); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[23].Exporter = func(v any, i int) any { + switch v := v.(*FindEmployeesByPetsResponse_FindEmployees_Details_Pets); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[24].Exporter = func(v any, i int) any { + switch v := v.(*GetEmployeesWithMoodResponse_Employees); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[25].Exporter = func(v any, i int) any { + switch v := v.(*GetEmployeesWithMoodResponse_Employees_Details); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[26].Exporter = func(v any, i int) any { + switch v := v.(*GetEmployeesWithMoodResponse_Employees_Details_Pets); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[27].Exporter = func(v any, i int) any { + switch v := v.(*GetEmployeesResponse_Employees); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[28].Exporter = func(v any, i int) any { + switch v := v.(*GetEmployeesResponse_Employees_Details); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[29].Exporter = func(v any, i int) any { + switch v := v.(*FindEmployeesByPetsInlineFragmentResponse_FindEmployees); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[30].Exporter = func(v any, i int) any { + switch v := v.(*FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[31].Exporter = func(v any, i int) any { + switch v := v.(*FindEmployeesByPetsInlineFragmentResponse_FindEmployees_Details_Pets); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[32].Exporter = func(v any, i int) any { + switch v := 
v.(*FindEmployeesByPetsNamedFragmentResponse_FindEmployees); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[33].Exporter = func(v any, i int) any { + switch v := v.(*FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_employee_v1_service_proto_msgTypes[34].Exporter = func(v any, i int) any { + switch v := v.(*FindEmployeesByPetsNamedFragmentResponse_FindEmployees_Details_Pets); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_employee_v1_service_proto_rawDesc, + NumEnums: 2, + NumMessages: 35, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_employee_v1_service_proto_goTypes, + DependencyIndexes: file_employee_v1_service_proto_depIdxs, + EnumInfos: file_employee_v1_service_proto_enumTypes, + MessageInfos: file_employee_v1_service_proto_msgTypes, + }.Build() + File_employee_v1_service_proto = out.File + file_employee_v1_service_proto_rawDesc = nil + file_employee_v1_service_proto_goTypes = nil + file_employee_v1_service_proto_depIdxs = nil +} diff --git a/router/connect.config.yaml b/router/connect.config.yaml new file mode 100644 index 0000000000..00a179f95f --- /dev/null +++ b/router/connect.config.yaml @@ -0,0 +1,21 @@ +# yaml-language-server: $schema=./pkg/config/config.schema.json +version: "1" + +# Standard router settings +listen_addr: "localhost:3002" +graphql_path: "/graphql" + +# ConnectRPC configuration +connect_rpc: + enabled: true + server: + listen_addr: "localhost:5026" + storage: + provider_id: "fs-services" + graphql_endpoint: "http://localhost:3002/graphql" + +# Storage providers for services directory +storage_providers: + file_system: + - id: "fs-services" + path: "./pkg/connectrpc/samples/services" diff --git a/router/core/header_rule_engine.go b/router/core/header_rule_engine.go index 509fa2695f..8775cfbb52 100644 --- a/router/core/header_rule_engine.go +++ b/router/core/header_rule_engine.go @@ -3,12 +3,10 @@ package core import ( "context" "fmt" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/graphql_datasource" "io" "net/http" "reflect" "regexp" - "slices" "strings" "sync" "time" @@ -17,9 +15,11 @@ import ( cachedirective "github.com/pquerna/cachecontrol/cacheobject" nodev1 "github.com/wundergraph/cosmo/router/gen/proto/wg/cosmo/node/v1" "github.com/wundergraph/cosmo/router/internal/expr" + "github.com/wundergraph/cosmo/router/internal/headers" "github.com/wundergraph/cosmo/router/pkg/config" "github.com/wundergraph/cosmo/router/pkg/otel" rtrace "github.com/wundergraph/cosmo/router/pkg/trace" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/graphql_datasource" "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -27,34 +27,7 @@ import ( var ( _ EnginePreOriginHandler = (*HeaderPropagation)(nil) - ignoredHeaders = []string{ - "Alt-Svc", - "Connection", - "Proxy-Connection", // non-standard but still sent by libcurl and rejected by e.g. 
google - - // Hop-by-hop headers - // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Connection - "Keep-Alive", - "Proxy-Authenticate", - "Proxy-Authorization", - "Te", // canonicalized version of "TE" - "Trailer", // not Trailers per URL above; https://www.rfc-editor.org/errata_search.php?eid=4522 - "Transfer-Encoding", - "Upgrade", - - // Content Negotiation. We must never propagate the client headers to the upstream - // The router has to decide on its own what to send to the upstream - "Content-Type", - "Accept-Encoding", - "Accept-Charset", - "Accept", - - // Web Socket negotiation headers. We must never propagate the client headers to the upstream. - "Sec-Websocket-Extensions", - "Sec-Websocket-Key", - "Sec-Websocket-Protocol", - "Sec-Websocket-Version", - } + cacheControlKey = "Cache-Control" expiresKey = "Expires" noCache = "no-cache" @@ -334,7 +307,7 @@ func (h *HeaderPropagation) applyResponseRule(propagation *responseHeaderPropaga } if rule.Named != "" { - if slices.Contains(ignoredHeaders, rule.Named) { + if _, ok := headers.SkippedHeaders[rule.Named]; ok { return } @@ -354,7 +327,7 @@ func (h *HeaderPropagation) applyResponseRule(propagation *responseHeaderPropaga result = !result } if result { - if slices.Contains(ignoredHeaders, name) { + if _, ok := headers.SkippedHeaders[name]; ok { continue } values := res.Header.Values(name) @@ -427,7 +400,7 @@ func (h *HeaderPropagation) applyRequestRule(ctx RequestContext, request *http.R if rule.Rename != "" && rule.Named != "" { // Ignore the rule when the target header is in the ignored list - if slices.Contains(ignoredHeaders, rule.Rename) { + if _, ok := headers.SkippedHeaders[rule.Rename]; ok { return } @@ -450,7 +423,7 @@ func (h *HeaderPropagation) applyRequestRule(ctx RequestContext, request *http.R */ if rule.Named != "" { - if slices.Contains(ignoredHeaders, rule.Named) { + if _, ok := headers.SkippedHeaders[rule.Named]; ok { return } @@ -483,7 +456,7 @@ func (h *HeaderPropagation) applyRequestRule(ctx RequestContext, request *http.R */ if rule.Rename != "" && rule.Named == "" { - if slices.Contains(ignoredHeaders, rule.Rename) { + if _, ok := headers.SkippedHeaders[rule.Rename]; ok { continue } @@ -502,7 +475,7 @@ func (h *HeaderPropagation) applyRequestRule(ctx RequestContext, request *http.R /** * Propagate the header as is */ - if slices.Contains(ignoredHeaders, name) { + if _, ok := headers.SkippedHeaders[name]; ok { continue } request.Header.Set(name, ctx.Request().Header.Get(name)) diff --git a/router/core/header_rule_engine_test.go b/router/core/header_rule_engine_test.go index 33ebebd68d..33b6a5deab 100644 --- a/router/core/header_rule_engine_test.go +++ b/router/core/header_rule_engine_test.go @@ -10,12 +10,22 @@ import ( "github.com/stretchr/testify/require" "github.com/wundergraph/cosmo/router/internal/expr" + "github.com/wundergraph/cosmo/router/internal/headers" "github.com/wundergraph/cosmo/router/pkg/config" "github.com/stretchr/testify/assert" "go.uber.org/zap" ) +// getSkippedHeaderNames returns a slice of header names from the SkippedHeaders map +func getSkippedHeaderNames() []string { + names := make([]string, 0, len(headers.SkippedHeaders)) + for name := range headers.SkippedHeaders { + names = append(names, name) + } + return names +} + func TestPropagateHeaderRule(t *testing.T) { t.Run("Should propagate with named header name / named", func(t *testing.T) { @@ -215,7 +225,7 @@ func TestPropagateHeaderRule(t *testing.T) { }, } - for _, name := range ignoredHeaders { + for _, name := range 
getSkippedHeaderNames() { rules = append(rules, &config.RequestHeaderRule{ Operation: "propagate", Named: name, @@ -235,7 +245,7 @@ func TestPropagateHeaderRule(t *testing.T) { require.NoError(t, err) clientReq.Header.Set("X-Test-1", "test1") - for i, name := range ignoredHeaders { + for i, name := range getSkippedHeaderNames() { clientReq.Header.Set(name, fmt.Sprintf("test-%d", i)) } @@ -383,7 +393,7 @@ func TestRenamePropagateHeaderRule(t *testing.T) { }, } - for _, name := range ignoredHeaders { + for _, name := range getSkippedHeaderNames() { rules = append(rules, &config.RequestHeaderRule{ Operation: "propagate", Named: fmt.Sprintf("X-Test-%s", name), @@ -404,7 +414,7 @@ func TestRenamePropagateHeaderRule(t *testing.T) { require.NoError(t, err) clientReq.Header.Set("X-Test-Old", "test1") - for i, name := range ignoredHeaders { + for i, name := range getSkippedHeaderNames() { clientReq.Header.Set(fmt.Sprintf("X-Test-%s", name), fmt.Sprintf("X-Test-%d", i)) } @@ -444,7 +454,7 @@ func TestSkipAllIgnoredHeaders(t *testing.T) { require.NoError(t, err) clientReq.Header.Set("X-Test-1", "test1") - for i, header := range ignoredHeaders { + for i, header := range getSkippedHeaderNames() { clientReq.Header.Set(header, fmt.Sprintf("test-%d", i)) } @@ -459,7 +469,7 @@ func TestSkipAllIgnoredHeaders(t *testing.T) { subgraphResolver: NewSubgraphResolver(nil), }) - for _, header := range ignoredHeaders { + for _, header := range getSkippedHeaderNames() { assert.Empty(t, updatedClientReq.Header.Get(header), fmt.Sprintf("header %s should be empty", header)) } @@ -587,7 +597,7 @@ func TestSubgraphPropagateHeaderRule(t *testing.T) { }, } - for _, name := range ignoredHeaders { + for _, name := range getSkippedHeaderNames() { rules = append(rules, &config.RequestHeaderRule{ Operation: "propagate", Named: name, @@ -609,7 +619,7 @@ func TestSubgraphPropagateHeaderRule(t *testing.T) { require.NoError(t, err) clientReq.Header.Set("X-Test-Subgraph", "Test-Value") - for i, name := range ignoredHeaders { + for i, name := range getSkippedHeaderNames() { clientReq.Header.Set(name, fmt.Sprintf("X-Test-%d", i)) } @@ -650,7 +660,7 @@ func TestSubgraphPropagateHeaderRule(t *testing.T) { }, } - for _, name := range ignoredHeaders { + for _, name := range getSkippedHeaderNames() { rules = append(rules, &config.RequestHeaderRule{ Operation: "propagate", Named: fmt.Sprintf("X-Test-%s", name), @@ -673,7 +683,7 @@ func TestSubgraphPropagateHeaderRule(t *testing.T) { require.NoError(t, err) clientReq.Header.Set("X-Test-Subgraph", "Test-Value") - for i, name := range ignoredHeaders { + for i, name := range getSkippedHeaderNames() { clientReq.Header.Set(name, fmt.Sprintf("X-Test-%d", i)) } diff --git a/router/core/router.go b/router/core/router.go index ad4b77cc33..6beb5a022f 100644 --- a/router/core/router.go +++ b/router/core/router.go @@ -10,7 +10,6 @@ import ( "net/http" "net/url" "os" - "path" "sync" "time" @@ -42,6 +41,7 @@ import ( "github.com/wundergraph/cosmo/router/internal/retrytransport" "github.com/wundergraph/cosmo/router/internal/stringsx" "github.com/wundergraph/cosmo/router/pkg/config" + "github.com/wundergraph/cosmo/router/pkg/connectrpc" "github.com/wundergraph/cosmo/router/pkg/controlplane/configpoller" "github.com/wundergraph/cosmo/router/pkg/controlplane/selfregister" "github.com/wundergraph/cosmo/router/pkg/cors" @@ -905,8 +905,6 @@ func (r *Router) bootstrap(ctx context.Context) error { zap.String("provider_id", r.mcp.Storage.ProviderID)) // Find the provider in storage_providers - found := false - // 
Check for file_system providers for _, provider := range r.storageProviders.FileSystem { if provider.ID == r.mcp.Storage.ProviderID { @@ -916,12 +914,11 @@ func (r *Router) bootstrap(ctx context.Context) error { // Use the resolved file system path operationsDir = provider.Path - found = true break } } - if !found { + if operationsDir == "" { return fmt.Errorf("storage provider with id '%s' for mcp server not found", r.mcp.Storage.ProviderID) } } @@ -949,12 +946,16 @@ func (r *Router) bootstrap(ctx context.Context) error { // Determine the router GraphQL endpoint var routerGraphQLEndpoint string + var err error // Use the custom URL if provided if r.mcp.RouterURL != "" { routerGraphQLEndpoint = r.mcp.RouterURL } else { - routerGraphQLEndpoint = path.Join(r.listenAddr, r.graphqlPath) + routerGraphQLEndpoint, err = url.JoinPath(r.baseURL, r.graphqlPath) + if err != nil { + return fmt.Errorf("failed to construct MCP GraphQL endpoint URL: %w", err) + } } mcpss, err := mcpserver.NewGraphQLSchemaServer( @@ -967,12 +968,92 @@ func (r *Router) bootstrap(ctx context.Context) error { err = mcpss.Start() if err != nil { + // Cleanup the server if Start() fails to prevent resource leaks + if stopErr := mcpss.Stop(ctx); stopErr != nil { + r.logger.Warn("Failed to stop MCP server during error cleanup", zap.Error(stopErr)) + } return fmt.Errorf("failed to start MCP server: %w", err) } r.mcpServer = mcpss } + if r.connectRPC.Enabled { + r.logger.Debug("ConnectRPC configuration", + zap.Bool("enabled", r.connectRPC.Enabled), + zap.String("storage_provider_id", r.connectRPC.Storage.ProviderID), + zap.String("listen_addr", r.connectRPC.Server.ListenAddr), + zap.String("graphql_endpoint", r.connectRPC.GraphQLEndpoint)) + + // Resolve the services provider to get the services directory + var servicesDir string + for _, provider := range r.storageProviders.FileSystem { + if provider.ID == r.connectRPC.Storage.ProviderID { + servicesDir = provider.Path + r.logger.Debug("Resolved services provider", + zap.String("provider_id", provider.ID), + zap.String("path", provider.Path)) + break + } + } + if servicesDir == "" { + return fmt.Errorf("services storage provider with id '%s' for connect_rpc not found", r.connectRPC.Storage.ProviderID) + } + + // Discover services using convention-based approach + discoveredServices, err := connectrpc.DiscoverServices(connectrpc.ServiceDiscoveryConfig{ + ServicesDir: servicesDir, + Logger: r.logger, + }) + if err != nil { + return fmt.Errorf("failed to discover ConnectRPC services: %w", err) + } + + // Determine the router GraphQL endpoint + var routerGraphQLEndpoint string + if r.connectRPC.GraphQLEndpoint != "" { + routerGraphQLEndpoint = r.connectRPC.GraphQLEndpoint + } else { + routerGraphQLEndpoint, err = url.JoinPath(r.baseURL, r.graphqlPath) + if err != nil { + return fmt.Errorf("failed to construct ConnectRPC GraphQL endpoint URL: %w", err) + } + } + + // Initialize the ConnectRPC server with the services directory + serverConfig := connectrpc.ServerConfig{ + ServicesDir: servicesDir, + ListenAddr: r.connectRPC.Server.ListenAddr, + GraphQLEndpoint: routerGraphQLEndpoint, + Logger: r.logger, + CorsConfig: r.corsOptions, + } + + crpcServer, err := connectrpc.NewServer(serverConfig) + if err != nil { + r.logger.Error("Failed to create ConnectRPC server", zap.Error(err)) + return fmt.Errorf("failed to create connect_rpc server: %w", err) + } + + err = crpcServer.Start() + if err != nil { + r.logger.Error("Failed to start ConnectRPC server", zap.Error(err)) + // Cleanup the 
server if Start() fails to prevent resource leaks + if stopErr := crpcServer.Stop(ctx); stopErr != nil { + r.logger.Warn("Failed to stop ConnectRPC server during error cleanup", zap.Error(stopErr)) + } + return fmt.Errorf("failed to start ConnectRPC server: %w", err) + } + + // Single consolidated INFO log for ConnectRPC startup + r.logger.Info("ConnectRPC server ready", + zap.String("listen_addr", r.connectRPC.Server.ListenAddr), + zap.Int("services", len(discoveredServices)), + zap.Int("operations", crpcServer.GetOperationCount())) + + r.connectRPCServer = crpcServer + } + if r.metricConfig.OpenTelemetry.EngineStats.Enabled() || r.metricConfig.Prometheus.EngineStats.Enabled() || r.engineExecutionConfiguration.Debug.ReportWebSocketConnections { r.EngineStats = statistics.NewEngineStats(ctx, r.logger, r.engineExecutionConfiguration.Debug.ReportWebSocketConnections) } @@ -1473,89 +1554,70 @@ func (r *Router) Shutdown(ctx context.Context) error { var wg sync.WaitGroup if r.prometheusServer != nil { - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { if subErr := r.prometheusServer.Close(); subErr != nil { err.Append(fmt.Errorf("failed to shutdown prometheus server: %w", subErr)) } - }() + }) } if r.mcpServer != nil { - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { if subErr := r.mcpServer.Stop(ctx); subErr != nil { err.Append(fmt.Errorf("failed to shutdown mcp server: %w", subErr)) } - }() + }) } - if r.tracerProvider != nil { - wg.Add(1) - - go func() { - defer wg.Done() + if r.connectRPCServer != nil { + wg.Go(func() { + if subErr := r.connectRPCServer.Stop(ctx); subErr != nil { + err.Append(fmt.Errorf("failed to shutdown connect_rpc server: %w", subErr)) + } + }) + } + if r.tracerProvider != nil { + wg.Go(func() { if subErr := r.tracerProvider.Shutdown(ctx); subErr != nil { err.Append(fmt.Errorf("failed to shutdown tracer: %w", subErr)) } - }() + }) } if r.gqlMetricsExporter != nil { - wg.Add(1) - - go func() { - defer wg.Done() - + wg.Go(func() { if subErr := r.gqlMetricsExporter.Shutdown(ctx); subErr != nil { err.Append(fmt.Errorf("failed to shutdown graphql metrics exporter: %w", subErr)) } - }() + }) } if r.promMeterProvider != nil { - wg.Add(1) - - go func() { - defer wg.Done() - + wg.Go(func() { if subErr := r.promMeterProvider.Shutdown(ctx); subErr != nil { err.Append(fmt.Errorf("failed to shutdown prometheus meter provider: %w", subErr)) } - }() + }) } if r.otlpMeterProvider != nil { - wg.Add(1) - - go func() { - defer wg.Done() - + wg.Go(func() { if subErr := r.otlpMeterProvider.Shutdown(ctx); subErr != nil { err.Append(fmt.Errorf("failed to shutdown OTLP meter provider: %w", subErr)) } - }() + }) } if r.redisClient != nil { - wg.Add(1) - - go func() { - defer wg.Done() - + wg.Go(func() { if closeErr := r.redisClient.Close(); closeErr != nil { err.Append(fmt.Errorf("failed to close redis client: %w", closeErr)) } - }() + }) } - wg.Add(1) - go func() { - defer wg.Done() - + wg.Go(func() { for _, module := range r.modules { if cleaner, ok := module.(Cleaner); ok { if subErr := cleaner.Cleanup(); subErr != nil { @@ -1563,7 +1625,7 @@ func (r *Router) Shutdown(ctx context.Context) error { } } } - }() + }) // Shutdown the CDN operation client and free up resources if r.persistedOperationClient != nil { @@ -2164,6 +2226,12 @@ func WithPlugins(cfg config.PluginsConfiguration) Option { } } +func WithConnectRPC(cfg config.ConnectRPCConfiguration) Option { + return func(r *Router) { + r.connectRPC = cfg + } +} + func WithDemoMode(demoMode bool) Option { return func(r 
*Router) { r.demoMode = demoMode diff --git a/router/core/router_config.go b/router/core/router_config.go index 319216a18a..9dd4d69173 100644 --- a/router/core/router_config.go +++ b/router/core/router_config.go @@ -11,6 +11,7 @@ import ( rd "github.com/wundergraph/cosmo/router/internal/rediscloser" "github.com/wundergraph/cosmo/router/internal/retrytransport" "github.com/wundergraph/cosmo/router/pkg/config" + "github.com/wundergraph/cosmo/router/pkg/connectrpc" "github.com/wundergraph/cosmo/router/pkg/controlplane/configpoller" "github.com/wundergraph/cosmo/router/pkg/controlplane/selfregister" "github.com/wundergraph/cosmo/router/pkg/cors" @@ -107,6 +108,7 @@ type Config struct { retryOptions retrytransport.RetryOptions redisClient rd.RDCloser mcpServer *mcpserver.GraphQLSchemaServer + connectRPCServer *connectrpc.Server processStartTime time.Time developmentMode bool healthcheck health.Checker @@ -138,6 +140,7 @@ type Config struct { subscriptionHeartbeatInterval time.Duration hostName string mcp config.MCPConfiguration + connectRPC config.ConnectRPCConfiguration plugins config.PluginsConfiguration tracingAttributes []config.CustomAttribute subscriptionHooks subscriptionHooks @@ -327,6 +330,8 @@ func (c *Config) Usage() map[string]any { usage["mcp_exclude_mutations"] = c.mcp.ExcludeMutations usage["mcp_expose_schema"] = c.mcp.ExposeSchema + usage["connect_rpc"] = c.connectRPC.Enabled + usage["cosmo_cdn"] = c.cdnConfig.URL == "https://cosmo-cdn.wundergraph.com" usage["static_execution_config"] = c.staticExecutionConfig != nil diff --git a/router/core/supervisor_instance.go b/router/core/supervisor_instance.go index 0fafc833e2..43b50f7d1a 100644 --- a/router/core/supervisor_instance.go +++ b/router/core/supervisor_instance.go @@ -269,6 +269,7 @@ func optionsFromResources(logger *zap.Logger, config *config.Config) []Option { WithClientHeader(config.ClientHeader), WithCacheWarmupConfig(&config.CacheWarmup), WithMCP(config.MCP), + WithConnectRPC(config.ConnectRPC), WithPlugins(config.Plugins), WithDemoMode(config.DemoMode), WithStreamsHandlerConfiguration(config.Events.Handlers), diff --git a/router/go.mod b/router/go.mod index 415918d050..878f1387d9 100644 --- a/router/go.mod +++ b/router/go.mod @@ -57,10 +57,12 @@ require ( ) require ( + connectrpc.com/vanguard v0.3.0 github.com/KimMachineGun/automemlimit v0.6.1 github.com/MicahParks/jwkset v0.11.0 github.com/MicahParks/keyfunc/v3 v3.6.2 github.com/alicebob/miniredis/v2 v2.34.0 + github.com/bufbuild/protocompile v0.14.1 github.com/caarlos0/env/v11 v11.3.1 github.com/cep21/circuit/v4 v4.0.0 github.com/dgraph-io/ristretto/v2 v2.1.0 @@ -83,6 +85,7 @@ require ( go.uber.org/goleak v1.3.0 go.uber.org/ratelimit v0.3.1 golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 + golang.org/x/net v0.46.0 golang.org/x/text v0.30.0 golang.org/x/time v0.9.0 ) @@ -92,7 +95,6 @@ require ( github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bufbuild/protocompile v0.14.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cilium/ebpf v0.9.1 // indirect github.com/containerd/cgroups/v3 v3.0.2 // indirect @@ -122,6 +124,7 @@ require ( github.com/hashicorp/yamux v0.1.1 // indirect github.com/invopop/jsonschema v0.13.0 // indirect github.com/jensneuse/byte-template v0.0.0-20231025215717-69252eb3ed56 // indirect + github.com/jhump/protoreflect v1.17.0 // indirect github.com/kingledion/go-tools v0.6.0 // indirect github.com/klauspost/cpuid/v2 
v2.2.8 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect @@ -165,7 +168,6 @@ require ( go.opentelemetry.io/proto/otlp v1.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.43.0 // indirect - golang.org/x/net v0.46.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250811230008-5f3141c8851a // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250811230008-5f3141c8851a // indirect gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect diff --git a/router/go.sum b/router/go.sum index 715b552f47..47e005b446 100644 --- a/router/go.sum +++ b/router/go.sum @@ -1,5 +1,7 @@ connectrpc.com/connect v1.16.2 h1:ybd6y+ls7GOlb7Bh5C8+ghA6SvCBajHwxssO2CGFjqE= connectrpc.com/connect v1.16.2/go.mod h1:n2kgwskMHXC+lVqb18wngEpF95ldBHXjZYJussz5FRc= +connectrpc.com/vanguard v0.3.0 h1:prUKFm8rYDwvpvnOSoqdUowPMK0tRA0pbSrQoMd6Zng= +connectrpc.com/vanguard v0.3.0/go.mod h1:nxQ7+N6qhBiQczqGwdTw4oCqx1rDryIt20cEdECqToM= github.com/99designs/gqlgen v0.17.76 h1:YsJBcfACWmXWU2t1yCjoGdOmqcTfOFpjbLAE443fmYI= github.com/99designs/gqlgen v0.17.76/go.mod h1:miiU+PkAnTIDKMQ1BseUOIVeQHoiwYDZGCswoxl7xec= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -159,8 +161,8 @@ github.com/jensneuse/byte-template v0.0.0-20231025215717-69252eb3ed56 h1:wo26fh6 github.com/jensneuse/byte-template v0.0.0-20231025215717-69252eb3ed56/go.mod h1:0D5r/VSW6D/o65rKLL9xk7sZxL2+oku2HvFPYeIMFr4= github.com/jensneuse/diffview v1.0.0 h1:4b6FQJ7y3295JUHU3tRko6euyEboL825ZsXeZZM47Z4= github.com/jensneuse/diffview v1.0.0/go.mod h1:i6IacuD8LnEaPuiyzMHA+Wfz5mAuycMOf3R/orUY9y4= -github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= -github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= +github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94= +github.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8= github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= diff --git a/router/internal/headers/headers.go b/router/internal/headers/headers.go new file mode 100644 index 0000000000..b5c4231dc4 --- /dev/null +++ b/router/internal/headers/headers.go @@ -0,0 +1,37 @@ +package headers + +// SkippedHeaders are headers that should not be forwarded to downstream services. +// These headers are connection-specific or should be set by the client/server +// rather than being forwarded from the original request. +var SkippedHeaders = map[string]struct{}{ + "Alt-Svc": {}, + "Connection": {}, + "Proxy-Connection": {}, // non-standard but still sent by libcurl and rejected by e.g. google + + // Hop-by-hop headers + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Connection + "Keep-Alive": {}, + "Proxy-Authenticate": {}, + "Proxy-Authorization": {}, + "Te": {}, // canonicalized version of "TE" + "Trailer": {}, // not Trailers per URL above; https://www.rfc-editor.org/errata_search.php?eid=4522 + "Transfer-Encoding": {}, + "Upgrade": {}, + + // Content Negotiation. We must never propagate the client headers to the upstream + // The router has to decide on its own what to send to the upstream + "Content-Type": {}, + "Accept-Encoding": {}, + "Accept-Charset": {}, + "Accept": {}, + + // Web Socket negotiation headers. 
We must never propagate the client headers to the upstream. + "Sec-Websocket-Extensions": {}, + "Sec-Websocket-Key": {}, + "Sec-Websocket-Protocol": {}, + "Sec-Websocket-Version": {}, + + // Additional headers that should not be forwarded + "Host": {}, + "Content-Length": {}, +} diff --git a/router/pkg/config/config.go b/router/pkg/config/config.go index 8eb71bc5f1..dac3b22b64 100644 --- a/router/pkg/config/config.go +++ b/router/pkg/config/config.go @@ -1017,6 +1017,22 @@ type MCPServer struct { BaseURL string `yaml:"base_url,omitempty" env:"MCP_SERVER_BASE_URL"` } +type ConnectRPCConfiguration struct { + Enabled bool `yaml:"enabled" envDefault:"false" env:"CONNECT_RPC_ENABLED"` + Server ConnectRPCServer `yaml:"server,omitempty" envPrefix:"CONNECT_RPC_SERVER_"` + Storage ConnectRPCStorageConfig `yaml:"storage,omitempty"` + GraphQLEndpoint string `yaml:"graphql_endpoint,omitempty" env:"CONNECT_RPC_GRAPHQL_ENDPOINT"` +} + +type ConnectRPCStorageConfig struct { + ProviderID string `yaml:"provider_id,omitempty" env:"CONNECT_RPC_STORAGE_PROVIDER_ID"` +} + +type ConnectRPCServer struct { + ListenAddr string `yaml:"listen_addr" envDefault:"localhost:5026" env:"LISTEN_ADDR"` + BaseURL string `yaml:"base_url,omitempty" env:"BASE_URL"` +} + type PluginsConfiguration struct { Enabled bool `yaml:"enabled" envDefault:"false" env:"ENABLED"` Path string `yaml:"path" envDefault:"plugins" env:"PATH"` @@ -1035,17 +1051,18 @@ type IntrospectionConfiguration struct { type Config struct { Version string `yaml:"version,omitempty" ignored:"true"` - InstanceID string `yaml:"instance_id,omitempty" env:"INSTANCE_ID"` - Graph Graph `yaml:"graph,omitempty"` - Telemetry Telemetry `yaml:"telemetry,omitempty"` - GraphqlMetrics GraphqlMetrics `yaml:"graphql_metrics,omitempty"` - CORS CORS `yaml:"cors,omitempty"` - Cluster Cluster `yaml:"cluster,omitempty"` - Compliance ComplianceConfig `yaml:"compliance,omitempty"` - TLS TLSConfiguration `yaml:"tls,omitempty"` - CacheControl CacheControlPolicy `yaml:"cache_control_policy"` - MCP MCPConfiguration `yaml:"mcp,omitempty"` - DemoMode bool `yaml:"demo_mode,omitempty" envDefault:"false" env:"DEMO_MODE"` + InstanceID string `yaml:"instance_id,omitempty" env:"INSTANCE_ID"` + Graph Graph `yaml:"graph,omitempty"` + Telemetry Telemetry `yaml:"telemetry,omitempty"` + GraphqlMetrics GraphqlMetrics `yaml:"graphql_metrics,omitempty"` + CORS CORS `yaml:"cors,omitempty"` + Cluster Cluster `yaml:"cluster,omitempty"` + Compliance ComplianceConfig `yaml:"compliance,omitempty"` + TLS TLSConfiguration `yaml:"tls,omitempty"` + CacheControl CacheControlPolicy `yaml:"cache_control_policy"` + MCP MCPConfiguration `yaml:"mcp,omitempty"` + ConnectRPC ConnectRPCConfiguration `yaml:"connect_rpc,omitempty"` + DemoMode bool `yaml:"demo_mode,omitempty" envDefault:"false" env:"DEMO_MODE"` Modules map[string]interface{} `yaml:"modules,omitempty"` Headers HeaderRules `yaml:"headers,omitempty"` diff --git a/router/pkg/config/config.schema.json b/router/pkg/config/config.schema.json index a531fa4af3..d1315e25df 100644 --- a/router/pkg/config/config.schema.json +++ b/router/pkg/config/config.schema.json @@ -1376,7 +1376,8 @@ "type": "string", "description": "The URL of the control plane. The URL is used to register the router on the control-plane. 
The URL is specified as a string with the format 'scheme://host:port'.", "default": "https://cosmo-cp.wundergraph.com", - "format": "http-url" + "format": "uri", + "pattern": "^https?://" }, "playground": { "type": "object", @@ -2152,6 +2153,66 @@ } } }, + "connect_rpc": { + "type": "object", + "description": "The configuration for the ConnectRPC server. ConnectRPC allows gRPC, Connect, and gRPC-Web clients to interact with your GraphQL APIs through protocol transcoding. Proto files define RPC services that map to GraphQL operations.", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean", + "default": false, + "description": "Enable the ConnectRPC server. If the value is true, the ConnectRPC server will be started." + }, + "server": { + "type": "object", + "description": "Server configuration for the ConnectRPC server.", + "additionalProperties": false, + "properties": { + "listen_addr": { + "type": "string", + "description": "The address on which the ConnectRPC server listens for incoming requests. The address is specified as a string with the format 'host:port'.", + "default": "localhost:5026", + "format": "hostname-port" + }, + "base_url": { + "type": "string", + "description": "The base URL of the ConnectRPC server. This is used for service reflection and documentation. By default, the base URL is relative to the URL that the router is running on. The URL is specified as a string with the format 'scheme://host:port'.", + "format": "http-url" + } + } + }, + "storage": { + "type": "object", + "description": "Storage provider configuration for the ConnectRPC server. This specifies where proto files and GraphQL operations for all services are loaded from.", + "additionalProperties": false, + "properties": { + "provider_id": { + "type": "string", + "description": "The ID of the storage provider to use for loading proto files and GraphQL operations for all services. Only storage provider of type 'file_system' are supported. The provider must be configured in the storage_providers section. This provider points to the root services directory, and the router will recursively discover all proto files and their associated operations within that directory structure." + } + } + }, + "graphql_endpoint": { + "type": "string", + "description": "Custom URL to use for the router GraphQL endpoint in ConnectRPC. Use this when your router is behind a proxy. This URL is used to forward RPC requests as GraphQL operations.", + "format": "uri", + "pattern": "^https?://" + } + }, + "if": { + "properties": { + "enabled": { "const": true } + } + }, + "then": { + "required": ["storage"], + "properties": { + "storage": { + "required": ["provider_id"] + } + } + } + }, "demo_mode": { "type": "boolean", "description": "Launch the router in demo mode. If no execution config is found, the router will start with a demo execution config and deploy a demo federated graph that can be used for testing purposes.", diff --git a/router/pkg/config/connectrpc_test.go b/router/pkg/config/connectrpc_test.go new file mode 100644 index 0000000000..ba698d2d85 --- /dev/null +++ b/router/pkg/config/connectrpc_test.go @@ -0,0 +1,215 @@ +package config + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestConnectRPCConfiguration_ZeroValueSemantics tests that the zero value +// represents a safe, disabled state - a meaningful invariant to protect. 
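For context on how the configuration surface added in this patch is consumed, here is a minimal sketch (not part of the change itself) of enabling ConnectRPC programmatically through the `WithConnectRPC` option introduced above; the listen address, provider ID, and GraphQL endpoint are placeholder values.

```go
package main

import (
	"github.com/wundergraph/cosmo/router/core"
	"github.com/wundergraph/cosmo/router/pkg/config"
)

// buildConnectRPCOption wires the new ConnectRPC settings into a router
// instance via the WithConnectRPC option added in this patch. All concrete
// values below are placeholders chosen for illustration.
func buildConnectRPCOption() core.Option {
	return core.WithConnectRPC(config.ConnectRPCConfiguration{
		Enabled: true,
		Server: config.ConnectRPCServer{
			ListenAddr: "localhost:5026",
		},
		Storage: config.ConnectRPCStorageConfig{
			ProviderID: "fs-services",
		},
		GraphQLEndpoint: "http://localhost:3002/graphql",
	})
}

func main() {
	opt := buildConnectRPCOption()
	_ = opt // pass alongside the other core.With... options when constructing the router
}
```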
+func TestConnectRPCConfiguration_ZeroValueSemantics(t *testing.T) { + var cfg ConnectRPCConfiguration + + // These are the semantic expectations that matter: + assert.False(t, cfg.Enabled, "ConnectRPC must be disabled by default for safety") + assert.Empty(t, cfg.GraphQLEndpoint, "no implicit upstream when disabled") +} + +// TestConnectRPCConfiguration_LoadFromYAML tests that config loading works correctly +// with actual YAML parsing and environment variable expansion. +func TestConnectRPCConfiguration_LoadFromYAML(t *testing.T) { + tests := []struct { + name string + yaml string + envVars map[string]string + wantEnabled bool + wantListenAddr string + wantBaseURL string + wantGraphQL string + wantProviderID string + }{ + { + name: "minimal config with defaults", + yaml: `connect_rpc: + enabled: true + storage: + provider_id: "fs-services" + graphql_endpoint: "http://localhost:3002/graphql" +`, + wantEnabled: true, + wantListenAddr: "localhost:5026", // from envDefault tag + wantBaseURL: "", + wantGraphQL: "http://localhost:3002/graphql", + wantProviderID: "fs-services", + }, + { + name: "full config with overrides", + yaml: `connect_rpc: + enabled: true + server: + listen_addr: "0.0.0.0:8080" + base_url: "http://example.com" + storage: + provider_id: "fs-protos" + graphql_endpoint: "http://localhost:4000/graphql" +`, + wantEnabled: true, + wantListenAddr: "0.0.0.0:8080", + wantBaseURL: "http://example.com", + wantGraphQL: "http://localhost:4000/graphql", + wantProviderID: "fs-protos", + }, + { + name: "config with environment variables", + yaml: `connect_rpc: + enabled: true + storage: + provider_id: "${PROVIDER_ID}" + graphql_endpoint: "${GRAPHQL_ENDPOINT}" +`, + envVars: map[string]string{ + "GRAPHQL_ENDPOINT": "http://env-graphql:3002/graphql", + "PROVIDER_ID": "env-provider", + }, + wantEnabled: true, + wantListenAddr: "localhost:5026", + wantBaseURL: "", + wantGraphQL: "http://env-graphql:3002/graphql", + wantProviderID: "env-provider", + }, + { + name: "disabled config", + yaml: `connect_rpc: + enabled: false +`, + wantEnabled: false, + wantListenAddr: "localhost:5026", + wantBaseURL: "", + wantGraphQL: "", + wantProviderID: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Set environment variables + for k, v := range tt.envVars { + t.Setenv(k, v) + } + + // Create temporary config file + tmpDir := t.TempDir() + configPath := filepath.Join(tmpDir, "config.yaml") + err := os.WriteFile(configPath, []byte(tt.yaml), 0644) + require.NoError(t, err) + + // Load config + result, err := LoadConfig([]string{configPath}) + require.NoError(t, err) + require.NotNil(t, result) + + cfg := result.Config.ConnectRPC + + assert.Equal(t, tt.wantEnabled, cfg.Enabled) + assert.Equal(t, tt.wantListenAddr, cfg.Server.ListenAddr) + assert.Equal(t, tt.wantBaseURL, cfg.Server.BaseURL) + assert.Equal(t, tt.wantGraphQL, cfg.GraphQLEndpoint) + assert.Equal(t, tt.wantProviderID, cfg.Storage.ProviderID) + }) + } +} + +// TestConnectRPCConfiguration_EnvDefaults tests that environment variable +// defaults are applied correctly when no config file values are provided. 
+func TestConnectRPCConfiguration_EnvDefaults(t *testing.T) { + yaml := ` +connect_rpc: + enabled: true + storage: + provider_id: "fs-services" + graphql_endpoint: "http://localhost:3002/graphql" +` + tmpDir := t.TempDir() + configPath := filepath.Join(tmpDir, "config.yaml") + err := os.WriteFile(configPath, []byte(yaml), 0644) + require.NoError(t, err) + + result, err := LoadConfig([]string{configPath}) + require.NoError(t, err) + + // Verify envDefault values are applied + assert.Equal(t, "localhost:5026", result.Config.ConnectRPC.Server.ListenAddr, + "should use envDefault from struct tag") +} + +// TestConnectRPCConfiguration_Integration tests that ConnectRPC config +// integrates properly with the main Config structure through actual loading. +func TestConnectRPCConfiguration_Integration(t *testing.T) { + yaml := ` +version: "1" +listen_addr: "localhost:3002" +connect_rpc: + enabled: true + server: + listen_addr: "0.0.0.0:5026" + storage: + provider_id: "fs-protos" + graphql_endpoint: "http://localhost:3002/graphql" +` + tmpDir := t.TempDir() + configPath := filepath.Join(tmpDir, "config.yaml") + err := os.WriteFile(configPath, []byte(yaml), 0644) + require.NoError(t, err) + + result, err := LoadConfig([]string{configPath}) + require.NoError(t, err) + + // Verify ConnectRPC is properly nested in main config + assert.True(t, result.Config.ConnectRPC.Enabled) + assert.Equal(t, "0.0.0.0:5026", result.Config.ConnectRPC.Server.ListenAddr) + assert.Equal(t, "fs-protos", result.Config.ConnectRPC.Storage.ProviderID) + + // Verify main config is also loaded + assert.Equal(t, "localhost:3002", result.Config.ListenAddr) +} + +// TestConnectRPCConfiguration_MultipleConfigMerge tests that ConnectRPC config +// can be properly merged across multiple config files. 
+func TestConnectRPCConfiguration_MultipleConfigMerge(t *testing.T) { + baseYaml := ` +connect_rpc: + enabled: true + storage: + provider_id: "base-provider" + graphql_endpoint: "http://localhost:3002/graphql" +` + overrideYaml := ` +connect_rpc: + server: + listen_addr: "0.0.0.0:9090" + storage: + provider_id: "override-provider" +` + + tmpDir := t.TempDir() + basePath := filepath.Join(tmpDir, "base.yaml") + overridePath := filepath.Join(tmpDir, "override.yaml") + + err := os.WriteFile(basePath, []byte(baseYaml), 0644) + require.NoError(t, err) + err = os.WriteFile(overridePath, []byte(overrideYaml), 0644) + require.NoError(t, err) + + result, err := LoadConfig([]string{basePath, overridePath}) + require.NoError(t, err) + + // Verify merged config + assert.True(t, result.Config.ConnectRPC.Enabled, "should keep base value") + assert.Equal(t, "http://localhost:3002/graphql", result.Config.ConnectRPC.GraphQLEndpoint, "should keep base value") + assert.Equal(t, "0.0.0.0:9090", result.Config.ConnectRPC.Server.ListenAddr, "should use override value") + assert.Equal(t, "override-provider", result.Config.ConnectRPC.Storage.ProviderID, "should use override value") +} diff --git a/router/pkg/config/testdata/config_defaults.json b/router/pkg/config/testdata/config_defaults.json index b4ddad685e..948b1f1fa8 100644 --- a/router/pkg/config/testdata/config_defaults.json +++ b/router/pkg/config/testdata/config_defaults.json @@ -140,6 +140,17 @@ "RouterURL": "", "OmitToolNamePrefix": false }, + "ConnectRPC": { + "Enabled": false, + "Server": { + "ListenAddr": "localhost:5026", + "BaseURL": "" + }, + "Storage": { + "ProviderID": "" + }, + "GraphQLEndpoint": "" + }, "DemoMode": false, "Modules": null, "Headers": { diff --git a/router/pkg/config/testdata/config_full.json b/router/pkg/config/testdata/config_full.json index d4707aa1a8..d669f6511f 100644 --- a/router/pkg/config/testdata/config_full.json +++ b/router/pkg/config/testdata/config_full.json @@ -175,6 +175,17 @@ "RouterURL": "https://cosmo-router.wundergraph.com", "OmitToolNamePrefix": false }, + "ConnectRPC": { + "Enabled": false, + "Server": { + "ListenAddr": "localhost:5026", + "BaseURL": "" + }, + "Storage": { + "ProviderID": "" + }, + "GraphQLEndpoint": "" + }, "DemoMode": true, "Modules": { "myModule": { diff --git a/router/pkg/connectrpc/connect_util.go b/router/pkg/connectrpc/connect_util.go new file mode 100644 index 0000000000..70198c4cdd --- /dev/null +++ b/router/pkg/connectrpc/connect_util.go @@ -0,0 +1,80 @@ +package connectrpc + +import ( + "net/http" + + "connectrpc.com/connect" +) + +// HTTPStatusToConnectCode maps HTTP status codes to Connect error codes. +// Based on Connect RPC specification and common HTTP status code semantics. 
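As a usage sketch for the status-code mapping that follows, the snippet below shows how a transcoding layer might surface an upstream HTTP failure as a Connect error; the helper name and wrapping message are illustrative and not part of this package's API.

```go
package connectrpc

import (
	"fmt"
	"strconv"

	"connectrpc.com/connect"
)

// upstreamFailureToConnectError is an illustrative helper, not part of this
// change: it converts an upstream HTTP status into a Connect error via
// HTTPStatusToConnectCode and keeps the numeric status in error metadata,
// mirroring the "http-status" key used by the handler.
func upstreamFailureToConnectError(statusCode int, endpoint string) *connect.Error {
	code := HTTPStatusToConnectCode(statusCode) // e.g. 404 -> CodeNotFound, 503 -> CodeUnavailable
	cerr := connect.NewError(code, fmt.Errorf("upstream %s responded with HTTP %d", endpoint, statusCode))
	cerr.Meta().Set("http-status", strconv.Itoa(statusCode))
	return cerr
}
```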
+func HTTPStatusToConnectCode(statusCode int) connect.Code { + switch statusCode { + case http.StatusBadRequest: // 400 + return connect.CodeInvalidArgument + case http.StatusUnauthorized: // 401 + return connect.CodeUnauthenticated + case http.StatusForbidden: // 403 + return connect.CodePermissionDenied + case http.StatusNotFound: // 404 + return connect.CodeNotFound + case http.StatusConflict: // 409 + return connect.CodeAborted + case http.StatusPreconditionFailed: // 412 + return connect.CodeFailedPrecondition + case http.StatusRequestEntityTooLarge: // 413 + return connect.CodeResourceExhausted + case http.StatusRequestedRangeNotSatisfiable: // 416 + return connect.CodeOutOfRange + case http.StatusTooManyRequests: // 429 + return connect.CodeResourceExhausted + case http.StatusRequestTimeout: // 408 + return connect.CodeDeadlineExceeded + case http.StatusGatewayTimeout: // 504 + return connect.CodeDeadlineExceeded + case http.StatusNotImplemented: // 501 + return connect.CodeUnimplemented + case http.StatusServiceUnavailable: // 503 + return connect.CodeUnavailable + case http.StatusInternalServerError: // 500 + return connect.CodeInternal + default: + // For any other status code (including 2xx success codes), + // return CodeUnknown as a safe default + return connect.CodeUnknown + } +} + +// ConnectCodeToHTTPStatus maps Connect error codes to HTTP status codes. +// This is the inverse of HTTPStatusToConnectCode. +func ConnectCodeToHTTPStatus(code connect.Code) int { + switch code { + case connect.CodeInvalidArgument: + return http.StatusBadRequest // 400 + case connect.CodeUnauthenticated: + return http.StatusUnauthorized // 401 + case connect.CodePermissionDenied: + return http.StatusForbidden // 403 + case connect.CodeNotFound: + return http.StatusNotFound // 404 + case connect.CodeAborted: + return http.StatusConflict // 409 + case connect.CodeFailedPrecondition: + return http.StatusPreconditionFailed // 412 + case connect.CodeResourceExhausted: + return http.StatusTooManyRequests // 429 + case connect.CodeOutOfRange: + return http.StatusRequestedRangeNotSatisfiable // 416 + case connect.CodeDeadlineExceeded: + return http.StatusGatewayTimeout // 504 + case connect.CodeUnimplemented: + return http.StatusNotImplemented // 501 + case connect.CodeUnavailable: + return http.StatusServiceUnavailable // 503 + case connect.CodeInternal: + return http.StatusInternalServerError // 500 + default: + // For unknown codes or other errors, return 500 + return http.StatusInternalServerError // 500 + } +} diff --git a/router/pkg/connectrpc/constructor_validation_test.go b/router/pkg/connectrpc/constructor_validation_test.go new file mode 100644 index 0000000000..969a20ac7a --- /dev/null +++ b/router/pkg/connectrpc/constructor_validation_test.go @@ -0,0 +1,199 @@ +package connectrpc + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +// TestConstructorValidation verifies that constructors reject invalid configurations +func TestConstructorValidation(t *testing.T) { + t.Parallel() + + logger := zap.NewNop() + httpClient := &http.Client{} + + tests := []struct { + name string + constructor func() (any, error) + wantErr string + }{ + // RPCHandler validation + { + name: "RPCHandler: empty graphql endpoint", + constructor: func() (any, error) { + return NewRPCHandler(HandlerConfig{ + HTTPClient: httpClient, + Logger: logger, + OperationRegistry: NewOperationRegistry(nil), + ProtoLoader: 
NewProtoLoader(logger), + }) + }, + wantErr: "graphql endpoint cannot be empty", + }, + { + name: "RPCHandler: nil http client", + constructor: func() (any, error) { + return NewRPCHandler(HandlerConfig{ + GraphQLEndpoint: "http://localhost:4000/graphql", + Logger: logger, + OperationRegistry: NewOperationRegistry(nil), + ProtoLoader: NewProtoLoader(logger), + }) + }, + wantErr: "http client cannot be nil", + }, + { + name: "RPCHandler: nil logger", + constructor: func() (any, error) { + return NewRPCHandler(HandlerConfig{ + GraphQLEndpoint: "http://localhost:4000/graphql", + HTTPClient: httpClient, + Logger: nil, + OperationRegistry: NewOperationRegistry(nil), + ProtoLoader: NewProtoLoader(logger), + }) + }, + wantErr: "logger is required", + }, + { + name: "RPCHandler: missing operation registry", + constructor: func() (any, error) { + return NewRPCHandler(HandlerConfig{ + GraphQLEndpoint: "http://localhost:4000/graphql", + HTTPClient: httpClient, + Logger: logger, + ProtoLoader: NewProtoLoader(logger), + }) + }, + wantErr: "operation registry is required", + }, + { + name: "RPCHandler: missing proto loader", + constructor: func() (any, error) { + return NewRPCHandler(HandlerConfig{ + GraphQLEndpoint: "http://localhost:4000/graphql", + HTTPClient: httpClient, + Logger: logger, + OperationRegistry: NewOperationRegistry(nil), + }) + }, + wantErr: "proto loader is required", + }, + + // Server validation + { + name: "Server: empty services directory", + constructor: func() (any, error) { + return NewServer(ServerConfig{ + GraphQLEndpoint: "http://localhost:4000/graphql", + Logger: logger, + }) + }, + wantErr: "services directory must be provided", + }, + { + name: "Server: nil logger", + constructor: func() (any, error) { + return NewServer(ServerConfig{ + ServicesDir: "samples/services", + GraphQLEndpoint: "http://localhost:4000/graphql", + Logger: nil, + }) + }, + wantErr: "logger is required", + }, + { + name: "Server: empty graphql endpoint", + constructor: func() (any, error) { + return NewServer(ServerConfig{ + ServicesDir: "samples/services", + Logger: logger, + }) + }, + wantErr: "graphql endpoint cannot be empty", + }, + + // VanguardService validation + { + name: "VanguardService: nil handler", + constructor: func() (any, error) { + protoLoader := NewProtoLoader(logger) + err := protoLoader.LoadFromDirectory("samples/services/employee.v1") + if err != nil { + return nil, err + } + return NewVanguardService(VanguardServiceConfig{ + Handler: nil, + ProtoLoader: protoLoader, + Logger: logger, + }) + }, + wantErr: "handler cannot be nil", + }, + { + name: "VanguardService: nil proto loader", + constructor: func() (any, error) { + return NewVanguardService(VanguardServiceConfig{ + Handler: &RPCHandler{}, + ProtoLoader: nil, + Logger: logger, + }) + }, + wantErr: "proto loader cannot be nil", + }, + { + name: "VanguardService: no proto services", + constructor: func() (any, error) { + protoLoader := NewProtoLoader(logger) + return NewVanguardService(VanguardServiceConfig{ + Handler: &RPCHandler{}, + ProtoLoader: protoLoader, + Logger: logger, + }) + }, + wantErr: "no proto services found", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := tt.constructor() + assert.Error(t, err) + assert.Nil(t, result) + assert.Contains(t, err.Error(), tt.wantErr) + }) + } +} + +// TestConstructorDefaults tests that constructors apply sensible defaults +func TestConstructorDefaults(t *testing.T) { + t.Parallel() + + t.Run("RPCHandler: adds protocol to 
endpoint", func(t *testing.T) { + handler, err := NewRPCHandler(HandlerConfig{ + GraphQLEndpoint: "localhost:4000/graphql", + HTTPClient: &http.Client{}, + Logger: zap.NewNop(), + OperationRegistry: NewOperationRegistry(nil), + ProtoLoader: NewProtoLoader(zap.NewNop()), + }) + + require.NoError(t, err) + assert.Equal(t, "http://localhost:4000/graphql", handler.graphqlEndpoint) + }) + + t.Run("Server: uses default listen address", func(t *testing.T) { + server, err := NewServer(ServerConfig{ + ServicesDir: "samples/services", + GraphQLEndpoint: "http://localhost:4000/graphql", + Logger: zap.NewNop(), + }) + + require.NoError(t, err) + assert.Equal(t, "0.0.0.0:5026", server.config.ListenAddr) + }) +} diff --git a/router/pkg/connectrpc/error_handling_test.go b/router/pkg/connectrpc/error_handling_test.go new file mode 100644 index 0000000000..664ab4989c --- /dev/null +++ b/router/pkg/connectrpc/error_handling_test.go @@ -0,0 +1,376 @@ +package connectrpc + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "testing" + + "connectrpc.com/connect" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +// errorTestCase defines a test case for error handling +type errorTestCase struct { + name string + httpStatus int + graphqlResponse string + expectedConnectCode connect.Code + expectedErrorContains string + expectedMetadata map[string]string + expectedGraphQLErrors string // JSON string for GraphQL errors + expectedPartialData string // JSON string for partial data +} + +// TestErrorHandling consolidates all error handling tests with shared setup +func TestErrorHandling(t *testing.T) { + t.Parallel() + + tests := []errorTestCase{ + // HTTP Transport Errors + { + name: "HTTP 401 Unauthorized", + httpStatus: http.StatusUnauthorized, + graphqlResponse: "Unauthorized", + expectedConnectCode: connect.CodeUnauthenticated, + expectedErrorContains: "GraphQL request failed with HTTP 401", + expectedMetadata: map[string]string{ + MetaKeyHTTPStatus: "401", + MetaKeyErrorClassification: ErrorClassificationCritical, + }, + }, + { + name: "HTTP 500 Internal Server Error", + httpStatus: http.StatusInternalServerError, + graphqlResponse: "Internal Server Error", + expectedConnectCode: connect.CodeInternal, + expectedErrorContains: "GraphQL request failed with HTTP 500", + expectedMetadata: map[string]string{ + MetaKeyHTTPStatus: "500", + MetaKeyErrorClassification: ErrorClassificationCritical, + }, + }, + { + name: "HTTP 503 Service Unavailable", + httpStatus: http.StatusServiceUnavailable, + graphqlResponse: "Service Unavailable", + expectedConnectCode: connect.CodeUnavailable, + expectedErrorContains: "GraphQL request failed with HTTP 503", + expectedMetadata: map[string]string{ + MetaKeyHTTPStatus: "503", + MetaKeyErrorClassification: ErrorClassificationCritical, + }, + }, + + // GraphQL CRITICAL Errors (no data) + { + name: "GraphQL error with null data", + httpStatus: http.StatusOK, + graphqlResponse: `{ + "errors": [ + { + "message": "Field 'user' not found", + "path": ["user"], + "locations": [{"line": 2, "column": 3}] + } + ], + "data": null + }`, + expectedConnectCode: connect.CodeUnknown, + expectedErrorContains: "GraphQL operation failed", + expectedMetadata: map[string]string{ + MetaKeyErrorClassification: ErrorClassificationCritical, + }, + expectedGraphQLErrors: `[ + { + "message": "Field 'user' not found", + "path": ["user"], + "locations": [{"line": 2, "column": 3}] + } + ]`, + }, + { + name: "Multiple GraphQL errors with no data", 
+ httpStatus: http.StatusOK, + graphqlResponse: `{ + "errors": [ + { + "message": "Authentication required", + "extensions": {"code": "UNAUTHENTICATED"} + }, + { + "message": "Invalid token", + "extensions": {"code": "INVALID_TOKEN"} + } + ], + "data": null + }`, + expectedConnectCode: connect.CodeUnknown, + expectedErrorContains: "GraphQL operation failed", + expectedMetadata: map[string]string{ + MetaKeyErrorClassification: ErrorClassificationCritical, + }, + expectedGraphQLErrors: `[ + { + "message": "Authentication required", + "extensions": {"code": "UNAUTHENTICATED"} + }, + { + "message": "Invalid token", + "extensions": {"code": "INVALID_TOKEN"} + } + ]`, + }, + + // GraphQL NON-CRITICAL Errors (with partial data) + { + name: "Partial success - some fields succeeded, some failed", + httpStatus: http.StatusOK, + graphqlResponse: `{ + "data": { + "user": { + "id": "123", + "name": "John Doe", + "email": null + } + }, + "errors": [ + { + "message": "Email field requires authentication", + "path": ["user", "email"], + "extensions": {"code": "FORBIDDEN"} + } + ] + }`, + expectedConnectCode: connect.CodeUnknown, + expectedErrorContains: "GraphQL partial success with errors", + expectedMetadata: map[string]string{ + MetaKeyErrorClassification: ErrorClassificationPartial, + }, + expectedPartialData: `{ + "user": { + "id": "123", + "name": "John Doe", + "email": null + } + }`, + expectedGraphQLErrors: `[ + { + "message": "Email field requires authentication", + "path": ["user", "email"], + "extensions": {"code": "FORBIDDEN"} + } + ]`, + }, + { + name: "Multiple field errors with partial data", + httpStatus: http.StatusOK, + graphqlResponse: `{ + "data": { + "posts": [ + {"id": "1", "title": "Post 1"}, + null, + {"id": "3", "title": "Post 3"} + ] + }, + "errors": [ + { + "message": "Post not found", + "path": ["posts", 1] + }, + { + "message": "Access denied", + "path": ["posts", 1, "author"] + } + ] + }`, + expectedConnectCode: connect.CodeUnknown, + expectedErrorContains: "GraphQL partial success with errors", + expectedMetadata: map[string]string{ + MetaKeyErrorClassification: ErrorClassificationPartial, + }, + expectedPartialData: `{ + "posts": [ + {"id": "1", "title": "Post 1"}, + null, + {"id": "3", "title": "Post 3"} + ] + }`, + expectedGraphQLErrors: `[ + { + "message": "Post not found", + "path": ["posts", 1] + }, + { + "message": "Access denied", + "path": ["posts", 1, "author"] + } + ]`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Setup mock HTTP client + httpClient := MockHTTPClient(tt.httpStatus, tt.graphqlResponse) + + handler, err := NewRPCHandler(HandlerConfig{ + GraphQLEndpoint: "http://localhost:4000/graphql", + HTTPClient: httpClient, + Logger: zap.NewNop(), + OperationRegistry: NewOperationRegistry(nil), + ProtoLoader: NewProtoLoader(zap.NewNop()), + }) + require.NoError(t, err) + + // Execute + ctx := context.Background() + _, err = handler.executeGraphQL(ctx, "query { test }", json.RawMessage("{}")) + + // Assert error + require.Error(t, err) + + // Check it's a Connect error + var connectErr *connect.Error + require.True(t, errors.As(err, &connectErr)) + + // Check error code + assert.Equal(t, tt.expectedConnectCode, connectErr.Code()) + + // Check error message + assert.Contains(t, connectErr.Message(), tt.expectedErrorContains) + + // Check metadata + for key, expectedValue := range tt.expectedMetadata { + actualValue := connectErr.Meta().Get(key) + assert.Equal(t, expectedValue, actualValue, "metadata key: %s", key) + } + + // 
Check GraphQL errors if expected + if tt.expectedGraphQLErrors != "" { + errorsJSON := connectErr.Meta().Get(MetaKeyGraphQLErrors) + require.NotEmpty(t, errorsJSON) + require.JSONEq(t, tt.expectedGraphQLErrors, errorsJSON, "GraphQL errors should match") + } + + // Check partial data if expected + if tt.expectedPartialData != "" { + partialData := connectErr.Meta().Get(MetaKeyGraphQLPartialData) + require.NotEmpty(t, partialData) + require.JSONEq(t, tt.expectedPartialData, partialData, "Partial data should match") + } + }) + } +} + +// TestSuccessfulGraphQLResponses tests successful GraphQL responses +func TestSuccessfulGraphQLResponses(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + graphqlResponse string + expectedData string + }{ + { + name: "Simple successful query", + graphqlResponse: `{ + "data": { + "user": { + "id": "123", + "name": "John Doe" + } + } + }`, + expectedData: `{ + "user": { + "id": "123", + "name": "John Doe" + } + }`, + }, + { + name: "Successful query with nested data", + graphqlResponse: `{ + "data": { + "users": [ + {"id": "1", "name": "Alice"}, + {"id": "2", "name": "Bob"} + ] + } + }`, + expectedData: `{ + "users": [ + {"id": "1", "name": "Alice"}, + {"id": "2", "name": "Bob"} + ] + }`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Setup mock HTTP client + httpClient := MockHTTPClient(http.StatusOK, tt.graphqlResponse) + + handler, err := NewRPCHandler(HandlerConfig{ + GraphQLEndpoint: "http://localhost:4000/graphql", + HTTPClient: httpClient, + Logger: zap.NewNop(), + OperationRegistry: NewOperationRegistry(nil), + ProtoLoader: NewProtoLoader(zap.NewNop()), + }) + require.NoError(t, err) + + // Execute + ctx := context.Background() + data, err := handler.executeGraphQL(ctx, "query { test }", json.RawMessage("{}")) + + // Assert success + require.NoError(t, err) + require.NotNil(t, data) + + // Check data content + require.JSONEq(t, tt.expectedData, string(data)) + }) + } +} + +// TestResponseBodyNotInMetadata tests that response bodies are NOT included in client-facing metadata +func TestResponseBodyNotInMetadata(t *testing.T) { + t.Parallel() + + // Setup mock HTTP client with a response body + httpClient := MockHTTPClient(http.StatusInternalServerError, "Internal Server Error") + + handler, err := NewRPCHandler(HandlerConfig{ + GraphQLEndpoint: "http://localhost:4000/graphql", + HTTPClient: httpClient, + Logger: zap.NewNop(), + OperationRegistry: NewOperationRegistry(nil), + ProtoLoader: NewProtoLoader(zap.NewNop()), + }) + require.NoError(t, err) + + // Execute + ctx := context.Background() + _, err = handler.executeGraphQL(ctx, "query { test }", json.RawMessage("{}")) + + // Assert error + require.Error(t, err) + + // Check it's a Connect error + var connectErr *connect.Error + require.True(t, errors.As(err, &connectErr)) + + // Verify response body is NOT in metadata (security requirement) + responseBodyMeta := connectErr.Meta().Get(MetaKeyHTTPResponseBody) + assert.Empty(t, responseBodyMeta, "Response body should NOT be included in client-facing metadata to prevent information leakage") + + // Verify other metadata is still present + assert.NotEmpty(t, connectErr.Meta().Get(MetaKeyHTTPStatus), "HTTP status should be present") + assert.NotEmpty(t, connectErr.Meta().Get(MetaKeyErrorClassification), "Error classification should be present") +} diff --git a/router/pkg/connectrpc/handler.go b/router/pkg/connectrpc/handler.go new file mode 100644 index 0000000000..2944f021fd --- /dev/null +++ 
b/router/pkg/connectrpc/handler.go @@ -0,0 +1,685 @@ +package connectrpc + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + + "connectrpc.com/connect" + "go.uber.org/zap" + "google.golang.org/protobuf/reflect/protoreflect" + + "github.com/wundergraph/cosmo/router/internal/headers" +) + +var ( + ErrInternalServer = errors.New("internal server error") +) + +// requestHeadersKey is a custom context key for storing request headers +type requestHeadersKey struct{} + +// withRequestHeaders adds request headers to the context +func withRequestHeaders(ctx context.Context, headers http.Header) context.Context { + return context.WithValue(ctx, requestHeadersKey{}, headers) +} + +// headersFromContext extracts the request headers from the context +func headersFromContext(ctx context.Context) (http.Header, error) { + value := ctx.Value(requestHeadersKey{}) + if value == nil { + return nil, fmt.Errorf("missing request headers") + } + headers, ok := value.(http.Header) + if !ok { + return nil, fmt.Errorf("invalid request headers type") + } + return headers, nil +} + +// Metadata keys for Connect error metadata +const ( + MetaKeyHTTPStatus = "http-status" + MetaKeyErrorClassification = "error-classification" + MetaKeyGraphQLErrors = "graphql-errors" + MetaKeyGraphQLPartialData = "graphql-partial-data" + MetaKeyHTTPResponseBody = "http-response-body" +) + +// Error classification values +const ( + ErrorClassificationCritical = "CRITICAL" + ErrorClassificationPartial = "PARTIAL" +) + +// GraphQLRequest represents a GraphQL request structure +type GraphQLRequest struct { + Query string `json:"query"` + Variables json.RawMessage `json:"variables,omitempty"` +} + +// GraphQLErrorLocation represents the location of an error in the GraphQL query +type GraphQLErrorLocation struct { + Line int `json:"line"` + Column int `json:"column"` +} + +// GraphQLError represents an error returned in a GraphQL response +type GraphQLError struct { + Message string `json:"message"` + Path []any `json:"path,omitempty"` + Locations []GraphQLErrorLocation `json:"locations,omitempty"` + Extensions map[string]any `json:"extensions,omitempty"` +} + +// GraphQLResponse represents a GraphQL response structure +type GraphQLResponse struct { + Errors []GraphQLError `json:"errors,omitempty"` + Data json.RawMessage `json:"data,omitempty"` +} + +// RPCHandler handles RPC requests and orchestrates GraphQL execution +type RPCHandler struct { + graphqlEndpoint string + httpClient *http.Client + logger *zap.Logger + operationRegistry *OperationRegistry + protoLoader *ProtoLoader +} + +// HandlerConfig contains configuration for the RPC handler +type HandlerConfig struct { + GraphQLEndpoint string + HTTPClient *http.Client + Logger *zap.Logger + OperationRegistry *OperationRegistry + ProtoLoader *ProtoLoader +} + +// NewRPCHandler creates a new RPC handler +func NewRPCHandler(config HandlerConfig) (*RPCHandler, error) { + if config.GraphQLEndpoint == "" { + return nil, fmt.Errorf("graphql endpoint cannot be empty") + } + + if config.HTTPClient == nil { + return nil, fmt.Errorf("http client cannot be nil") + } + + if config.Logger == nil { + return nil, fmt.Errorf("logger is required") + } + + if config.OperationRegistry == nil { + return nil, fmt.Errorf("operation registry is required") + } + + if config.ProtoLoader == nil { + return nil, fmt.Errorf("proto loader is required") + } + + // Ensure the endpoint has a protocol + if !strings.Contains(config.GraphQLEndpoint, "://") { + 
config.GraphQLEndpoint = "http://" + config.GraphQLEndpoint + } + + return &RPCHandler{ + graphqlEndpoint: config.GraphQLEndpoint, + httpClient: config.HTTPClient, + logger: config.Logger, + operationRegistry: config.OperationRegistry, + protoLoader: config.ProtoLoader, + }, nil +} + +// HandleRPC processes an RPC request and returns a response +// serviceName: fully qualified service name (e.g., "mypackage.MyService") +// methodName: the RPC method name (e.g., "GetUser" or "QueryGetUser") +// requestJSON: the JSON-encoded request body +// ctx: request context with headers +func (h *RPCHandler) HandleRPC(ctx context.Context, serviceName, methodName string, requestJSON []byte) ([]byte, error) { + h.logger.Debug("handling RPC request", + zap.String("service", serviceName), + zap.String("method", methodName), + zap.String("request_json", string(requestJSON))) + + // Look up operation from registry scoped to this service + // This ensures operations can only be called from their owning service + // The method name must exactly match the operation name + operation := h.operationRegistry.GetOperationForService(serviceName, methodName) + if operation == nil { + // Log all available operations for this service to help diagnose the issue + allOps := h.operationRegistry.GetAllOperationsForService(serviceName) + var availableOps []string + for _, op := range allOps { + availableOps = append(availableOps, op.Name) + } + h.logger.Error("operation not found", + zap.String("service", serviceName), + zap.String("requested_method", methodName), + zap.Strings("available_operations", availableOps)) + return nil, fmt.Errorf("operation not found for service %s: %s", serviceName, methodName) + } + + h.logger.Debug("resolved operation", + zap.String("service", serviceName), + zap.String("rpc_method", methodName), + zap.String("operation", operation.Name), + zap.String("type", operation.OperationType)) + + // Convert proto JSON to GraphQL variables + // This handles: + // - Field name mapping via graphql_variable_name options (e.g., hasPets → HAS_PETS) + // - Enum prefix stripping (e.g., MOOD_HAPPY → HAPPY) + // - Omitting _UNSPECIFIED enum values + variables, err := h.convertProtoJSONToGraphQLVariables(serviceName, methodName, requestJSON) + if err != nil { + return nil, fmt.Errorf("failed to convert proto JSON to GraphQL variables: %w", err) + } + + // Execute the GraphQL query + responseJSON, err := h.executeGraphQL(ctx, operation.OperationString, variables) + if err != nil { + return nil, fmt.Errorf("failed to execute GraphQL query: %w", err) + } + + return responseJSON, nil +} + +// convertProtoJSONToGraphQLVariables processes proto JSON for GraphQL compatibility. +// +// IMPORTANT: Field names ARE converted here when graphql_variable_name field options are present. +// Protobuf JSON marshaling automatically converts field names from snake_case (in .proto files) +// to camelCase (in JSON) per the protobuf JSON specification. By the time this function receives +// the data, field names are already in camelCase format. +// +// This function performs two types of transformations: +// 1. Field name mapping: Uses graphql_variable_name field options to rename fields +// Example: "hasPets" (proto JSON) → "HAS_PETS" (GraphQL variable) +// 2. 
Enum value transformations: +// - Strips proto enum type prefixes: MOOD_HAPPY → HAPPY, STATUS_ACTIVE → ACTIVE +// - Omits _UNSPECIFIED enum values (proto default values that don't exist in GraphQL) +// +// DESIGN RATIONALE: +// - Proto field options allow explicit mapping between proto JSON and GraphQL variable names +// - Proto enums include type prefix for namespacing (MOOD_HAPPY, STATUS_ACTIVE) +// - GraphQL enums omit the prefix for cleaner API (HAPPY, ACTIVE) +// - _UNSPECIFIED is proto's zero value (doesn't exist in GraphQL schemas) +// +// This matches the behavior of tools like protographic which generate GraphQL schemas +// from proto definitions. +func (h *RPCHandler) convertProtoJSONToGraphQLVariables(serviceName, methodName string, protoJSON []byte) (json.RawMessage, error) { + if len(protoJSON) == 0 { + return json.RawMessage("{}"), nil + } + + var protoData map[string]any + if err := json.Unmarshal(protoJSON, &protoData); err != nil { + return nil, fmt.Errorf("failed to unmarshal proto JSON: %w", err) + } + + // Get proto message descriptor for enum detection and field options + // If protoLoader is not available, we can't do transformations, so return as-is + if h.protoLoader == nil { + return protoJSON, nil + } + + method, err := h.protoLoader.GetMethod(serviceName, methodName) + if err != nil { + // Method not found in proto loader - this shouldn't happen in production + // since operations are registered from proto definitions, but in tests + // or edge cases we may not have the full schema loaded + h.logger.Debug("method not found in proto loader, skipping transformations", + zap.String("service", serviceName), + zap.String("method", methodName), + zap.Error(err)) + return protoJSON, nil + } + + messageDesc := method.InputMessageDescriptor + if messageDesc == nil { + // This shouldn't happen with valid proto definitions + h.logger.Warn("input message descriptor is nil, skipping transformations", + zap.String("service", serviceName), + zap.String("method", methodName)) + return protoJSON, nil + } + + // Check if any transformations are actually needed + needsTransformation := h.needsTransformation(protoData, messageDesc) + if !needsTransformation { + // Input already matches expected format, return as-is + return protoJSON, nil + } + + // Create a set to track fields that came from _UNSPECIFIED enums + unspecifiedFields := make(map[string]bool) + + graphqlData := h.convertKeysRecursiveWithTracking(protoData, messageDesc, "", unspecifiedFields) + + graphqlJSON, err := json.Marshal(graphqlData) + if err != nil { + return nil, fmt.Errorf("failed to marshal GraphQL variables: %w", err) + } + + return graphqlJSON, nil +} + +// convertKeysRecursiveWithTracking processes data recursively to: +// 1. Rename fields based on graphql_variable_name field options +// 2. Strip proto enum prefixes using schema information +// Tracks fields that came from _UNSPECIFIED enums and omits only those when empty. 
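To make the enum handling described in the rationale above concrete, here is a small sketch of the prefix-stripping rule; it relies on the stripEnumPrefixWithType helper defined further down in this file, and the example values are illustrative.

```go
package connectrpc

import "fmt"

// enumConversionSketch is illustrative only: it exercises the prefix-stripping
// rule applied when proto enum values are forwarded as GraphQL enum values.
func enumConversionSketch() {
	fmt.Println(stripEnumPrefixWithType("MOOD_HAPPY", "Mood"))               // HAPPY
	fmt.Println(stripEnumPrefixWithType("USER_STATUS_ACTIVE", "UserStatus")) // ACTIVE
	fmt.Println(stripEnumPrefixWithType("MOOD_UNSPECIFIED", "Mood") == "")   // true: omitted from GraphQL variables
}
```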
+func (h *RPCHandler) convertKeysRecursiveWithTracking(data any, messageDesc protoreflect.MessageDescriptor, pathPrefix string, unspecifiedFields map[string]bool) any { + switch v := data.(type) { + case map[string]any: + result := make(map[string]any) + for key, value := range v { + fieldPath := pathPrefix + key + + var fieldDesc protoreflect.FieldDescriptor + if messageDesc != nil { + // Try to find field descriptor - protobuf JSON uses camelCase, but descriptors use original names + fieldDesc = getFieldByJSONName(messageDesc, key) + } + + convertedValue := h.convertValueRecursiveWithTracking(value, fieldDesc, fieldPath, unspecifiedFields) + + // Only omit empty strings that came from _UNSPECIFIED enum conversions + if strVal, ok := convertedValue.(string); ok && strVal == "" { + if unspecifiedFields[fieldPath] { + // This empty string came from an _UNSPECIFIED enum, omit it + continue + } + // Otherwise, it's a legitimate empty string, keep it + } + + // Check if field has graphql_variable_name option + outputKey := key + if fieldDesc != nil { + if graphqlVarName := getGraphQLVariableName(fieldDesc); graphqlVarName != "" { + outputKey = graphqlVarName + } + } + + result[outputKey] = convertedValue + } + return result + case []any: + result := make([]any, len(v)) + for i, item := range v { + itemPath := fmt.Sprintf("%s[%d].", pathPrefix, i) + result[i] = h.convertKeysRecursiveWithTracking(item, messageDesc, itemPath, unspecifiedFields) + } + return result + default: + return h.convertValueRecursiveWithTracking(v, nil, pathPrefix, unspecifiedFields) + } +} + +// convertValueRecursiveWithTracking processes a value using field descriptor for schema-aware enum detection +// and marks fields that came from _UNSPECIFIED enums +func (h *RPCHandler) convertValueRecursiveWithTracking(value any, fieldDesc protoreflect.FieldDescriptor, fieldPath string, unspecifiedFields map[string]bool) any { + switch v := value.(type) { + case map[string]any: + var nestedDesc protoreflect.MessageDescriptor + if fieldDesc != nil { + nestedDesc = getMessageType(fieldDesc) + } + return h.convertKeysRecursiveWithTracking(v, nestedDesc, fieldPath+".", unspecifiedFields) + + case []any: + result := make([]any, len(v)) + for i, item := range v { + itemPath := fmt.Sprintf("%s[%d]", fieldPath, i) + result[i] = h.convertValueRecursiveWithTracking(item, fieldDesc, itemPath, unspecifiedFields) + } + return result + + case string: + // Schema-aware: check if field is an enum type + if fieldDesc != nil { + enumDesc := getEnumType(fieldDesc) + if enumDesc != nil { + enumTypeName := string(enumDesc.Name()) + stripped := stripEnumPrefixWithType(v, enumTypeName) + + // Mark this field if it was an _UNSPECIFIED enum + if stripped == "" && v != "" { + // The original value was non-empty but became empty after stripping + // This means it was an _UNSPECIFIED enum + unspecifiedFields[fieldPath] = true + } + + return stripped + } + } + + return v + + default: + return v + } +} + +// stripEnumPrefixWithType removes the enum type prefix using the known enum type name from schema +// Example: stripEnumPrefixWithType("USER_STATUS_ACTIVE", "UserStatus") -> "ACTIVE" +// Special case: _UNSPECIFIED values are treated as empty string (will be omitted or null in GraphQL) +func stripEnumPrefixWithType(protoEnumValue, enumTypeName string) string { + // Convert enum type name to UPPER_SNAKE_CASE (matching protographic's logic) + prefix := toUpperSnakeCase(enumTypeName) + "_" + + if after, ok := strings.CutPrefix(protoEnumValue, prefix); ok { + 
stripped := after + + // Handle _UNSPECIFIED values: these are proto-only (value 0) and don't exist in GraphQL + // Return empty string so they can be omitted or treated as null + if stripped == "UNSPECIFIED" { + return "" + } + + return stripped + } + + // If prefix doesn't match, return as-is (shouldn't happen with valid proto) + return protoEnumValue +} + +// toUpperSnakeCase converts a string to UPPER_SNAKE_CASE +// Example: "UserStatus" -> "USER_STATUS" +func toUpperSnakeCase(s string) string { + // If already contains underscores or is all uppercase, just uppercase it + if strings.Contains(s, "_") || s == strings.ToUpper(s) { + return strings.ToUpper(s) + } + + var result strings.Builder + for i, r := range s { + // Add underscore before uppercase letters (except first character) + if i > 0 && r >= 'A' && r <= 'Z' { + // Check if previous character was lowercase + prev := rune(s[i-1]) + if prev >= 'a' && prev <= 'z' { + result.WriteByte('_') + } + } + result.WriteRune(r) + } + return strings.ToUpper(result.String()) +} + +// makeCriticalGraphQLError creates a Connect error for GraphQL errors with no data (complete failure). +// This follows Relay's error classification pattern for critical errors. +func (h *RPCHandler) makeCriticalGraphQLError(errors []GraphQLError, httpStatus int) error { + // Serialize GraphQL errors to JSON for metadata + errorsJSON, _ := json.Marshal(errors) + + // Create Connect error with CRITICAL classification + // Use CodeUnknown for GraphQL errors (not CodeInternal which implies server defects) + connectErr := connect.NewError( + connect.CodeUnknown, + fmt.Errorf("GraphQL operation failed: %s", errors[0].Message), + ) + connectErr.Meta().Set(MetaKeyErrorClassification, ErrorClassificationCritical) + connectErr.Meta().Set(MetaKeyGraphQLErrors, string(errorsJSON)) + connectErr.Meta().Set(MetaKeyHTTPStatus, fmt.Sprintf("%d", httpStatus)) + + // Log all error messages + var errorMessages []string + for _, gqlErr := range errors { + errorMessages = append(errorMessages, gqlErr.Message) + } + h.logger.Error("CRITICAL GraphQL errors - no data returned", + zap.Strings("error_messages", errorMessages), + zap.Int("error_count", len(errors))) + + return connectErr +} + +// makePartialGraphQLError creates a Connect error for GraphQL errors with partial data (partial success). +// This follows Relay's pattern for field-level errors where some data was successfully retrieved. 
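To show how a caller can act on the PARTIAL classification produced below, here is a client-side sketch (not part of this change) that recovers the partial data attached to the error metadata; the helper name is hypothetical.

```go
package connectrpc

import (
	"encoding/json"
	"errors"

	"connectrpc.com/connect"
)

// partialDataFromError is an illustrative, client-side sketch: when a call
// fails with the PARTIAL classification, it extracts whatever data the
// GraphQL layer could resolve from the Connect error metadata.
func partialDataFromError(err error) (json.RawMessage, bool) {
	var cerr *connect.Error
	if !errors.As(err, &cerr) {
		return nil, false
	}
	if cerr.Meta().Get(MetaKeyErrorClassification) != ErrorClassificationPartial {
		return nil, false
	}
	data := cerr.Meta().Get(MetaKeyGraphQLPartialData)
	if data == "" {
		return nil, false
	}
	return json.RawMessage(data), true
}
```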
+func (h *RPCHandler) makePartialGraphQLError(errors []GraphQLError, data json.RawMessage, httpStatus int) error { + // Serialize errors to JSON for metadata + errorsJSON, _ := json.Marshal(errors) + + // Compact the partial data JSON to remove whitespace + var compactData bytes.Buffer + if err := json.Compact(&compactData, data); err == nil { + data = compactData.Bytes() + } + + // Create Connect error with PARTIAL classification + connectErr := connect.NewError( + connect.CodeUnknown, // Use Unknown for partial failures + fmt.Errorf("GraphQL partial success with errors"), + ) + connectErr.Meta().Set(MetaKeyErrorClassification, ErrorClassificationPartial) + connectErr.Meta().Set(MetaKeyGraphQLErrors, string(errorsJSON)) + connectErr.Meta().Set(MetaKeyGraphQLPartialData, string(data)) + connectErr.Meta().Set(MetaKeyHTTPStatus, fmt.Sprintf("%d", httpStatus)) + + // Log info for partial success (this is a valid GraphQL pattern) + var errorMessages []string + for _, gqlErr := range errors { + errorMessages = append(errorMessages, gqlErr.Message) + } + h.logger.Info("PARTIAL GraphQL response - data returned with field errors", + zap.Strings("error_messages", errorMessages), + zap.Int("error_count", len(errors)), + zap.Bool("has_partial_data", true)) + + return connectErr +} + +// executeGraphQL executes a GraphQL query against the router endpoint +func (h *RPCHandler) executeGraphQL(ctx context.Context, query string, variables json.RawMessage) ([]byte, error) { + // Create the GraphQL request + graphqlRequest := GraphQLRequest{ + Query: query, + Variables: variables, + } + + var requestBody bytes.Buffer + if err := json.NewEncoder(&requestBody).Encode(graphqlRequest); err != nil { + return nil, fmt.Errorf("failed to marshal GraphQL request: %w", err) + } + + // Create HTTP request + req, err := http.NewRequestWithContext(ctx, http.MethodPost, h.graphqlEndpoint, &requestBody) + if err != nil { + return nil, fmt.Errorf("failed to create HTTP request: %w", err) + } + + // Forward headers from the original RPC request + reqHeaders, err := headersFromContext(ctx) + if err != nil { + h.logger.Debug("no headers in context", zap.Error(err)) + } else { + // Copy headers, skipping those that shouldn't be forwarded + for key, values := range reqHeaders { + // Normalize header key to canonical form for case-insensitive comparison + canonicalKey := http.CanonicalHeaderKey(key) + if _, skip := headers.SkippedHeaders[canonicalKey]; skip { + continue + } + for _, value := range values { + req.Header.Add(key, value) + } + } + } + + // Set required headers for GraphQL + req.Header.Set("Content-Type", "application/json; charset=utf-8") + req.Header.Set("Accept", "application/json") + + // Execute the request + resp, err := h.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to execute HTTP request: %w", err) + } + defer resp.Body.Close() + + // Read the response body + responseBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read response body: %w", err) + } + + // Check for HTTP errors (non-2xx status codes) + if resp.StatusCode != http.StatusOK { + // Map HTTP status to Connect error code + code := HTTPStatusToConnectCode(resp.StatusCode) + + // Log full response body server-side only + h.logger.Error("HTTP error from GraphQL endpoint", + zap.Int("status_code", resp.StatusCode), + zap.String("connect_code", code.String()), + zap.Int("response_body_length", len(responseBody)), + zap.String("response_body", string(responseBody))) + + // Create Connect 
error with metadata + // Note: We do NOT include the response body in client-facing metadata to prevent + // leaking sensitive information (internal URLs, stack traces, auth tokens, etc.) + connectErr := connect.NewError(code, fmt.Errorf("GraphQL request failed with HTTP %d", resp.StatusCode)) + connectErr.Meta().Set(MetaKeyErrorClassification, ErrorClassificationCritical) + connectErr.Meta().Set(MetaKeyHTTPStatus, fmt.Sprintf("%d", resp.StatusCode)) + + return nil, connectErr + } + + // Parse the GraphQL response to check for errors + var graphqlResponse GraphQLResponse + if err := json.Unmarshal(responseBody, &graphqlResponse); err != nil { + // If we can't parse it, return the raw response (backward compatibility) + h.logger.Error("failed to parse GraphQL response", + zap.Error(err), + zap.Int("response_body_length", len(responseBody))) + return nil, connect.NewError(connect.CodeInternal, ErrInternalServer) + } + + // Check if we have GraphQL errors + if len(graphqlResponse.Errors) > 0 { + // Determine if this is CRITICAL or PARTIAL based on data presence + // An empty object {} is valid data in GraphQL (e.g., when all fields are nullable and null) + hasData := len(graphqlResponse.Data) > 0 && string(graphqlResponse.Data) != "null" + + if !hasData { + // CRITICAL: Errors with no data - complete failure + return nil, h.makeCriticalGraphQLError(graphqlResponse.Errors, resp.StatusCode) + } + + // PARTIAL: Errors with partial data - partial success + return nil, h.makePartialGraphQLError(graphqlResponse.Errors, graphqlResponse.Data, resp.StatusCode) + } + + // Success case: Return only the data field + // The proto response message expects just the data payload: {...} + // Not the GraphQL wrapper: {"data": {...}, "errors": [...]} + if len(graphqlResponse.Data) > 0 && string(graphqlResponse.Data) != "null" { + return graphqlResponse.Data, nil + } + + // Edge case: No errors but also no data (empty response) + // Return empty object to ensure valid JSON for proto unmarshaling + // The caller (vanguard_service.go) expects non-nil JSON bytes + return []byte("{}"), nil +} + +// GetOperationCount returns the number of operations available +func (h *RPCHandler) GetOperationCount() int { + if h.operationRegistry == nil { + return 0 + } + return h.operationRegistry.Count() +} + +// needsTransformation checks if any field in the data needs transformation +// (either has graphql_variable_name option or contains _UNSPECIFIED enum values) +func (h *RPCHandler) needsTransformation(data any, messageDesc protoreflect.MessageDescriptor) bool { + switch v := data.(type) { + case map[string]any: + for key, value := range v { + var fieldDesc protoreflect.FieldDescriptor + if messageDesc != nil { + fieldDesc = getFieldByJSONName(messageDesc, key) + } + + // Check if this field has a graphql_variable_name option + if fieldDesc != nil { + if graphqlVarName := getGraphQLVariableName(fieldDesc); graphqlVarName != "" { + // Field needs renaming + return true + } + + // Check if field is an enum (any enum value needs transformation for prefix stripping) + if enumDesc := getEnumType(fieldDesc); enumDesc != nil { + if strVal, ok := value.(string); ok && strVal != "" { + // Any non-empty enum string value needs transformation + // to strip the enum type prefix (e.g., MOOD_HAPPY -> HAPPY) + return true + } + } + + // Recursively check nested messages + if msgDesc := getMessageType(fieldDesc); msgDesc != nil { + if h.needsTransformation(value, msgDesc) { + return true + } + } + } + } + return false + case []any: + // 
Check array elements + for _, item := range v { + if h.needsTransformation(item, messageDesc) { + return true + } + } + return false + default: + return false + } +} + +// getGraphQLVariableName extracts the graphql_variable_name field option if present +func getGraphQLVariableName(fieldDesc protoreflect.FieldDescriptor) string { + if fieldDesc == nil { + return "" + } + + opts := fieldDesc.Options() + if opts == nil { + return "" + } + + // Get the descriptor for the options message + optsReflect := opts.ProtoReflect() + if !optsReflect.IsValid() { + return "" + } + + // The graphql_variable_name option is defined in proto/com/wundergraph/connectrpc/options/v1/annotations.proto + // Extension fields are stored in the message's extension fields, not in + // the descriptor's Extensions(). We need to iterate through the actual + // extension fields that are SET on this particular options instance. + + // Range over all fields that are actually set on this options message + var result string + optsReflect.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + // Check if this is the graphql_variable_name extension field + if fd.IsExtension() && fd.Number() == GraphQLVariableNameFieldNumber { + if v.IsValid() { + result = v.String() + } + return false // Stop iteration + } + return true // Continue iteration + }) + + return result +} diff --git a/router/pkg/connectrpc/handler_test.go b/router/pkg/connectrpc/handler_test.go new file mode 100644 index 0000000000..b85206b2da --- /dev/null +++ b/router/pkg/connectrpc/handler_test.go @@ -0,0 +1,179 @@ +package connectrpc + +import ( + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +// setupHandlerWithSchema creates a handler with loaded proto schema for testing +func setupHandlerWithSchema(t *testing.T) *RPCHandler { + logger := zap.NewNop() + + // Load proto schema from testdata + protoLoader := NewProtoLoader(logger) + testdataDir := filepath.Join("testdata") + err := protoLoader.LoadFromDirectory(testdataDir) + require.NoError(t, err, "failed to load test proto files") + + return &RPCHandler{ + logger: logger, + protoLoader: protoLoader, + } +} + +func TestConvertProtoJSONToGraphQLVariables(t *testing.T) { + logger := zap.NewNop() + handler := &RPCHandler{logger: logger} + + t.Run("preserves camelCase field names from protobuf JSON", func(t *testing.T) { + // Protobuf JSON already provides camelCase field names + protoJSON := []byte(`{"employeeId": 1, "firstName": "John"}`) + result, err := handler.convertProtoJSONToGraphQLVariables("test.Service", "TestMethod", protoJSON) + require.NoError(t, err) + + // Field names should be preserved as-is (with ID capitalization) + assert.JSONEq(t, `{ + "employeeId": 1, + "firstName": "John" + }`, string(result)) + }) + + t.Run("strips proto enum prefixes with loaded schema", func(t *testing.T) { + handler := setupHandlerWithSchema(t) + + // Protobuf JSON provides camelCase field names + protoJSON := []byte(`{"employeeId": "123", "mood": "MOOD_HAPPY"}`) + result, err := handler.convertProtoJSONToGraphQLVariables("test.EmployeeService", "GetEmployee", protoJSON) + require.NoError(t, err) + + // With schema loaded, MOOD_HAPPY should become HAPPY + assert.JSONEq(t, `{ + "employeeId": "123", + "mood": "HAPPY" + }`, string(result)) + }) + + t.Run("omits _UNSPECIFIED enum values", func(t *testing.T) { + handler := setupHandlerWithSchema(t) + + protoJSON := []byte(`{"name": "John", "mood": "MOOD_UNSPECIFIED", 
"status": "STATUS_ACTIVE"}`) + result, err := handler.convertProtoJSONToGraphQLVariables("test.EmployeeService", "GetEmployee", protoJSON) + require.NoError(t, err) + + // MOOD_UNSPECIFIED should be omitted, but name and status should be present + assert.JSONEq(t, `{ + "name": "John", + "status": "ACTIVE" + }`, string(result)) + }) + + t.Run("preserves legitimate empty string fields", func(t *testing.T) { + handler := setupHandlerWithSchema(t) + + // Empty string in a non-enum field should be preserved + protoJSON := []byte(`{"name": "", "mood": "MOOD_HAPPY"}`) + result, err := handler.convertProtoJSONToGraphQLVariables("test.EmployeeService", "GetEmployee", protoJSON) + require.NoError(t, err) + + // Empty string should be preserved (not omitted like UNSPECIFIED enums) + assert.JSONEq(t, `{ + "name": "", + "mood": "HAPPY" + }`, string(result)) + }) + + t.Run("handles multiple _UNSPECIFIED enums", func(t *testing.T) { + handler := setupHandlerWithSchema(t) + + protoJSON := []byte(`{"name": "John", "mood": "MOOD_UNSPECIFIED", "status": "STATUS_UNSPECIFIED"}`) + result, err := handler.convertProtoJSONToGraphQLVariables("test.EmployeeService", "GetEmployee", protoJSON) + require.NoError(t, err) + + // Both UNSPECIFIED enums should be omitted, only name should remain + assert.JSONEq(t, `{ + "name": "John" + }`, string(result)) + }) + + t.Run("handles multiple enum values without schema", func(t *testing.T) { + protoJSON := []byte(`{"status": "STATUS_ACTIVE", "role": "ROLE_ADMIN"}`) + result, err := handler.convertProtoJSONToGraphQLVariables("test.Service", "TestMethod", protoJSON) + require.NoError(t, err) + + // Without schema, enum values pass through unchanged + assert.JSONEq(t, `{ + "status": "STATUS_ACTIVE", + "role": "ROLE_ADMIN" + }`, string(result)) + }) + + t.Run("handles enum with multiple underscores in value without schema", func(t *testing.T) { + protoJSON := []byte(`{"visibility": "VISIBILITY_FRIENDS_ONLY"}`) + result, err := handler.convertProtoJSONToGraphQLVariables("test.Service", "TestMethod", protoJSON) + require.NoError(t, err) + + // Without schema, enum values pass through unchanged + assert.JSONEq(t, `{ + "visibility": "VISIBILITY_FRIENDS_ONLY" + }`, string(result)) + }) + + t.Run("handles nested objects with enums without schema", func(t *testing.T) { + protoJSON := []byte(`{"user": {"id": 1, "status": "STATUS_ACTIVE"}}`) + result, err := handler.convertProtoJSONToGraphQLVariables("test.Service", "TestMethod", protoJSON) + require.NoError(t, err) + + // Without schema, enum values pass through unchanged + assert.JSONEq(t, `{ + "user": { + "id": 1, + "status": "STATUS_ACTIVE" + } + }`, string(result)) + }) + + t.Run("handles arrays with enums without schema", func(t *testing.T) { + protoJSON := []byte(`{"roles": ["ROLE_ADMIN", "ROLE_USER"]}`) + result, err := handler.convertProtoJSONToGraphQLVariables("test.Service", "TestMethod", protoJSON) + require.NoError(t, err) + + // Without schema, enum values pass through unchanged + assert.JSONEq(t, `{ + "roles": ["ROLE_ADMIN", "ROLE_USER"] + }`, string(result)) + }) + + t.Run("does not modify non-enum uppercase strings", func(t *testing.T) { + protoJSON := []byte(`{"code": "SUCCESS", "name": "JOHN"}`) + result, err := handler.convertProtoJSONToGraphQLVariables("test.Service", "TestMethod", protoJSON) + require.NoError(t, err) + + // Strings without underscores should not be modified + assert.JSONEq(t, `{ + "code": "SUCCESS", + "name": "JOHN" + }`, string(result)) + }) + + t.Run("handles empty JSON", func(t *testing.T) { + 
result, err := handler.convertProtoJSONToGraphQLVariables("test.Service", "TestMethod", []byte{}) + require.NoError(t, err) + assert.JSONEq(t, `{}`, string(result)) + }) + + t.Run("handles mixed case strings", func(t *testing.T) { + protoJSON := []byte(`{"message": "Hello_World"}`) + result, err := handler.convertProtoJSONToGraphQLVariables("test.Service", "TestMethod", protoJSON) + require.NoError(t, err) + + // Mixed case strings should not be treated as enums + assert.JSONEq(t, `{ + "message": "Hello_World" + }`, string(result)) + }) +} + diff --git a/router/pkg/connectrpc/helpers_test.go b/router/pkg/connectrpc/helpers_test.go new file mode 100644 index 0000000000..f8cd23cd52 --- /dev/null +++ b/router/pkg/connectrpc/helpers_test.go @@ -0,0 +1,100 @@ +package connectrpc + +import ( + "io" + "net/http" + "strings" + "sync" + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/wundergraph/cosmo/router/pkg/schemaloader" +) + +// Shared proto loader to avoid registration conflicts across tests +var ( + sharedProtoLoaders = make(map[string]*ProtoLoader) + sharedProtoLoaderMutex sync.Mutex +) + +// GetSharedProtoLoader returns a shared proto loader instance for the given directory. +// This ensures proto files are loaded exactly once per directory to avoid registration conflicts. +func GetSharedProtoLoader(t *testing.T, dir string) *ProtoLoader { + t.Helper() + + sharedProtoLoaderMutex.Lock() + defer sharedProtoLoaderMutex.Unlock() + + if loader, exists := sharedProtoLoaders[dir]; exists { + return loader + } + + loader := NewProtoLoader(zap.NewNop()) + err := loader.LoadFromDirectory(dir) + require.NoError(t, err, "failed to load proto files from %s", dir) + + sharedProtoLoaders[dir] = loader + return loader +} + +// MockHTTPClient creates a mock HTTP client that returns predefined responses +func MockHTTPClient(statusCode int, responseBody string) *http.Client { + return &http.Client{ + Transport: &mockRoundTripper{ + statusCode: statusCode, + responseBody: responseBody, + }, + } +} + +// buildTestOperations creates a test operations map for a service. +// This is a test-only helper that builds the operations map for the immutable registry. +func buildTestOperations(serviceName, operationName string, op *schemaloader.Operation) map[string]map[string]*schemaloader.Operation { + return map[string]map[string]*schemaloader.Operation{ + serviceName: { + operationName: op, + }, + } +} + +type mockRoundTripper struct { + statusCode int + responseBody string +} + +func (m *mockRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: m.statusCode, + Body: io.NopCloser(strings.NewReader(m.responseBody)), + Header: make(http.Header), + }, nil +} + +// NewTestRPCHandler creates a test RPC handler with sensible defaults +func NewTestRPCHandler(t *testing.T, protoLoader *ProtoLoader) *RPCHandler { + t.Helper() + + // Build test operations map + serviceName := "employee.v1.EmployeeService" + operations := buildTestOperations(serviceName, "GetEmployeeById", &schemaloader.Operation{ + Name: "GetEmployeeById", + OperationType: "query", + OperationString: "query GetEmployeeById($id: Int!) 
{ employee(id: $id) { id name } }", + }) + + // Create immutable operation registry + opRegistry := NewOperationRegistry(operations) + + handler, err := NewRPCHandler(HandlerConfig{ + GraphQLEndpoint: "http://localhost:4000/graphql", + HTTPClient: &http.Client{}, + Logger: zap.NewNop(), + OperationRegistry: opRegistry, + ProtoLoader: protoLoader, + }) + require.NoError(t, err) + + return handler +} diff --git a/router/pkg/connectrpc/operation_loader.go b/router/pkg/connectrpc/operation_loader.go new file mode 100644 index 0000000000..9d0889e581 --- /dev/null +++ b/router/pkg/connectrpc/operation_loader.go @@ -0,0 +1,119 @@ +package connectrpc + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "go.uber.org/zap" + + "github.com/wundergraph/cosmo/router/pkg/schemaloader" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/astparser" +) + +// LoadOperationsForService loads GraphQL operations for a specific service from operation files. +// Operations are scoped to the service's fully qualified name (package.service). +// Returns a map of operation name -> Operation for the service. +func LoadOperationsForService(serviceName string, operationFiles []string, logger *zap.Logger) (map[string]*schemaloader.Operation, error) { + if serviceName == "" { + return nil, fmt.Errorf("service name cannot be empty") + } + + if logger == nil { + logger = zap.NewNop() + } + + logger.Debug("loading operations for service", + zap.String("service", serviceName), + zap.Int("file_count", len(operationFiles))) + + operations := make(map[string]*schemaloader.Operation) + + // Track operation names to detect duplicates within this service + seenOperations := make(map[string]string) // operation name -> file path + + // Load each operation file + for _, filePath := range operationFiles { + content, err := os.ReadFile(filePath) + if err != nil { + logger.Warn("failed to read operation file", + zap.String("file", filePath), + zap.Error(err)) + continue + } + + operationString := string(content) + + // Parse to extract operation name and type + opDoc, report := astparser.ParseGraphqlDocumentString(operationString) + if report.HasErrors() { + logger.Warn("failed to parse operation file", + zap.String("file", filePath), + zap.String("error", report.Error())) + continue + } + + // Validate exactly one operation per file + operationCount := len(opDoc.OperationDefinitions) + if operationCount != 1 { + logger.Warn("expected exactly one operation definition in file", + zap.String("file", filePath), + zap.Int("operation_count", operationCount)) + continue + } + + // Extract operation name and type + opName, opType, err := schemaloader.GetOperationNameAndType(&opDoc) + if err != nil { + logger.Warn("failed to extract operation info", + zap.String("file", filePath), + zap.Error(err)) + continue + } + + // If no operation name, use filename without extension + if opName == "" { + opName = strings.TrimSuffix(filepath.Base(filePath), filepath.Ext(filePath)) + } + + // Check for duplicate operation names within this service + if existingFile, exists := seenOperations[opName]; exists { + logger.Warn("duplicate operation name within service, last one wins", + zap.String("service", serviceName), + zap.String("operation", opName), + zap.String("previous_file", existingFile), + zap.String("current_file", filePath)) + } + + operation := &schemaloader.Operation{ + Name: opName, + FilePath: filePath, + Document: opDoc, + OperationString: operationString, + OperationType: opType, + } + + operations[opName] = operation + 
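// Record which file supplied this operation so any later duplicate can be reported with both paths.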
seenOperations[opName] = filePath + + logger.Debug("loaded operation for service", + zap.String("service", serviceName), + zap.String("operation", opName), + zap.String("type", opType), + zap.String("file", filePath)) + } + + // Return error if operation files were provided but none were successfully loaded + if len(operationFiles) > 0 && len(operations) == 0 { + return nil, fmt.Errorf("all %d operation file(s) failed to load for service %s", len(operationFiles), serviceName) + } + + logger.Info("loaded operations for service", + zap.String("service", serviceName), + zap.Int("operation_count", len(operations))) + + return operations, nil +} + diff --git a/router/pkg/connectrpc/operation_registry.go b/router/pkg/connectrpc/operation_registry.go new file mode 100644 index 0000000000..ef2652380d --- /dev/null +++ b/router/pkg/connectrpc/operation_registry.go @@ -0,0 +1,113 @@ +package connectrpc + +import ( + "github.com/wundergraph/cosmo/router/pkg/schemaloader" +) + +// OperationRegistry manages pre-defined GraphQL operations for ConnectRPC. +// Operations are scoped to their service (package.service) and cached in memory +// for fast access during request handling. +// +// Thread-safety: This registry is immutable after creation, making it safe for +// concurrent reads without any locking overhead. To update operations, create a +// new registry instance with the updated data. +type OperationRegistry struct { + // Service-scoped operations: serviceName (package.service) -> operationName -> Operation + // This map is immutable after construction - no locks needed for reads + operations map[string]map[string]*schemaloader.Operation +} + +// NewOperationRegistry creates a new immutable operation registry with pre-built operations. +// The operations map is used as-is without copying, so callers should not modify it after passing. +func NewOperationRegistry(operations map[string]map[string]*schemaloader.Operation) *OperationRegistry { + if operations == nil { + operations = make(map[string]map[string]*schemaloader.Operation) + } + + return &OperationRegistry{ + operations: operations, + } +} + +// GetOperationForService retrieves an operation for a specific service. +// Returns nil if the service or operation is not found. +// This method is safe for concurrent use (no locking needed due to immutability). +func (r *OperationRegistry) GetOperationForService(serviceName, operationName string) *schemaloader.Operation { + serviceOps, exists := r.operations[serviceName] + if !exists { + return nil + } + + return serviceOps[operationName] +} + +// HasOperationForService checks if an operation exists for a specific service. +// This method is safe for concurrent use (no locking needed due to immutability). +func (r *OperationRegistry) HasOperationForService(serviceName, operationName string) bool { + serviceOps, exists := r.operations[serviceName] + if !exists { + return false + } + + _, exists = serviceOps[operationName] + return exists +} + +// GetAllOperationsForService returns all operations for a specific service. +// The returned slice is a copy to prevent external modification. +// Returns an empty slice if the service doesn't exist. +// This method is safe for concurrent use (no locking needed due to immutability). 
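Taken together, the loader and the registry above are wired roughly like this at startup. This is a minimal sketch: the service name matches the samples in this change, while the operation file path is an illustrative assumption.

package main

import (
	"fmt"

	"go.uber.org/zap"

	"github.com/wundergraph/cosmo/router/pkg/connectrpc"
	"github.com/wundergraph/cosmo/router/pkg/schemaloader"
)

func main() {
	logger := zap.NewNop()

	// Hypothetical operation files, scoped to one fully qualified service name.
	files := []string{"operations/employee.v1.EmployeeService/QueryGetEmployeeById.graphql"}

	ops, err := connectrpc.LoadOperationsForService("employee.v1.EmployeeService", files, logger)
	if err != nil {
		panic(err)
	}

	// Freeze the loaded operations in the immutable registry; updating later
	// means building a new registry rather than mutating this one.
	registry := connectrpc.NewOperationRegistry(map[string]map[string]*schemaloader.Operation{
		"employee.v1.EmployeeService": ops,
	})

	if op := registry.GetOperationForService("employee.v1.EmployeeService", "GetEmployeeById"); op != nil {
		fmt.Println(op.OperationType, op.Name) // query GetEmployeeById
	}
}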
+func (r *OperationRegistry) GetAllOperationsForService(serviceName string) []schemaloader.Operation { + serviceOps, exists := r.operations[serviceName] + if !exists { + return []schemaloader.Operation{} + } + + operations := make([]schemaloader.Operation, 0, len(serviceOps)) + for _, op := range serviceOps { + operations = append(operations, *op) + } + + return operations +} + +// GetAllOperations returns all operations across all services. +// The returned slice is a copy to prevent external modification. +// This method is safe for concurrent use (no locking needed due to immutability). +func (r *OperationRegistry) GetAllOperations() []schemaloader.Operation { + var operations []schemaloader.Operation + for _, serviceOps := range r.operations { + for _, op := range serviceOps { + operations = append(operations, *op) + } + } + + return operations +} + +// Count returns the total number of operations across all services. +// This method is safe for concurrent use (no locking needed due to immutability). +func (r *OperationRegistry) Count() int { + count := 0 + for _, serviceOps := range r.operations { + count += len(serviceOps) + } + return count +} + +// CountForService returns the number of operations for a specific service. +// This method is safe for concurrent use (no locking needed due to immutability). +func (r *OperationRegistry) CountForService(serviceName string) int { + return len(r.operations[serviceName]) +} + +// GetServiceNames returns all service names that have operations registered. +// This method is safe for concurrent use (no locking needed due to immutability). +func (r *OperationRegistry) GetServiceNames() []string { + names := make([]string, 0, len(r.operations)) + for serviceName := range r.operations { + names = append(names, serviceName) + } + + return names +} diff --git a/router/pkg/connectrpc/operation_registry_test.go b/router/pkg/connectrpc/operation_registry_test.go new file mode 100644 index 0000000000..918b8e9b66 --- /dev/null +++ b/router/pkg/connectrpc/operation_registry_test.go @@ -0,0 +1,430 @@ +package connectrpc + +import ( + "os" + "path/filepath" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/wundergraph/cosmo/router/pkg/schemaloader" +) + +func TestLoadOperationsForService(t *testing.T) { + t.Run("loads operations for service successfully", func(t *testing.T) { + tempDir := t.TempDir() + serviceName := "employee.v1.EmployeeService" + + // Create test operation files + testFiles := map[string]string{ + "GetEmployee.graphql": `query GetEmployee($id: ID!) { + employee(id: $id) { + id + name + email + } +}`, + "ListEmployees.graphql": `query ListEmployees { + employees { + id + name + } +}`, + "UpdateEmployee.graphql": `mutation UpdateEmployee($id: ID!, $name: String!) 
{ + updateEmployee(id: $id, name: $name) { + id + name + } +}`, + } + + var operationFiles []string + for filename, content := range testFiles { + filePath := filepath.Join(tempDir, filename) + err := os.WriteFile(filePath, []byte(content), 0644) + require.NoError(t, err) + operationFiles = append(operationFiles, filePath) + } + + serviceOps, err := LoadOperationsForService(serviceName, operationFiles, zap.NewNop()) + require.NoError(t, err) + assert.Equal(t, 3, len(serviceOps)) + + // Create registry with loaded operations + allOps := map[string]map[string]*schemaloader.Operation{ + serviceName: serviceOps, + } + registry := NewOperationRegistry(allOps) + + // Verify operations are loaded for the service + assert.True(t, registry.HasOperationForService(serviceName, "GetEmployee")) + assert.True(t, registry.HasOperationForService(serviceName, "ListEmployees")) + assert.True(t, registry.HasOperationForService(serviceName, "UpdateEmployee")) + }) + + t.Run("returns error for empty service name", func(t *testing.T) { + _, err := LoadOperationsForService("", []string{}, zap.NewNop()) + + assert.Error(t, err) + assert.Contains(t, err.Error(), "service name cannot be empty") + }) + + t.Run("handles empty operation files list", func(t *testing.T) { + serviceName := "test.v1.TestService" + + serviceOps, err := LoadOperationsForService(serviceName, []string{}, zap.NewNop()) + + require.NoError(t, err) + assert.Equal(t, 0, len(serviceOps)) + }) + + t.Run("loads operations for multiple services independently", func(t *testing.T) { + tempDir := t.TempDir() + + // Service 1 + service1 := "employee.v1.EmployeeService" + op1File := filepath.Join(tempDir, "GetEmployee.graphql") + err := os.WriteFile(op1File, []byte(`query GetEmployee { employee { id } }`), 0644) + require.NoError(t, err) + + // Service 2 + service2 := "product.v1.ProductService" + op2File := filepath.Join(tempDir, "GetProduct.graphql") + err = os.WriteFile(op2File, []byte(`query GetProduct { product { id } }`), 0644) + require.NoError(t, err) + + service1Ops, err := LoadOperationsForService(service1, []string{op1File}, zap.NewNop()) + require.NoError(t, err) + + service2Ops, err := LoadOperationsForService(service2, []string{op2File}, zap.NewNop()) + require.NoError(t, err) + + allOps := map[string]map[string]*schemaloader.Operation{ + service1: service1Ops, + service2: service2Ops, + } + registry := NewOperationRegistry(allOps) + + // Verify operations are scoped to their services + assert.True(t, registry.HasOperationForService(service1, "GetEmployee")) + assert.False(t, registry.HasOperationForService(service1, "GetProduct")) + + assert.True(t, registry.HasOperationForService(service2, "GetProduct")) + assert.False(t, registry.HasOperationForService(service2, "GetEmployee")) + }) +} + +func TestGetOperationForService(t *testing.T) { + t.Run("returns operation when found", func(t *testing.T) { + tempDir := t.TempDir() + serviceName := "employee.v1.EmployeeService" + opContent := `query GetEmployee($id: ID!) 
{ + employee(id: $id) { + id + name + } +}` + opFile := filepath.Join(tempDir, "GetEmployee.graphql") + err := os.WriteFile(opFile, []byte(opContent), 0644) + require.NoError(t, err) + + serviceOps, err := LoadOperationsForService(serviceName, []string{opFile}, zap.NewNop()) + require.NoError(t, err) + + allOps := map[string]map[string]*schemaloader.Operation{ + serviceName: serviceOps, + } + registry := NewOperationRegistry(allOps) + + op := registry.GetOperationForService(serviceName, "GetEmployee") + assert.NotNil(t, op) + assert.Equal(t, "GetEmployee", op.Name) + assert.Equal(t, "query", op.OperationType) + assert.Contains(t, op.OperationString, "GetEmployee") + }) + + t.Run("returns nil for non-existent operation", func(t *testing.T) { + registry := NewOperationRegistry(nil) + serviceName := "test.v1.TestService" + op := registry.GetOperationForService(serviceName, "NonExistent") + assert.Nil(t, op) + }) + + t.Run("returns nil for non-existent service", func(t *testing.T) { + registry := NewOperationRegistry(nil) + op := registry.GetOperationForService("nonexistent.Service", "AnyOperation") + assert.Nil(t, op) + }) +} + +func TestHasOperationForService(t *testing.T) { + t.Run("returns true for existing operation", func(t *testing.T) { + tempDir := t.TempDir() + serviceName := "test.v1.TestService" + opContent := `query TestQuery { test }` + opFile := filepath.Join(tempDir, "Test.graphql") + err := os.WriteFile(opFile, []byte(opContent), 0644) + require.NoError(t, err) + + serviceOps, err := LoadOperationsForService(serviceName, []string{opFile}, zap.NewNop()) + require.NoError(t, err) + + allOps := map[string]map[string]*schemaloader.Operation{ + serviceName: serviceOps, + } + registry := NewOperationRegistry(allOps) + + assert.True(t, registry.HasOperationForService(serviceName, "TestQuery")) + }) + + t.Run("returns false for non-existent operation", func(t *testing.T) { + registry := NewOperationRegistry(nil) + assert.False(t, registry.HasOperationForService("test.Service", "NonExistent")) + }) + + t.Run("returns false for non-existent service", func(t *testing.T) { + registry := NewOperationRegistry(nil) + assert.False(t, registry.HasOperationForService("nonexistent.Service", "AnyOperation")) + }) +} + +func TestGetAllOperationsForService(t *testing.T) { + t.Run("returns all operations for service", func(t *testing.T) { + tempDir := t.TempDir() + serviceName := "test.v1.TestService" + + testFiles := map[string]string{ + "Op1.graphql": `query Op1 { field1 }`, + "Op2.graphql": `query Op2 { field2 }`, + } + + var operationFiles []string + for filename, content := range testFiles { + filePath := filepath.Join(tempDir, filename) + err := os.WriteFile(filePath, []byte(content), 0644) + require.NoError(t, err) + operationFiles = append(operationFiles, filePath) + } + + serviceOps, err := LoadOperationsForService(serviceName, operationFiles, zap.NewNop()) + require.NoError(t, err) + + allOps := map[string]map[string]*schemaloader.Operation{ + serviceName: serviceOps, + } + registry := NewOperationRegistry(allOps) + + operations := registry.GetAllOperationsForService(serviceName) + assert.Len(t, operations, 2) + + // Verify operation names + names := make(map[string]bool) + for _, op := range operations { + names[op.Name] = true + } + assert.True(t, names["Op1"]) + assert.True(t, names["Op2"]) + }) + + t.Run("returns empty slice for non-existent service", func(t *testing.T) { + registry := NewOperationRegistry(nil) + operations := registry.GetAllOperationsForService("nonexistent.Service") + 
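// A missing service must yield an empty, non-nil slice so callers can range over it without a nil check.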
assert.NotNil(t, operations) + assert.Len(t, operations, 0) + }) +} + +func TestCountForService(t *testing.T) { + t.Run("returns correct count for service", func(t *testing.T) { + tempDir := t.TempDir() + serviceName := "test.v1.TestService" + + registry := NewOperationRegistry(nil) + assert.Equal(t, 0, registry.CountForService(serviceName)) + + // Add operations + op1File := filepath.Join(tempDir, "Op1.graphql") + err := os.WriteFile(op1File, []byte(`query Op1 { test }`), 0644) + require.NoError(t, err) + + serviceOps, err := LoadOperationsForService(serviceName, []string{op1File}, zap.NewNop()) + require.NoError(t, err) + + allOps := map[string]map[string]*schemaloader.Operation{ + serviceName: serviceOps, + } + registry = NewOperationRegistry(allOps) + assert.Equal(t, 1, registry.CountForService(serviceName)) + }) + + t.Run("returns zero for non-existent service", func(t *testing.T) { + registry := NewOperationRegistry(nil) + assert.Equal(t, 0, registry.CountForService("nonexistent.Service")) + }) +} + +func TestCount(t *testing.T) { + t.Run("returns total count across all services", func(t *testing.T) { + tempDir := t.TempDir() + + service1 := "service1.v1.Service1" + service2 := "service2.v1.Service2" + + registry := NewOperationRegistry(nil) + assert.Equal(t, 0, registry.Count()) + + // Add operations to service1 + op1File := filepath.Join(tempDir, "Op1.graphql") + err := os.WriteFile(op1File, []byte(`query Op1 { test }`), 0644) + require.NoError(t, err) + service1Ops, err := LoadOperationsForService(service1, []string{op1File}, zap.NewNop()) + require.NoError(t, err) + + allOps := map[string]map[string]*schemaloader.Operation{ + service1: service1Ops, + } + registry = NewOperationRegistry(allOps) + assert.Equal(t, 1, registry.Count()) + + // Add operations to service2 + op2File := filepath.Join(tempDir, "Op2.graphql") + err = os.WriteFile(op2File, []byte(`query Op2 { test }`), 0644) + require.NoError(t, err) + service2Ops, err := LoadOperationsForService(service2, []string{op2File}, zap.NewNop()) + require.NoError(t, err) + + allOps[service2] = service2Ops + registry = NewOperationRegistry(allOps) + assert.Equal(t, 2, registry.Count()) + }) +} + +func TestGetServiceNames(t *testing.T) { + t.Run("returns all service names", func(t *testing.T) { + tempDir := t.TempDir() + + service1 := "employee.v1.EmployeeService" + service2 := "product.v1.ProductService" + + op1File := filepath.Join(tempDir, "Op1.graphql") + err := os.WriteFile(op1File, []byte(`query Op1 { test }`), 0644) + require.NoError(t, err) + + service1Ops, err := LoadOperationsForService(service1, []string{op1File}, zap.NewNop()) + require.NoError(t, err) + service2Ops, err := LoadOperationsForService(service2, []string{op1File}, zap.NewNop()) + require.NoError(t, err) + + allOps := map[string]map[string]*schemaloader.Operation{ + service1: service1Ops, + service2: service2Ops, + } + registry := NewOperationRegistry(allOps) + + names := registry.GetServiceNames() + assert.Len(t, names, 2) + assert.Contains(t, names, service1) + assert.Contains(t, names, service2) + }) + + t.Run("returns empty slice for empty registry", func(t *testing.T) { + registry := NewOperationRegistry(nil) + names := registry.GetServiceNames() + assert.NotNil(t, names) + assert.Len(t, names, 0) + }) +} + +// TestThreadSafety verifies that OperationRegistry is safe for concurrent reads. +// With the immutable pattern, no locking is needed for concurrent reads. 
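The concurrency guarantee above comes from immutability, so an update means building a new registry and swapping it in as a whole. A sketch of one way a caller might do that; the holder type and the atomic swap are assumptions, not part of this change.

package connectrpc

import (
	"sync/atomic"

	"github.com/wundergraph/cosmo/router/pkg/schemaloader"
)

// registryHolder is a hypothetical caller-side wrapper around the immutable registry.
type registryHolder struct {
	current atomic.Pointer[OperationRegistry]
}

// Get returns the registry snapshot readers should use for the current request.
func (h *registryHolder) Get() *OperationRegistry {
	return h.current.Load()
}

// Replace swaps in a fully built registry so concurrent readers see either the
// old or the new snapshot, never a partially updated one.
func (h *registryHolder) Replace(ops map[string]map[string]*schemaloader.Operation) {
	h.current.Store(NewOperationRegistry(ops))
}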
+func TestThreadSafety(t *testing.T) { + tempDir := t.TempDir() + serviceName := "test.v1.TestService" + opContent := `query Test { test }` + opFile := filepath.Join(tempDir, "Test.graphql") + err := os.WriteFile(opFile, []byte(opContent), 0644) + require.NoError(t, err) + + serviceOps, err := LoadOperationsForService(serviceName, []string{opFile}, zap.NewNop()) + require.NoError(t, err) + + allOps := map[string]map[string]*schemaloader.Operation{ + serviceName: serviceOps, + } + registry := NewOperationRegistry(allOps) + + t.Run("concurrent reads are safe", func(t *testing.T) { + var wg sync.WaitGroup + + // Start multiple goroutines reading concurrently + for range 10 { + wg.Go(func() { + for range 100 { + _ = registry.GetOperationForService(serviceName, "Test") + _ = registry.HasOperationForService(serviceName, "Test") + _ = registry.GetAllOperationsForService(serviceName) + _ = registry.Count() + _ = registry.CountForService(serviceName) + } + }) + } + + // Wait for all goroutines to complete + wg.Wait() + }) +} + +// Test service-scoped operations with same service names but different packages +func TestServiceScopedOperations(t *testing.T) { + t.Run("same service name different packages work independently", func(t *testing.T) { + tempDir := t.TempDir() + + // Two services with same name but different packages + service1 := "company1.employee.v1.EmployeeService" + service2 := "company2.employee.v1.EmployeeService" + + // Create operations with same name for both services + op1File := filepath.Join(tempDir, "GetEmployee1.graphql") + err := os.WriteFile(op1File, []byte(`query GetEmployee { company1Employee { id name } }`), 0644) + require.NoError(t, err) + + op2File := filepath.Join(tempDir, "GetEmployee2.graphql") + err = os.WriteFile(op2File, []byte(`query GetEmployee { company2Employee { id name } }`), 0644) + require.NoError(t, err) + + // Load operations for both services + service1Ops, err := LoadOperationsForService(service1, []string{op1File}, zap.NewNop()) + require.NoError(t, err) + service2Ops, err := LoadOperationsForService(service2, []string{op2File}, zap.NewNop()) + require.NoError(t, err) + + allOps := map[string]map[string]*schemaloader.Operation{ + service1: service1Ops, + service2: service2Ops, + } + registry := NewOperationRegistry(allOps) + + // Verify both services have their own GetEmployee operation + op1 := registry.GetOperationForService(service1, "GetEmployee") + op2 := registry.GetOperationForService(service2, "GetEmployee") + + assert.NotNil(t, op1) + assert.NotNil(t, op2) + + // Verify they have different content + assert.Contains(t, op1.OperationString, "company1Employee") + assert.Contains(t, op2.OperationString, "company2Employee") + + // Verify operations are isolated + assert.True(t, registry.HasOperationForService(service1, "GetEmployee")) + assert.True(t, registry.HasOperationForService(service2, "GetEmployee")) + + // Verify counts + assert.Equal(t, 1, registry.CountForService(service1)) + assert.Equal(t, 1, registry.CountForService(service2)) + assert.Equal(t, 2, registry.Count()) + }) +} diff --git a/router/pkg/connectrpc/proto_field_options.go b/router/pkg/connectrpc/proto_field_options.go new file mode 100644 index 0000000000..5ef5c62cf4 --- /dev/null +++ b/router/pkg/connectrpc/proto_field_options.go @@ -0,0 +1,36 @@ +package connectrpc + +import "google.golang.org/protobuf/reflect/protoreflect" + +// Protocol Buffer field option constants for ConnectRPC integration +// +// These constants define custom field options used to bridge protobuf and 
GraphQL. +// Field numbers are in the user-defined extension range (1000-536870911) as per +// the protobuf specification. + +// GraphQLVariableNameFieldNumber is the field number for the graphql_variable_name option. +// +// This option specifies the exact GraphQL variable name to use for a protobuf field +// when the GraphQL variable name doesn't match the expected protobuf JSON format +// (camelCase of snake_case field name). +// +// The extension can be declared locally in any package for portability: +// +// package employee.v1; +// +// extend google.protobuf.FieldOptions { +// string graphql_variable_name = 50001; +// } +// +// message FindEmployeesByCriteriaRequest { +// bool has_pets = 1 [(employee.v1.graphql_variable_name) = "HAS_PETS"]; +// } +// +// Or imported from the canonical annotations.proto: +// +// import "com/wundergraph/connectrpc/options/v1/annotations.proto"; +// +// message FindEmployeesByCriteriaRequest { +// bool has_pets = 1 [(com.wundergraph.connectrpc.options.v1.graphql_variable_name) = "HAS_PETS"]; +// } +const GraphQLVariableNameFieldNumber protoreflect.FieldNumber = 50001 diff --git a/router/pkg/connectrpc/proto_loader.go b/router/pkg/connectrpc/proto_loader.go new file mode 100644 index 0000000000..defcecb878 --- /dev/null +++ b/router/pkg/connectrpc/proto_loader.go @@ -0,0 +1,438 @@ +package connectrpc + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/bufbuild/protocompile" + "github.com/bufbuild/protocompile/linker" + "github.com/bufbuild/protocompile/reporter" + "go.uber.org/zap" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// ServiceDefinition represents a parsed protobuf service +type ServiceDefinition struct { + // FullName is the fully qualified service name (e.g., "mypackage.MyService") + FullName string + // Package is the protobuf package name + Package string + // ServiceName is the simple service name + ServiceName string + // Methods contains all RPC methods in this service + Methods []MethodDefinition + // FileDescriptor is the proto file descriptor + FileDescriptor protoreflect.FileDescriptor + // ServiceDescriptor is the service descriptor + ServiceDescriptor protoreflect.ServiceDescriptor +} + +// MethodDefinition represents a parsed RPC method +type MethodDefinition struct { + // Name is the method name (e.g., "GetUser") + Name string + // FullName is the fully qualified method name + FullName string + // InputType is the fully qualified input message type + InputType string + // OutputType is the fully qualified output message type + OutputType string + // InputMessageDescriptor is the descriptor for the input message + InputMessageDescriptor protoreflect.MessageDescriptor + // OutputMessageDescriptor is the descriptor for the output message + OutputMessageDescriptor protoreflect.MessageDescriptor + // IsClientStreaming indicates if this is a client streaming RPC + IsClientStreaming bool + // IsServerStreaming indicates if this is a server streaming RPC + IsServerStreaming bool +} + +// ProtoLoader handles loading and parsing of protobuf files +type ProtoLoader struct { + logger *zap.Logger + // services maps service full names to their definitions + services map[string]*ServiceDefinition + // files is a custom registry for file descriptors (avoids global registry) + files *protoregistry.Files + // processedFiles tracks which file descriptors we've already processed for service extraction + // Key is the file path to ensure uniqueness 
across different directories + processedFiles map[string]bool +} + +// NewProtoLoader creates a new proto loader +func NewProtoLoader(logger *zap.Logger) *ProtoLoader { + if logger == nil { + logger = zap.NewNop() + } + + return &ProtoLoader{ + logger: logger, + services: make(map[string]*ServiceDefinition), + files: &protoregistry.Files{}, + processedFiles: make(map[string]bool), + } +} + +// LoadFromDirectory loads all .proto files from a directory +func (pl *ProtoLoader) LoadFromDirectory(dir string) error { + pl.logger.Debug("loading proto files from directory", zap.String("dir", dir)) + + // Find all .proto files + protoFiles, err := pl.findProtoFiles(dir) + if err != nil { + return fmt.Errorf("failed to find proto files: %w", err) + } + + if len(protoFiles) == 0 { + return fmt.Errorf("no proto files found in directory: %s", dir) + } + + pl.logger.Debug("found proto files", zap.Int("count", len(protoFiles))) + + // Compute relative paths for all proto files + relativeFiles := make([]string, 0, len(protoFiles)) + for _, protoFile := range protoFiles { + relPath, err := filepath.Rel(dir, protoFile) + if err != nil { + return fmt.Errorf("failed to compute relative path for %s: %w", protoFile, err) + } + relativeFiles = append(relativeFiles, relPath) + } + + // Parse all files in a single batch with the root directory as import path + // This allows imports to resolve correctly across the entire tree + if err := pl.parseProtoFiles(dir, relativeFiles); err != nil { + return fmt.Errorf("failed to parse proto files: %w", err) + } + + pl.logger.Debug("successfully loaded proto files", + zap.Int("services", len(pl.services))) + + return nil +} + +// LoadFromDirectories loads all .proto files from multiple directories +// and validates that proto package names are unique across all directories. +// The proto package name acts as a namespace, so duplicate packages are not allowed. 
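The single-directory loader above is expected to be used roughly as follows. The directory path is an illustrative assumption, and the lookup uses the GetMethod helper defined later in this file together with the sample employee service from this change.

package main

import (
	"fmt"

	"go.uber.org/zap"

	"github.com/wundergraph/cosmo/router/pkg/connectrpc"
)

func main() {
	loader := connectrpc.NewProtoLoader(zap.NewNop())

	// Assumed layout: one directory per proto package, mirroring samples/services.
	if err := loader.LoadFromDirectory("proto/services/employee.v1"); err != nil {
		panic(err)
	}

	// Lookups are keyed by the fully qualified service name from the proto package.
	method, err := loader.GetMethod("employee.v1.EmployeeService", "GetEmployeeById")
	if err != nil {
		panic(err)
	}
	fmt.Println(method.InputType, "->", method.OutputType)
}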
+func (pl *ProtoLoader) LoadFromDirectories(dirs []string) error { + if len(dirs) == 0 { + return fmt.Errorf("no directories provided") + } + + pl.logger.Info("loading proto files from multiple directories", + zap.Int("directory_count", len(dirs))) + + // Track packages we've seen to enforce uniqueness + seenPackages := make(map[string]string) // package name -> directory + + for _, dir := range dirs { + pl.logger.Debug("loading proto files from directory", zap.String("dir", dir)) + + // Find all .proto files in this directory + protoFiles, err := pl.findProtoFiles(dir) + if err != nil { + return fmt.Errorf("failed to find proto files in %s: %w", dir, err) + } + + if len(protoFiles) == 0 { + pl.logger.Warn("no proto files found in directory", zap.String("dir", dir)) + continue + } + + pl.logger.Debug("found proto files", + zap.String("dir", dir), + zap.Int("count", len(protoFiles))) + + // Track service names before loading to identify new ones + existingServices := make(map[string]bool) + for serviceName := range pl.services { + existingServices[serviceName] = true + } + + // Compute relative paths for all proto files in this directory + relativeFiles := make([]string, 0, len(protoFiles)) + for _, protoFile := range protoFiles { + relPath, err := filepath.Rel(dir, protoFile) + if err != nil { + return fmt.Errorf("failed to compute relative path for %s: %w", protoFile, err) + } + relativeFiles = append(relativeFiles, relPath) + } + + // Parse all files from this directory in a single batch + // Use the directory as the import path so imports resolve correctly + if err := pl.parseProtoFiles(dir, relativeFiles); err != nil { + pl.logger.Error("failed to parse proto files", + zap.String("dir", dir), + zap.Error(err)) + return fmt.Errorf("failed to parse proto files from %s: %w", dir, err) + } + + // Validate package uniqueness for newly added services + for serviceName, service := range pl.services { + // Only check services that were just added in this batch + if existingServices[serviceName] { + continue + } + + packageName := service.Package + if existingDir, exists := seenPackages[packageName]; exists && existingDir != dir { + return fmt.Errorf( + "duplicate proto package '%s' found in multiple directories: '%s' and '%s'. "+ + "Proto package names must be unique across all services", + packageName, existingDir, dir) + } + seenPackages[packageName] = dir + + pl.logger.Debug("registered proto package", + zap.String("package", packageName), + zap.String("dir", dir), + zap.String("service", service.FullName)) + } + } + + pl.logger.Info("successfully loaded proto files from all directories", + zap.Int("total_services", len(pl.services)), + zap.Int("unique_packages", len(seenPackages))) + + return nil +} + +// findProtoFiles recursively finds all .proto files in a directory +func (pl *ProtoLoader) findProtoFiles(dir string) ([]string, error) { + var protoFiles []string + + err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if !info.IsDir() && strings.HasSuffix(path, ".proto") { + protoFiles = append(protoFiles, path) + } + + return nil + }) + + if err != nil { + return nil, err + } + + return protoFiles, nil +} + +// parseProtoFiles parses multiple proto files in a single batch using the root directory +// as the import path. This allows imports to resolve correctly across the entire tree. 
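For multiple services, the multi-directory variant above enforces that each proto package appears in exactly one directory. A caller-side sketch under the assumption of one directory per package; the paths are illustrative.

package main

import (
	"log"

	"go.uber.org/zap"

	"github.com/wundergraph/cosmo/router/pkg/connectrpc"
)

func main() {
	loader := connectrpc.NewProtoLoader(zap.NewNop())

	// Each directory contributes one or more proto packages; a package name that
	// shows up in two different directories is rejected with an error naming both.
	err := loader.LoadFromDirectories([]string{
		"proto/services/employee.v1",
		"proto/services/product.v1",
	})
	if err != nil {
		log.Fatalf("loading service protos: %v", err)
	}

	log.Printf("loaded %d services", len(loader.GetServices()))
}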
+func (pl *ProtoLoader) parseProtoFiles(rootDir string, relativeFilenames []string) error { + pl.logger.Debug("parsing proto files in batch", + zap.String("root_dir", rootDir), + zap.Int("file_count", len(relativeFilenames))) + + // Create a source resolver with the root directory as import path + sourceResolver := &protocompile.SourceResolver{ + ImportPaths: []string{rootDir}, + } + + // Wrap with standard imports to provide access to well-known proto files + // like google/protobuf/descriptor.proto, google/protobuf/wrappers.proto, etc. + resolverWithStandardImports := protocompile.WithStandardImports(sourceResolver) + + // Create a compiler with the resolver that includes standard imports + compiler := protocompile.Compiler{ + Resolver: resolverWithStandardImports, + // Use a custom reporter to capture errors and warnings + Reporter: reporter.NewReporter( + func(err reporter.ErrorWithPos) error { + pl.logger.Error("proto compilation error", + zap.String("file", err.GetPosition().Filename), + zap.Int("line", err.GetPosition().Line), + zap.Int("col", err.GetPosition().Col), + zap.String("error", err.Unwrap().Error())) + return err + }, + func(err reporter.ErrorWithPos) { + pl.logger.Warn("proto compilation warning", + zap.String("file", err.GetPosition().Filename), + zap.Int("line", err.GetPosition().Line), + zap.Int("col", err.GetPosition().Col), + zap.String("warning", err.Unwrap().Error())) + }, + ), + // Include source code info for better error messages + SourceInfoMode: protocompile.SourceInfoStandard, + } + + // Compile all files in a single batch + ctx := context.Background() + results, err := compiler.Compile(ctx, relativeFilenames...) + if err != nil { + return fmt.Errorf("failed to compile proto files: %w", err) + } + + // Process each file descriptor + for _, result := range results { + if err := pl.processFileDescriptor(result); err != nil { + return fmt.Errorf("failed to process file descriptor: %w", err) + } + } + + return nil +} + +// processFileDescriptor extracts service definitions from a file descriptor +func (pl *ProtoLoader) processFileDescriptor(result linker.File) error { + // linker.File implements protoreflect.FileDescriptor interface + fd := protoreflect.FileDescriptor(result) + filePath := fd.Path() + + // Check if we've already processed this file for service extraction + if pl.processedFiles[string(filePath)] { + pl.logger.Debug("file descriptor already processed for service extraction, skipping", + zap.String("file", string(filePath))) + return nil + } + + // Mark this file as processed + pl.processedFiles[filePath] = true + + // Try to register the file descriptor in our local registry + _, err := pl.files.FindFileByPath(filePath) + if err == nil { + // File path already registered + pl.logger.Debug("file path already registered in local registry, skipping registration", + zap.String("file", filePath)) + } else { + // Register the file descriptor in our LOCAL registry (not global) + if err := pl.files.RegisterFile(fd); err != nil { + pl.logger.Error("file descriptor registration failed in local registry", + zap.String("file", filePath), + zap.Error(err)) + return fmt.Errorf("failed to register file descriptor in local registry: %w", err) + } + + pl.logger.Debug("file descriptor registered successfully in local registry", + zap.String("file", filePath)) + } + + // Extract services from this file descriptor + services := fd.Services() + for i := 0; i < services.Len(); i++ { + service := services.Get(i) + serviceDef := pl.extractServiceDefinition(fd, service) + 
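// Index the service under its fully qualified name so GetService and GetMethod lookups can resolve it later.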
+ pl.services[serviceDef.FullName] = serviceDef + + pl.logger.Debug("extracted service", + zap.String("service", serviceDef.FullName), + zap.Int("methods", len(serviceDef.Methods))) + } + + return nil +} + +// extractServiceDefinition extracts a service definition from a service descriptor +func (pl *ProtoLoader) extractServiceDefinition(fd protoreflect.FileDescriptor, service protoreflect.ServiceDescriptor) *ServiceDefinition { + serviceDef := &ServiceDefinition{ + FullName: string(service.FullName()), + Package: string(fd.Package()), + ServiceName: string(service.Name()), + FileDescriptor: fd, + ServiceDescriptor: service, + Methods: make([]MethodDefinition, 0), + } + + // Extract methods + methods := service.Methods() + for i := 0; i < methods.Len(); i++ { + method := methods.Get(i) + methodDef := MethodDefinition{ + Name: string(method.Name()), + FullName: string(method.FullName()), + InputType: string(method.Input().FullName()), + OutputType: string(method.Output().FullName()), + InputMessageDescriptor: method.Input(), + OutputMessageDescriptor: method.Output(), + IsClientStreaming: method.IsStreamingClient(), + IsServerStreaming: method.IsStreamingServer(), + } + serviceDef.Methods = append(serviceDef.Methods, methodDef) + } + + return serviceDef +} + +// GetServices returns all loaded service definitions. +// The returned map should be treated as read-only to prevent accidental mutation. +func (pl *ProtoLoader) GetServices() map[string]*ServiceDefinition { + return pl.services +} + +// GetService returns a specific service definition by full name +func (pl *ProtoLoader) GetService(fullName string) (*ServiceDefinition, bool) { + service, ok := pl.services[fullName] + return service, ok +} + +// GetMethod finds a method by service and method name +func (pl *ProtoLoader) GetMethod(serviceName, methodName string) (*MethodDefinition, error) { + service, ok := pl.services[serviceName] + if !ok { + return nil, fmt.Errorf("service not found: %s", serviceName) + } + + for i := range service.Methods { + if service.Methods[i].Name == methodName { + return &service.Methods[i], nil + } + } + + return nil, fmt.Errorf("method not found: %s.%s", serviceName, methodName) +} + +// GetFiles returns the custom Files registry containing all loaded file descriptors +// This is used to create a custom type resolver +func (pl *ProtoLoader) GetFiles() *protoregistry.Files { + return pl.files +} + +// getFieldByJSONName finds a field in a message descriptor by its JSON name (camelCase). +// Protobuf JSON uses camelCase field names, but descriptors store the original proto field names. +// This function tries to match by JSON name first, then falls back to the original name. 
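The JSON-name fallback described above exists because protobuf JSON exposes snake_case proto fields under derived camelCase names. A small self-contained illustration of Name versus JSONName; the descriptorpb message is arbitrary, chosen only because it contains a snake_case field.

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// FieldDescriptorProto has a proto field named "json_name", whose derived
	// protobuf JSON name is "jsonName" - the form incoming variables use.
	desc := (&descriptorpb.FieldDescriptorProto{}).ProtoReflect().Descriptor()
	fd := desc.Fields().ByName("json_name")
	fmt.Println(fd.Name(), "->", fd.JSONName()) // json_name -> jsonName
}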
+func getFieldByJSONName(msg protoreflect.MessageDescriptor, jsonName string) protoreflect.FieldDescriptor { + if msg == nil { + return nil + } + fields := msg.Fields() + for i := 0; i < fields.Len(); i++ { + field := fields.Get(i) + // Check if JSON name matches (protobuf automatically generates JSON names) + if field.JSONName() == jsonName { + return field + } + // Fallback: check if the original field name matches + if string(field.Name()) == jsonName { + return field + } + } + return nil +} + +// getEnumType returns the enum descriptor for a field, or nil if not an enum +func getEnumType(field protoreflect.FieldDescriptor) protoreflect.EnumDescriptor { + if field.Kind() == protoreflect.EnumKind { + return field.Enum() + } + return nil +} + +// getMessageType returns the message descriptor for a field, or nil if not a message +func getMessageType(field protoreflect.FieldDescriptor) protoreflect.MessageDescriptor { + if field.Kind() == protoreflect.MessageKind { + return field.Message() + } + return nil +} diff --git a/router/pkg/connectrpc/proto_loader_test.go b/router/pkg/connectrpc/proto_loader_test.go new file mode 100644 index 0000000000..e0c1d251bb --- /dev/null +++ b/router/pkg/connectrpc/proto_loader_test.go @@ -0,0 +1,109 @@ +package connectrpc + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +// setupTestProtoLoaderFromDir is a helper to load proto files from a directory. +// This helper is shared across test files to avoid duplication. +func setupTestProtoLoaderFromDir(t *testing.T, dir string) *ProtoLoader { + t.Helper() + loader := NewProtoLoader(zap.NewNop()) + require.NoError(t, loader.LoadFromDirectory(dir)) + return loader +} + +func TestLoadEmployeeProto(t *testing.T) { + t.Run("loads and parses employee.proto successfully", func(t *testing.T) { + loader := setupTestProtoLoaderFromDir(t, "samples/services/employee.v1") + + // Verify the service was loaded + services := loader.GetServices() + assert.Len(t, services, 1, "Should load exactly one service from employee_only directory") + + // Check the EmployeeService + service, ok := loader.GetService("employee.v1.EmployeeService") + require.True(t, ok, "EmployeeService should be loaded") + + assert.Equal(t, "employee.v1.EmployeeService", service.FullName) + assert.Equal(t, "employee.v1", service.Package) + assert.Equal(t, "EmployeeService", service.ServiceName) + + // Verify expected methods are present + methodNames := make([]string, len(service.Methods)) + for i, method := range service.Methods { + methodNames[i] = method.Name + } + + expectedMethods := []string{ + "UpdateEmployeeMood", + "FindEmployeesByPets", + "FindEmployeesByPetsInlineFragment", + "FindEmployeesByPetsNamedFragment", + "GetEmployeeById", + "GetEmployees", + "GetEmployeesWithMood", + } + + // Verify we have at least the expected methods (allows for future additions) + assert.GreaterOrEqual(t, len(service.Methods), len(expectedMethods)) + + for _, expected := range expectedMethods { + assert.Contains(t, methodNames, expected, "Method %s should be present", expected) + } + }) + + t.Run("verifies query method details", func(t *testing.T) { + loader := setupTestProtoLoaderFromDir(t, "samples/services/employee.v1") + + method, err := loader.GetMethod("employee.v1.EmployeeService", "GetEmployeeById") + require.NoError(t, err) + + assert.Equal(t, "GetEmployeeById", method.Name) + assert.Equal(t, "employee.v1.EmployeeService.GetEmployeeById", method.FullName) + assert.Equal(t, 
"employee.v1.GetEmployeeByIdRequest", method.InputType) + assert.Equal(t, "employee.v1.GetEmployeeByIdResponse", method.OutputType) + assert.False(t, method.IsClientStreaming) + assert.False(t, method.IsServerStreaming) + }) + + t.Run("verifies mutation method details", func(t *testing.T) { + loader := setupTestProtoLoaderFromDir(t, "samples/services/employee.v1") + + method, err := loader.GetMethod("employee.v1.EmployeeService", "UpdateEmployeeMood") + require.NoError(t, err) + + assert.Equal(t, "UpdateEmployeeMood", method.Name) + assert.Equal(t, "employee.v1.EmployeeService.UpdateEmployeeMood", method.FullName) + assert.Equal(t, "employee.v1.UpdateEmployeeMoodRequest", method.InputType) + assert.Equal(t, "employee.v1.UpdateEmployeeMoodResponse", method.OutputType) + assert.False(t, method.IsClientStreaming) + assert.False(t, method.IsServerStreaming) + }) + + t.Run("verifies all query methods are present", func(t *testing.T) { + loader := setupTestProtoLoaderFromDir(t, "samples/services/employee.v1") + + queryMethods := []string{ + "FindEmployeesByPets", + "FindEmployeesByPetsInlineFragment", + "FindEmployeesByPetsNamedFragment", + "GetEmployeeById", + "GetEmployees", + "GetEmployeesWithMood", + } + + for _, methodName := range queryMethods { + method, err := loader.GetMethod("employee.v1.EmployeeService", methodName) + require.NoError(t, err, "Method %s should be found", methodName) + assert.NotNil(t, method) + assert.False(t, method.IsClientStreaming, "Query method %s should not be client streaming", methodName) + assert.False(t, method.IsServerStreaming, "Query method %s should not be server streaming", methodName) + } + }) + +} diff --git a/router/pkg/connectrpc/samples/services/employee.v1/MutationUpdateEmployeeMood.graphql b/router/pkg/connectrpc/samples/services/employee.v1/MutationUpdateEmployeeMood.graphql new file mode 100644 index 0000000000..ee3f55e902 --- /dev/null +++ b/router/pkg/connectrpc/samples/services/employee.v1/MutationUpdateEmployeeMood.graphql @@ -0,0 +1,11 @@ +mutation UpdateEmployeeMood($employeeId: Int!, $mood: Mood!) { + updateMood(employeeID: $employeeId, mood: $mood) { + id + details { + forename + surname + } + currentMood + derivedMood + } +} diff --git a/router/pkg/connectrpc/samples/services/employee.v1/QueryGetEmployeeById.graphql b/router/pkg/connectrpc/samples/services/employee.v1/QueryGetEmployeeById.graphql new file mode 100644 index 0000000000..e056d1c4b7 --- /dev/null +++ b/router/pkg/connectrpc/samples/services/employee.v1/QueryGetEmployeeById.graphql @@ -0,0 +1,18 @@ +query GetEmployeeById($employeeId: Int!) { + employee(id: $employeeId) { + id + tag + details { + pets { + name + } + location { + key { + name + } + } + forename + surname + } + } +} diff --git a/router/pkg/connectrpc/samples/services/employee.v1/QueryGetEmployeeByPets.graphql b/router/pkg/connectrpc/samples/services/employee.v1/QueryGetEmployeeByPets.graphql new file mode 100644 index 0000000000..b674b5db58 --- /dev/null +++ b/router/pkg/connectrpc/samples/services/employee.v1/QueryGetEmployeeByPets.graphql @@ -0,0 +1,12 @@ +query FindEmployeesByPets($hasPets: Boolean!) 
{ + findEmployees(criteria: {hasPets: $hasPets}) { + id + details { + forename + surname + pets { + name + } + } + } +} diff --git a/router/pkg/connectrpc/samples/services/employee.v1/QueryGetEmployeeWithMood.graphql b/router/pkg/connectrpc/samples/services/employee.v1/QueryGetEmployeeWithMood.graphql new file mode 100644 index 0000000000..cc4572075e --- /dev/null +++ b/router/pkg/connectrpc/samples/services/employee.v1/QueryGetEmployeeWithMood.graphql @@ -0,0 +1,12 @@ +query GetEmployeesWithMood { + employees { + id + currentMood + details { + pets { + gender + name + } + } + } +} diff --git a/router/pkg/connectrpc/samples/services/employee.v1/QueryGetEmployees.graphql b/router/pkg/connectrpc/samples/services/employee.v1/QueryGetEmployees.graphql new file mode 100644 index 0000000000..f258bc8a67 --- /dev/null +++ b/router/pkg/connectrpc/samples/services/employee.v1/QueryGetEmployees.graphql @@ -0,0 +1,10 @@ +query GetEmployees { + employees { + id + details { + forename + surname + hasChildren + } + } +} diff --git a/router/pkg/connectrpc/samples/services/employee.v1/QueryGetEmployeesByPetsInlineFragment.graphql b/router/pkg/connectrpc/samples/services/employee.v1/QueryGetEmployeesByPetsInlineFragment.graphql new file mode 100644 index 0000000000..992c5991fc --- /dev/null +++ b/router/pkg/connectrpc/samples/services/employee.v1/QueryGetEmployeesByPetsInlineFragment.graphql @@ -0,0 +1,14 @@ +query FindEmployeesByPetsInlineFragment($hasPets: Boolean!) { + findEmployees(criteria: {hasPets: $hasPets}) { + id + details { + ... on Details { + forename + surname + pets { + name + } + } + } + } +} diff --git a/router/pkg/connectrpc/samples/services/employee.v1/QueryGetEmployeesByPetsNamedFragment.graphql b/router/pkg/connectrpc/samples/services/employee.v1/QueryGetEmployeesByPetsNamedFragment.graphql new file mode 100644 index 0000000000..6930c39353 --- /dev/null +++ b/router/pkg/connectrpc/samples/services/employee.v1/QueryGetEmployeesByPetsNamedFragment.graphql @@ -0,0 +1,20 @@ +fragment PetFields on Pet { + name +} + +fragment EmployeeDetails on Details { + forename + surname + pets { + ...PetFields + } +} + +query FindEmployeesByPetsNamedFragment($hasPets: Boolean!) 
{ + findEmployees(criteria: {hasPets: $hasPets}) { + id + details { + ...EmployeeDetails + } + } +} diff --git a/router/pkg/connectrpc/samples/services/employee.v1/service.proto b/router/pkg/connectrpc/samples/services/employee.v1/service.proto new file mode 100644 index 0000000000..a8888955bf --- /dev/null +++ b/router/pkg/connectrpc/samples/services/employee.v1/service.proto @@ -0,0 +1,191 @@ +syntax = "proto3"; +package employee.v1; + +option go_package = "github.com/wundergraph/cosmo/router/pkg/connectrpc/samples/services/employee/v1;employeev1"; + +service EmployeeService { + rpc FindEmployeesByPets(FindEmployeesByPetsRequest) returns (FindEmployeesByPetsResponse) { + option idempotency_level = NO_SIDE_EFFECTS; + } + + rpc FindEmployeesByPetsInlineFragment(FindEmployeesByPetsInlineFragmentRequest) returns (FindEmployeesByPetsInlineFragmentResponse) { + option idempotency_level = NO_SIDE_EFFECTS; + } + + rpc FindEmployeesByPetsNamedFragment(FindEmployeesByPetsNamedFragmentRequest) returns (FindEmployeesByPetsNamedFragmentResponse) { + option idempotency_level = NO_SIDE_EFFECTS; + } + + rpc GetEmployeeById(GetEmployeeByIdRequest) returns (GetEmployeeByIdResponse) { + option idempotency_level = NO_SIDE_EFFECTS; + } + + rpc GetEmployees(GetEmployeesRequest) returns (GetEmployeesResponse) { + option idempotency_level = NO_SIDE_EFFECTS; + } + + rpc GetEmployeesWithMood(GetEmployeesWithMoodRequest) returns (GetEmployeesWithMoodResponse) { + option idempotency_level = NO_SIDE_EFFECTS; + } + + rpc UpdateEmployeeMood(UpdateEmployeeMoodRequest) returns (UpdateEmployeeMoodResponse) {} +} + +message UpdateEmployeeMoodRequest { + int32 employee_id = 1; + Mood mood = 2; +} + +message UpdateEmployeeMoodResponse { + message UpdateMood { + message Details { + string forename = 5; + string surname = 6; + } + int32 id = 1; + Details details = 2; + Mood current_mood = 3; + Mood derived_mood = 4; + } + // This mutation updates the mood of an employee. + UpdateMood update_mood = 1; +} + +message GetEmployeeByIdRequest { + int32 employee_id = 1; +} + +message GetEmployeeByIdResponse { + message Employee { + message Details { + message Pets { + string name = 1; + } + message Location { + message Key { + string name = 1; + } + Key key = 1; + } + string forename = 5; + string surname = 6; + repeated Pets pets = 9; + Location location = 10; + } + int32 id = 1; + string tag = 2; + Details details = 3; + } + Employee employee = 1; +} + +message FindEmployeesByPetsRequest { + bool has_pets = 1; +} + +message FindEmployeesByPetsResponse { + message FindEmployees { + message Details { + message Pets { + string name = 1; + } + string forename = 5; + string surname = 6; + repeated Pets pets = 9; + } + int32 id = 1; + Details details = 2; + } + // This is a GraphQL query that retrieves a list of employees.
+ repeated FindEmployees find_employees = 1; +} + +message GetEmployeesWithMoodRequest { +} + +message GetEmployeesWithMoodResponse { + message Employees { + message Details { + message Pets { + string name = 1; + Gender gender = 3; + } + repeated Pets pets = 9; + } + int32 id = 1; + Details details = 3; + Mood current_mood = 4; + } + repeated Employees employees = 1; +} + +message GetEmployeesRequest { +} + +message GetEmployeesResponse { + message Employees { + message Details { + string forename = 11; + string surname = 12; + bool has_children = 13; + } + int32 id = 1; + Details details = 3; + } + repeated Employees employees = 1; +} + +message FindEmployeesByPetsInlineFragmentRequest { + bool has_pets = 1; +} + +message FindEmployeesByPetsInlineFragmentResponse { + message FindEmployees { + message Details { + message Pets { + string name = 1; + } + string forename = 11; + string surname = 12; + repeated Pets pets = 14; + } + int32 id = 1; + Details details = 2; + } + // This is a GraphQL query that retrieves a list of employees. + repeated FindEmployees find_employees = 1; +} + +message FindEmployeesByPetsNamedFragmentRequest { + bool has_pets = 1; +} + +message FindEmployeesByPetsNamedFragmentResponse { + message FindEmployees { + message Details { + message Pets { + string name = 1; + } + string forename = 11; + string surname = 12; + repeated Pets pets = 14; + } + int32 id = 1; + Details details = 2; + } + // This is a GraphQL query that retrieves a list of employees. + repeated FindEmployees find_employees = 1; +} + +enum Mood { + MOOD_UNSPECIFIED = 0; + MOOD_HAPPY = 1; + MOOD_SAD = 2; +} + +enum Gender { + GENDER_UNSPECIFIED = 0; + GENDER_FEMALE = 1; + GENDER_MALE = 2; + GENDER_UNKNOWN = 3; +} diff --git a/router/pkg/connectrpc/samples/services/employee.v1/service.proto.lock.json b/router/pkg/connectrpc/samples/services/employee.v1/service.proto.lock.json new file mode 100644 index 0000000000..57ea33e42e --- /dev/null +++ b/router/pkg/connectrpc/samples/services/employee.v1/service.proto.lock.json @@ -0,0 +1,143 @@ +{ + "version": "1.0.0", + "messages": { + "UpdateEmployeeMoodRequest": { + "fields": { + "employee_id": 1, + "mood": 2 + } + }, + "UpdateEmployeeMoodResponse": { + "fields": { + "update_mood": 1 + } + }, + "UpdateMood": { + "fields": { + "id": 1, + "details": 2, + "current_mood": 3, + "derived_mood": 4 + } + }, + "Details": { + "fields": { + "forename": 11, + "surname": 12, + "pets": 14 + }, + "reservedNumbers": [ + 4, + 1, + 2, + 3, + 7, + 8, + 10, + 5, + 6, + 9, + 13 + ] + }, + "GetEmployeeByIdRequest": { + "fields": { + "employee_id": 1 + } + }, + "GetEmployeeByIdResponse": { + "fields": { + "employee": 1 + } + }, + "Employee": { + "fields": { + "id": 1, + "tag": 2, + "details": 3 + } + }, + "Pets": { + "fields": { + "name": 1 + }, + "reservedNumbers": [ + 2, + 3 + ] + }, + "Location": { + "fields": { + "key": 1 + } + }, + "Key": { + "fields": { + "name": 1 + } + }, + "FindEmployeesByPetsRequest": { + "fields": { + "has_pets": 1 + } + }, + "FindEmployeesByPetsResponse": { + "fields": { + "find_employees": 1 + } + }, + "FindEmployees": { + "fields": { + "id": 1, + "details": 2 + } + }, + "GetEmployeesWithMoodRequest": { + "fields": {} + }, + "GetEmployeesWithMoodResponse": { + "fields": { + "employees": 1 + } + }, + "Employees": { + "fields": { + "id": 1, + "details": 3 + }, + "reservedNumbers": [ + 2, + 4 + ] + }, + "GetEmployeesRequest": { + "fields": {} + }, + "GetEmployeesResponse": { + "fields": { + "employees": 1 + } + }, + 
"FindEmployeesByPetsInlineFragmentRequest": { + "fields": { + "has_pets": 1 + } + }, + "FindEmployeesByPetsInlineFragmentResponse": { + "fields": { + "find_employees": 1 + } + }, + "FindEmployeesByPetsNamedFragmentRequest": { + "fields": { + "has_pets": 1 + } + }, + "FindEmployeesByPetsNamedFragmentResponse": { + "fields": { + "find_employees": 1 + } + } + }, + "enums": {} +} \ No newline at end of file diff --git a/router/pkg/connectrpc/server.go b/router/pkg/connectrpc/server.go new file mode 100644 index 0000000000..244d1020a2 --- /dev/null +++ b/router/pkg/connectrpc/server.go @@ -0,0 +1,429 @@ +package connectrpc + +import ( + "bufio" + "context" + "errors" + "fmt" + "net" + "net/http" + "time" + + "connectrpc.com/vanguard" + "github.com/hashicorp/go-retryablehttp" + "go.uber.org/zap" + "golang.org/x/net/http2" + "golang.org/x/net/http2/h2c" + + "github.com/wundergraph/cosmo/router/pkg/cors" + "github.com/wundergraph/cosmo/router/pkg/schemaloader" +) + +// ServerConfig holds configuration for the ConnectRPC server +type ServerConfig struct { + // ServicesDir is the root directory containing all service subdirectories + // Each service directory should contain proto files and GraphQL operations + ServicesDir string + // ListenAddr is the address to listen on + ListenAddr string + // GraphQLEndpoint is the router's GraphQL endpoint + GraphQLEndpoint string + // Logger for structured logging + Logger *zap.Logger + // RequestTimeout for HTTP requests + RequestTimeout time.Duration + // CorsConfig is the CORS configuration for the ConnectRPC server + CorsConfig *cors.Config +} + +// Server is the main ConnectRPC server that handles gRPC/Connect/gRPC-Web requests +type Server struct { + config ServerConfig + logger *zap.Logger + httpServer *http.Server + listener net.Listener + transcoder *vanguard.Transcoder + protoLoader *ProtoLoader + operationRegistry *OperationRegistry + rpcHandler *RPCHandler + vanguardService *VanguardService + httpClient *http.Client +} + +// NewServer creates a new ConnectRPC server and loads all services +func NewServer(config ServerConfig) (*Server, error) { + // Validate configuration + if config.ServicesDir == "" { + return nil, fmt.Errorf("services directory must be provided") + } + + if config.ListenAddr == "" { + config.ListenAddr = "0.0.0.0:5026" + } + + if config.Logger == nil { + return nil, fmt.Errorf("logger is required") + } + + if config.RequestTimeout == 0 { + config.RequestTimeout = 30 * time.Second + } + + // Create HTTP client with retry + retryClient := retryablehttp.NewClient() + retryClient.Logger = nil + httpClient := retryClient.StandardClient() + httpClient.Timeout = config.RequestTimeout + + server := &Server{ + config: config, + logger: config.Logger, + httpClient: httpClient, + } + + startTime := time.Now() + + // Discover services from the services directory + discoveredServices, err := DiscoverServices(ServiceDiscoveryConfig{ + ServicesDir: config.ServicesDir, + Logger: config.Logger, + }) + if err != nil { + return nil, fmt.Errorf("failed to discover services: %w", err) + } + + // Create proto loader first (needed by handler) + server.protoLoader = NewProtoLoader(server.logger) + + // Load proto files for each discovered service + packageServiceMap := make(map[string][]string) // package -> list of services + + for _, service := range discoveredServices { + // Load proto files for this service + if err := server.protoLoader.LoadFromDirectory(service.ServiceDir); err != nil { + return nil, fmt.Errorf("failed to load proto files for 
service %s: %w", service.FullName, err) + } + + // Track packages and services + packageServiceMap[service.Package] = append(packageServiceMap[service.Package], service.ServiceName) + } + + // Build operations map for all services + operationsMap, err := server.buildOperationsMap(discoveredServices) + if err != nil { + return nil, fmt.Errorf("failed to build operations map: %w", err) + } + + // Count total operations + totalOperations := 0 + for _, serviceOps := range operationsMap { + totalOperations += len(serviceOps) + } + + // Warn about services with no operations + for _, service := range discoveredServices { + if len(service.OperationFiles) == 0 { + server.logger.Warn("no operations found for service", + zap.String("service", service.FullName)) + } + } + + // Create immutable operation registry with pre-built operations + server.operationRegistry = NewOperationRegistry(operationsMap) + + // Initialize components (requires protoLoader and operationRegistry to be set) + if err := server.initializeComponents(); err != nil { + return nil, fmt.Errorf("failed to initialize components: %w", err) + } + + // Create service wrapper + vanguardService, err := NewVanguardService(VanguardServiceConfig{ + Handler: server.rpcHandler, + ProtoLoader: server.protoLoader, + Logger: server.logger, + }) + if err != nil { + return nil, fmt.Errorf("failed to create service wrapper: %w", err) + } + server.vanguardService = vanguardService + + // Create protocol transcoder + vanguardServices := vanguardService.GetServices() + transcoder, err := vanguard.NewTranscoder(vanguardServices) + if err != nil { + return nil, fmt.Errorf("failed to create protocol transcoder: %w", err) + } + server.transcoder = transcoder + + // Log consolidated initialization summary at DEBUG level + // The main INFO log will be in router.go + server.logger.Debug("ConnectRPC services loaded", + zap.Int("packages", len(packageServiceMap)), + zap.Int("services", len(discoveredServices)), + zap.Int("operations", totalOperations), + zap.Duration("duration", time.Since(startTime))) + + return server, nil +} + +// Start starts the HTTP server (services must already be loaded via NewServer) +func (s *Server) Start() error { + s.logger.Debug("starting ConnectRPC server", + zap.String("listen_addr", s.config.ListenAddr), + zap.String("services_dir", s.config.ServicesDir), + zap.String("graphql_endpoint", s.config.GraphQLEndpoint)) + + // Verify that services have been loaded + if s.transcoder == nil { + return fmt.Errorf("server not properly initialized - services not loaded") + } + + // Create HTTP server with HTTP/2 support + handler := s.createHandler() + h2cHandler := h2c.NewHandler(handler, &http2.Server{}) + + s.httpServer = &http.Server{ + Addr: s.config.ListenAddr, + Handler: h2cHandler, + ReadTimeout: 30 * time.Second, + WriteTimeout: 30 * time.Second, + IdleTimeout: 60 * time.Second, + } + + s.logger.Debug("HTTP/2 (h2c) support enabled") + + // Create listener to get actual bound address + listener, err := net.Listen("tcp", s.config.ListenAddr) + if err != nil { + return fmt.Errorf("failed to create listener: %w", err) + } + s.listener = listener + + // Start server in goroutine + go func() { + s.logger.Info("ConnectRPC server ready", + zap.String("addr", s.listener.Addr().String())) + + if err := s.httpServer.Serve(s.listener); err != nil && !errors.Is(err, http.ErrServerClosed) { + s.logger.Error("server error", zap.Error(err)) + } + }() + + return nil +} + +// Stop gracefully shuts down the server +func (s *Server) Stop(ctx 
context.Context) error { + if s.httpServer == nil { + return fmt.Errorf("server is not started") + } + + s.logger.Info("shutting down ConnectRPC server") + + shutdownCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + if err := s.httpServer.Shutdown(shutdownCtx); err != nil { + return fmt.Errorf("failed to shutdown server: %w", err) + } + + s.logger.Info("ConnectRPC server stopped") + return nil +} + +// Reload reloads the server configuration and operations. +// This creates entirely new instances of all components for atomic hot-reload. +func (s *Server) Reload() error { + // Check if server has been started + if s.httpServer == nil { + return fmt.Errorf("server not started; call Start before Reload") + } + + s.logger.Info("reloading ConnectRPC server") + + // Discover services from the services directory + discoveredServices, err := DiscoverServices(ServiceDiscoveryConfig{ + ServicesDir: s.config.ServicesDir, + Logger: s.logger, + }) + if err != nil { + return fmt.Errorf("failed to discover services: %w", err) + } + + // Create a fresh proto loader + s.protoLoader = NewProtoLoader(s.logger) + + // Load proto files for each service + for _, service := range discoveredServices { + if err := s.protoLoader.LoadFromDirectory(service.ServiceDir); err != nil { + return fmt.Errorf("failed to reload proto files for service %s: %w", service.FullName, err) + } + } + + // Build operations map for all services + operationsMap, err := s.buildOperationsMap(discoveredServices) + if err != nil { + return fmt.Errorf("failed to build operations map: %w", err) + } + + // Create new immutable operation registry with pre-built operations + s.operationRegistry = NewOperationRegistry(operationsMap) + + // Reinitialize components with fresh proto loader and operation registry + if err := s.initializeComponents(); err != nil { + return fmt.Errorf("failed to reinitialize components: %w", err) + } + + // Recreate service wrapper + vanguardService, err := NewVanguardService(VanguardServiceConfig{ + Handler: s.rpcHandler, + ProtoLoader: s.protoLoader, + Logger: s.logger, + }) + if err != nil { + return fmt.Errorf("failed to recreate service wrapper: %w", err) + } + s.vanguardService = vanguardService + + // Recreate protocol transcoder + transcoder, err := vanguard.NewTranscoder(vanguardService.GetServices()) + if err != nil { + return fmt.Errorf("failed to recreate protocol transcoder: %w", err) + } + s.transcoder = transcoder + + // Update HTTP server handler with h2c wrapper for gRPC compatibility + handler := s.createHandler() + s.httpServer.Handler = h2c.NewHandler(handler, &http2.Server{}) + + s.logger.Info("ConnectRPC server reloaded successfully") + return nil +} + +// initializeComponents initializes the server components using the caller-populated operation registry. +// The operation registry must be set by the caller before calling this method. +func (s *Server) initializeComponents() error { + // Create RPC handler + // Note: ProtoLoader and OperationRegistry must be set before calling this + var err error + s.rpcHandler, err = NewRPCHandler(HandlerConfig{ + GraphQLEndpoint: s.config.GraphQLEndpoint, + HTTPClient: s.httpClient, + Logger: s.logger, + OperationRegistry: s.operationRegistry, + ProtoLoader: s.protoLoader, + }) + if err != nil { + return fmt.Errorf("failed to create RPC handler: %w", err) + } + + return nil +} + +// buildOperationsMap builds the complete operations map for all services. +// This should be called after proto files are loaded. 
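// The result is keyed by fully qualified service name and then by operation name. As an
// illustrative sketch only (the concrete values are taken from the bundled employee.v1 sample),
// the returned map has roughly this shape:
//
//	map[string]map[string]*schemaloader.Operation{
//	    "employee.v1.EmployeeService": {
//	        "GetEmployeeById": {Name: "GetEmployeeById", OperationType: "query"},
//	    },
//	}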
+func (s *Server) buildOperationsMap(discoveredServices []DiscoveredService) (map[string]map[string]*schemaloader.Operation, error) { + allOperations := make(map[string]map[string]*schemaloader.Operation) + + for _, service := range discoveredServices { + if len(service.OperationFiles) > 0 { + serviceOps, err := LoadOperationsForService(service.FullName, service.OperationFiles, s.logger) + if err != nil { + return nil, fmt.Errorf("failed to load operations for service %s: %w", service.FullName, err) + } + allOperations[service.FullName] = serviceOps + } + } + + return allOperations, nil +} + +// createHandler creates the HTTP handler +func (s *Server) createHandler() http.Handler { + mux := http.NewServeMux() + + // Wrap transcoder with response writer that implements required interfaces + wrappedTranscoder := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Create a response writer that implements required interfaces for gRPC streaming + rw := &responseWriter{ResponseWriter: w} + + // The transcoder handles protocol translation and routing + s.transcoder.ServeHTTP(rw, r) + }) + + // Apply CORS middleware if enabled + var handler http.Handler = wrappedTranscoder + if s.config.CorsConfig != nil && s.config.CorsConfig.Enabled { + corsMiddleware := cors.New(*s.config.CorsConfig) + handler = corsMiddleware(wrappedTranscoder) + } + + // Mount handler at root + mux.Handle("/", handler) + + return mux +} + +// GetServiceCount returns the number of registered services +func (s *Server) GetServiceCount() int { + if s.vanguardService == nil { + return 0 + } + return s.vanguardService.GetServiceCount() +} + +// GetServiceNames returns the names of all registered services +func (s *Server) GetServiceNames() []string { + if s.vanguardService == nil { + return nil + } + return s.vanguardService.GetServiceNames() +} + +// responseWriter wraps http.ResponseWriter and implements required interfaces for gRPC streaming +type responseWriter struct { + http.ResponseWriter +} + +func (rw *responseWriter) WriteHeader(code int) { + rw.ResponseWriter.WriteHeader(code) +} + +// Flush implements http.Flusher interface (required for gRPC streaming) +func (rw *responseWriter) Flush() { + if f, ok := rw.ResponseWriter.(http.Flusher); ok { + f.Flush() + } +} + +// Push implements http.Pusher interface (for HTTP/2 server push) +func (rw *responseWriter) Push(target string, opts *http.PushOptions) error { + if p, ok := rw.ResponseWriter.(http.Pusher); ok { + return p.Push(target, opts) + } + return http.ErrNotSupported +} + +// Hijack implements http.Hijacker interface (for connection hijacking) +func (rw *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + if h, ok := rw.ResponseWriter.(http.Hijacker); ok { + return h.Hijack() + } + return nil, nil, http.ErrNotSupported +} + +// GetOperationCount returns the number of operations/methods available +func (s *Server) GetOperationCount() int { + if s.rpcHandler == nil { + return 0 + } + return s.rpcHandler.GetOperationCount() +} + +// Addr returns the server's actual listening address +func (s *Server) Addr() net.Addr { + if s.listener == nil { + return nil + } + return s.listener.Addr() +} diff --git a/router/pkg/connectrpc/server_test.go b/router/pkg/connectrpc/server_test.go new file mode 100644 index 0000000000..26c90a9df9 --- /dev/null +++ b/router/pkg/connectrpc/server_test.go @@ -0,0 +1,79 @@ +package connectrpc + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + 
"go.uber.org/zap" +) + +func TestNewServer(t *testing.T) { + t.Run("creates server with valid config", func(t *testing.T) { + server, err := NewServer(ServerConfig{ + ServicesDir: "samples/services", + GraphQLEndpoint: "http://localhost:4000/graphql", + ListenAddr: "localhost:5026", + Logger: zap.NewNop(), + }) + + require.NoError(t, err) + assert.NotNil(t, server) + assert.Equal(t, "samples/services", server.config.ServicesDir) + assert.Equal(t, "http://localhost:4000/graphql", server.config.GraphQLEndpoint) + }) + + t.Run("uses default listen address", func(t *testing.T) { + server, err := NewServer(ServerConfig{ + ServicesDir: "samples/services", + GraphQLEndpoint: "http://localhost:4000/graphql", + Logger: zap.NewNop(), + }) + + require.NoError(t, err) + assert.Equal(t, "0.0.0.0:5026", server.config.ListenAddr) + }) + + t.Run("uses default timeout", func(t *testing.T) { + server, err := NewServer(ServerConfig{ + ServicesDir: "samples/services", + GraphQLEndpoint: "http://localhost:4000/graphql", + Logger: zap.NewNop(), + }) + + require.NoError(t, err) + assert.Equal(t, 30*time.Second, server.config.RequestTimeout) + }) + + t.Run("returns error when services dir is empty", func(t *testing.T) { + _, err := NewServer(ServerConfig{ + GraphQLEndpoint: "http://localhost:4000/graphql", + Logger: zap.NewNop(), + }) + + assert.Error(t, err) + assert.Contains(t, err.Error(), "services directory must be provided") + }) + + t.Run("returns error when graphql endpoint is empty", func(t *testing.T) { + _, err := NewServer(ServerConfig{ + ServicesDir: "samples/services", + Logger: zap.NewNop(), + }) + + assert.Error(t, err) + assert.Contains(t, err.Error(), "graphql endpoint cannot be empty") + }) + + t.Run("returns error when logger is nil", func(t *testing.T) { + _, err := NewServer(ServerConfig{ + ServicesDir: "samples/services", + GraphQLEndpoint: "http://localhost:4000/graphql", + Logger: nil, + }) + + assert.Error(t, err) + assert.Contains(t, err.Error(), "logger is required") + }) +} diff --git a/router/pkg/connectrpc/service_discovery.go b/router/pkg/connectrpc/service_discovery.go new file mode 100644 index 0000000000..9cd6425f73 --- /dev/null +++ b/router/pkg/connectrpc/service_discovery.go @@ -0,0 +1,276 @@ +package connectrpc + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "go.uber.org/zap" +) + +// DiscoveredService represents a service found during directory scanning +type DiscoveredService struct { + // ServiceDir is the directory containing the service's proto and operation files + ServiceDir string + // ProtoFiles are the proto files found in this service directory + ProtoFiles []string + // OperationFiles are the GraphQL operation files found recursively in this service directory + OperationFiles []string + // Package is the proto package name extracted from proto files + Package string + // ServiceName is the service name extracted from proto files + ServiceName string + // FullName is the fully qualified service name (package.service) + FullName string +} + +// ServiceDiscoveryConfig holds configuration for service discovery +type ServiceDiscoveryConfig struct { + // ServicesDir is the root directory containing all service subdirectories + ServicesDir string + // Logger for structured logging + Logger *zap.Logger +} + +// DiscoverServices scans a services directory and discovers all services based on convention. +// It looks for subdirectories containing .proto files and returns information about each service. 
+// +// Directory structure can be: +// - Flat: services/employee.v1/*.proto +// - Nested: services/company/employee.v1/*.proto +// +// Each service directory must contain exactly one .proto file. +// The service name is extracted from the proto file, not the directory name. +func DiscoverServices(config ServiceDiscoveryConfig) ([]DiscoveredService, error) { + if config.ServicesDir == "" { + return nil, fmt.Errorf("services directory cannot be empty") + } + + if config.Logger == nil { + config.Logger = zap.NewNop() + } + + config.Logger.Debug("discovering services", + zap.String("services_dir", config.ServicesDir)) + + // Check if services directory exists + if _, err := os.Stat(config.ServicesDir); os.IsNotExist(err) { + return nil, fmt.Errorf("services directory does not exist: %s", config.ServicesDir) + } + + var discoveredServices []DiscoveredService + seenPackageService := make(map[string]string) // "package.service" -> directory + + // Walk the services directory to find all directories (including root) with proto files + err := filepath.Walk(config.ServicesDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Only process directories (including the root services directory) + if !info.IsDir() { + return nil + } + + // Check if this directory contains proto files + protoFiles, err := findProtoFilesInDir(path) + if err != nil { + return fmt.Errorf("failed to find proto files in %s: %w", path, err) + } + + // Skip directories without proto files - continue searching subdirectories + if len(protoFiles) == 0 { + return nil + } + + config.Logger.Debug("found directory with proto files", + zap.String("dir", path), + zap.Int("proto_count", len(protoFiles))) + + // Extract package and service information from proto files + packageName, serviceName, err := extractServiceInfo(protoFiles, config.Logger) + if err != nil { + return fmt.Errorf("failed to extract service info from %s: %w", path, err) + } + + fullName := fmt.Sprintf("%s.%s", packageName, serviceName) + + // Validate package.service uniqueness + if existingDir, exists := seenPackageService[fullName]; exists { + return fmt.Errorf( + "duplicate service '%s' found in multiple directories: '%s' and '%s'. 
"+ + "Each package.service combination must be unique", + fullName, existingDir, path) + } + seenPackageService[fullName] = path + + // Find all operation files recursively in this service directory + operationFiles, err := findOperationFiles(path) + if err != nil { + config.Logger.Warn("failed to find operation files", + zap.String("dir", path), + zap.Error(err)) + operationFiles = []string{} // Continue even if no operations found + } + + discoveredServices = append(discoveredServices, DiscoveredService{ + ServiceDir: path, + ProtoFiles: protoFiles, + OperationFiles: operationFiles, + Package: packageName, + ServiceName: serviceName, + FullName: fullName, + }) + + config.Logger.Info("discovered service", + zap.String("full_name", fullName), + zap.String("package", packageName), + zap.String("service", serviceName), + zap.String("dir", path), + zap.Int("proto_files", len(protoFiles)), + zap.Int("operation_files", len(operationFiles))) + + // Don't descend into subdirectories of a service directory + // This prevents finding the same service multiple times + return filepath.SkipDir + }) + + if err != nil { + return nil, fmt.Errorf("failed to discover services: %w", err) + } + + if len(discoveredServices) == 0 { + return nil, fmt.Errorf("no services found in directory: %s", config.ServicesDir) + } + + config.Logger.Info("service discovery complete", + zap.Int("total_services", len(discoveredServices)), + zap.String("services_dir", config.ServicesDir)) + + return discoveredServices, nil +} + +// findProtoFilesInDir finds all .proto files directly in a directory (non-recursive) +func findProtoFilesInDir(dir string) ([]string, error) { + var protoFiles []string + + entries, err := os.ReadDir(dir) + if err != nil { + return nil, err + } + + for _, entry := range entries { + if entry.IsDir() { + continue + } + + if strings.HasSuffix(entry.Name(), ".proto") { + protoFiles = append(protoFiles, filepath.Join(dir, entry.Name())) + } + } + + return protoFiles, nil +} + +// extractServiceInfo extracts package and service name from proto files. +// Only one proto file is expected per directory. +// Returns the package name and service name. +func extractServiceInfo(protoFiles []string, logger *zap.Logger) (string, string, error) { + if len(protoFiles) == 0 { + return "", "", fmt.Errorf("no proto files provided") + } + + // Enforce one proto file per directory + if len(protoFiles) > 1 { + return "", "", fmt.Errorf( + "only one proto file is allowed per directory, found %d proto files. 
"+ + "Each service should have its own directory with a single proto file", + len(protoFiles)) + } + + protoFile := protoFiles[0] + content, err := os.ReadFile(protoFile) + if err != nil { + return "", "", fmt.Errorf("failed to read proto file %s: %w", protoFile, err) + } + + // Extract package name + packageName := extractPackageFromProto(string(content)) + if packageName == "" { + return "", "", fmt.Errorf("no package declaration found in %s", protoFile) + } + + // Extract service name + serviceName := extractServiceNameFromProto(string(content)) + if serviceName == "" { + return "", "", fmt.Errorf("no service declaration found in %s", protoFile) + } + + logger.Debug("extracted service info from proto", + zap.String("file", protoFile), + zap.String("package", packageName), + zap.String("service", serviceName)) + + return packageName, serviceName, nil +} + +// extractPackageFromProto extracts the package name from proto file content +func extractPackageFromProto(content string) string { + lines := strings.SplitSeq(content, "\n") + for line := range lines { + line = strings.TrimSpace(line) + if after, ok := strings.CutPrefix(line, "package "); ok { + // Extract package name: "package foo.bar;" -> "foo.bar" + pkg := after + pkg = strings.TrimSuffix(pkg, ";") + pkg = strings.TrimSpace(pkg) + return pkg + } + } + return "" +} + +// extractServiceNameFromProto extracts the first service name from proto file content +func extractServiceNameFromProto(content string) string { + lines := strings.SplitSeq(content, "\n") + for line := range lines { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "service ") { + // Extract service name: "service MyService {" -> "MyService" + parts := strings.Fields(line) + if len(parts) >= 2 { + serviceName := parts[1] + // Remove trailing { if present + serviceName = strings.TrimSuffix(serviceName, "{") + serviceName = strings.TrimSpace(serviceName) + return serviceName + } + } + } + return "" +} + +// findOperationFiles finds all .graphql files in a directory and its subdirectories +func findOperationFiles(dir string) ([]string, error) { + var operationFiles []string + + err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if !info.IsDir() && (strings.HasSuffix(path, ".graphql") || strings.HasSuffix(path, ".gql")) { + operationFiles = append(operationFiles, path) + } + + return nil + }) + + if err != nil { + return nil, err + } + + return operationFiles, nil +} diff --git a/router/pkg/connectrpc/service_discovery_test.go b/router/pkg/connectrpc/service_discovery_test.go new file mode 100644 index 0000000000..691fab23b3 --- /dev/null +++ b/router/pkg/connectrpc/service_discovery_test.go @@ -0,0 +1,353 @@ +package connectrpc + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +func TestDiscoverServices(t *testing.T) { + t.Run("discovers single service with proto and operations", func(t *testing.T) { + // Create temporary test directory structure + tmpDir := t.TempDir() + serviceDir := filepath.Join(tmpDir, "employee.v1") + require.NoError(t, os.MkdirAll(serviceDir, 0755)) + + // Create proto file + protoContent := `syntax = "proto3"; +package employee.v1; + +service EmployeeService { + rpc GetEmployee(GetEmployeeRequest) returns (GetEmployeeResponse); +} + +message GetEmployeeRequest { + int32 id = 1; +} + +message GetEmployeeResponse { + string name = 1; +} +` + require.NoError(t, 
os.WriteFile(filepath.Join(serviceDir, "employee.proto"), []byte(protoContent), 0644)) + + // Create GraphQL operation files + require.NoError(t, os.WriteFile(filepath.Join(serviceDir, "GetEmployee.graphql"), []byte("query GetEmployee { employee { name } }"), 0644)) + + // Discover services + services, err := DiscoverServices(ServiceDiscoveryConfig{ + ServicesDir: tmpDir, + Logger: zap.NewNop(), + }) + + require.NoError(t, err) + require.Len(t, services, 1) + + service := services[0] + assert.Equal(t, "employee.v1", service.Package) + assert.Equal(t, "EmployeeService", service.ServiceName) + assert.Equal(t, "employee.v1.EmployeeService", service.FullName) + assert.Equal(t, serviceDir, service.ServiceDir) + assert.Len(t, service.ProtoFiles, 1) + }) + + t.Run("discovers multiple services at same level", func(t *testing.T) { + tmpDir := t.TempDir() + + // Create employee service + employeeDir := filepath.Join(tmpDir, "employee.v1") + require.NoError(t, os.MkdirAll(employeeDir, 0755)) + employeeProto := `syntax = "proto3"; +package employee.v1; +service EmployeeService { + rpc GetEmployee(GetEmployeeRequest) returns (GetEmployeeResponse); +} +message GetEmployeeRequest { int32 id = 1; } +message GetEmployeeResponse { string name = 1; } +` + require.NoError(t, os.WriteFile(filepath.Join(employeeDir, "employee.proto"), []byte(employeeProto), 0644)) + + // Create product service + productDir := filepath.Join(tmpDir, "product.v1") + require.NoError(t, os.MkdirAll(productDir, 0755)) + productProto := `syntax = "proto3"; +package product.v1; +service ProductService { + rpc GetProduct(GetProductRequest) returns (GetProductResponse); +} +message GetProductRequest { int32 id = 1; } +message GetProductResponse { string name = 1; } +` + require.NoError(t, os.WriteFile(filepath.Join(productDir, "product.proto"), []byte(productProto), 0644)) + + // Discover services + services, err := DiscoverServices(ServiceDiscoveryConfig{ + ServicesDir: tmpDir, + Logger: zap.NewNop(), + }) + + require.NoError(t, err) + require.Len(t, services, 2) + + // Verify both services were discovered + serviceNames := make(map[string]bool) + for _, svc := range services { + serviceNames[svc.FullName] = true + } + assert.True(t, serviceNames["employee.v1.EmployeeService"]) + assert.True(t, serviceNames["product.v1.ProductService"]) + }) + + t.Run("discovers nested services", func(t *testing.T) { + tmpDir := t.TempDir() + + // Create nested structure: services/company/employee.v1/ + companyDir := filepath.Join(tmpDir, "company") + employeeDir := filepath.Join(companyDir, "employee.v1") + require.NoError(t, os.MkdirAll(employeeDir, 0755)) + + protoContent := `syntax = "proto3"; +package employee.v1; +service EmployeeService { + rpc GetEmployee(GetEmployeeRequest) returns (GetEmployeeResponse); +} +message GetEmployeeRequest { int32 id = 1; } +message GetEmployeeResponse { string name = 1; } +` + require.NoError(t, os.WriteFile(filepath.Join(employeeDir, "employee.proto"), []byte(protoContent), 0644)) + + // Discover services + services, err := DiscoverServices(ServiceDiscoveryConfig{ + ServicesDir: tmpDir, + Logger: zap.NewNop(), + }) + + require.NoError(t, err) + require.Len(t, services, 1) + assert.Equal(t, "employee.v1.EmployeeService", services[0].FullName) + }) + + t.Run("stops at first proto and does not discover nested protos (ADR compliance)", func(t *testing.T) { + tmpDir := t.TempDir() + + // Create parent service directory with proto + parentDir := filepath.Join(tmpDir, "employee.v1") + require.NoError(t, 
os.MkdirAll(parentDir, 0755)) + + parentProto := `syntax = "proto3"; +package employee.v1; +service EmployeeService { + rpc GetEmployee(GetEmployeeRequest) returns (GetEmployeeResponse); +} +message GetEmployeeRequest { int32 id = 1; } +message GetEmployeeResponse { string name = 1; } +` + require.NoError(t, os.WriteFile(filepath.Join(parentDir, "employee.proto"), []byte(parentProto), 0644)) + + // Create nested directory with another proto (should NOT be discovered) + nestedDir := filepath.Join(parentDir, "nested") + require.NoError(t, os.MkdirAll(nestedDir, 0755)) + + nestedProto := `syntax = "proto3"; +package nested.v1; +service NestedService { + rpc GetNested(GetNestedRequest) returns (GetNestedResponse); +} +message GetNestedRequest { int32 id = 1; } +message GetNestedResponse { string name = 1; } +` + require.NoError(t, os.WriteFile(filepath.Join(nestedDir, "nested.proto"), []byte(nestedProto), 0644)) + + // Discover services + services, err := DiscoverServices(ServiceDiscoveryConfig{ + ServicesDir: tmpDir, + Logger: zap.NewNop(), + }) + + require.NoError(t, err) + // Should only find the parent service, not the nested one + require.Len(t, services, 1, "Should only discover parent service, not nested proto") + assert.Equal(t, "employee.v1.EmployeeService", services[0].FullName) + assert.Equal(t, parentDir, services[0].ServiceDir) + }) + + t.Run("discovers operations in subdirectories of service", func(t *testing.T) { + tmpDir := t.TempDir() + serviceDir := filepath.Join(tmpDir, "employee.v1") + require.NoError(t, os.MkdirAll(serviceDir, 0755)) + + // Create proto file + protoContent := `syntax = "proto3"; +package employee.v1; +service EmployeeService { + rpc GetEmployee(GetEmployeeRequest) returns (GetEmployeeResponse); +} +message GetEmployeeRequest { int32 id = 1; } +message GetEmployeeResponse { string name = 1; } +` + require.NoError(t, os.WriteFile(filepath.Join(serviceDir, "employee.proto"), []byte(protoContent), 0644)) + + // Create operations in subdirectories + queriesDir := filepath.Join(serviceDir, "operations", "queries") + mutationsDir := filepath.Join(serviceDir, "operations", "mutations") + require.NoError(t, os.MkdirAll(queriesDir, 0755)) + require.NoError(t, os.MkdirAll(mutationsDir, 0755)) + + require.NoError(t, os.WriteFile(filepath.Join(queriesDir, "GetEmployee.graphql"), []byte("query GetEmployee { employee { name } }"), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(mutationsDir, "UpdateEmployee.graphql"), []byte("mutation UpdateEmployee { updateEmployee { name } }"), 0644)) + + // Discover services + services, err := DiscoverServices(ServiceDiscoveryConfig{ + ServicesDir: tmpDir, + Logger: zap.NewNop(), + }) + + require.NoError(t, err) + require.Len(t, services, 1) + + // Verify operations can be found in subdirectories + operations, err := findOperationFiles(serviceDir) + require.NoError(t, err) + assert.Len(t, operations, 2, "Should find operations in subdirectories") + }) + + t.Run("enforces one proto file per directory", func(t *testing.T) { + tmpDir := t.TempDir() + serviceDir := filepath.Join(tmpDir, "employee.v1") + require.NoError(t, os.MkdirAll(serviceDir, 0755)) + + // Create two proto files in the same directory + proto1 := `syntax = "proto3"; +package employee.v1; +service EmployeeService { + rpc GetEmployee(GetEmployeeRequest) returns (GetEmployeeResponse); +} +message GetEmployeeRequest { int32 id = 1; } +message GetEmployeeResponse { string name = 1; } +` + proto2 := `syntax = "proto3"; +package employee.v1; +service AnotherService { + 
rpc GetAnother(GetAnotherRequest) returns (GetAnotherResponse); +} +message GetAnotherRequest { int32 id = 1; } +message GetAnotherResponse { string name = 1; } +` + require.NoError(t, os.WriteFile(filepath.Join(serviceDir, "employee.proto"), []byte(proto1), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(serviceDir, "another.proto"), []byte(proto2), 0644)) + + // Discover services - should fail + _, err := DiscoverServices(ServiceDiscoveryConfig{ + ServicesDir: tmpDir, + Logger: zap.NewNop(), + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "only one proto file is allowed per directory") + }) + + t.Run("validates unique package.service combinations", func(t *testing.T) { + tmpDir := t.TempDir() + + // Create two directories with the same package.service + dir1 := filepath.Join(tmpDir, "service1") + dir2 := filepath.Join(tmpDir, "service2") + require.NoError(t, os.MkdirAll(dir1, 0755)) + require.NoError(t, os.MkdirAll(dir2, 0755)) + + // Same proto content in both directories + protoContent := `syntax = "proto3"; +package employee.v1; +service EmployeeService { + rpc GetEmployee(GetEmployeeRequest) returns (GetEmployeeResponse); +} +message GetEmployeeRequest { int32 id = 1; } +message GetEmployeeResponse { string name = 1; } +` + require.NoError(t, os.WriteFile(filepath.Join(dir1, "employee.proto"), []byte(protoContent), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(dir2, "employee.proto"), []byte(protoContent), 0644)) + + // Discover services - should fail due to duplicate + _, err := DiscoverServices(ServiceDiscoveryConfig{ + ServicesDir: tmpDir, + Logger: zap.NewNop(), + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "duplicate service") + assert.Contains(t, err.Error(), "employee.v1.EmployeeService") + }) + + t.Run("returns error when no services found", func(t *testing.T) { + tmpDir := t.TempDir() + + // Create empty directory + _, err := DiscoverServices(ServiceDiscoveryConfig{ + ServicesDir: tmpDir, + Logger: zap.NewNop(), + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "no services found") + }) + + t.Run("returns error when services directory does not exist", func(t *testing.T) { + _, err := DiscoverServices(ServiceDiscoveryConfig{ + ServicesDir: "/nonexistent/directory", + Logger: zap.NewNop(), + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "does not exist") + }) + + t.Run("returns error when proto has no package declaration", func(t *testing.T) { + tmpDir := t.TempDir() + serviceDir := filepath.Join(tmpDir, "employee.v1") + require.NoError(t, os.MkdirAll(serviceDir, 0755)) + + // Proto without package declaration + protoContent := `syntax = "proto3"; +service EmployeeService { + rpc GetEmployee(GetEmployeeRequest) returns (GetEmployeeResponse); +} +message GetEmployeeRequest { int32 id = 1; } +message GetEmployeeResponse { string name = 1; } +` + require.NoError(t, os.WriteFile(filepath.Join(serviceDir, "employee.proto"), []byte(protoContent), 0644)) + + _, err := DiscoverServices(ServiceDiscoveryConfig{ + ServicesDir: tmpDir, + Logger: zap.NewNop(), + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "no package declaration found") + }) + + t.Run("returns error when proto has no service declaration", func(t *testing.T) { + tmpDir := t.TempDir() + serviceDir := filepath.Join(tmpDir, "employee.v1") + require.NoError(t, os.MkdirAll(serviceDir, 0755)) + + // Proto without service declaration + protoContent := `syntax = "proto3"; +package employee.v1; +message GetEmployeeRequest { int32 
id = 1; } +message GetEmployeeResponse { string name = 1; } +` + require.NoError(t, os.WriteFile(filepath.Join(serviceDir, "employee.proto"), []byte(protoContent), 0644)) + + _, err := DiscoverServices(ServiceDiscoveryConfig{ + ServicesDir: tmpDir, + Logger: zap.NewNop(), + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "no service declaration found") + }) +} diff --git a/router/pkg/connectrpc/testdata/test.proto b/router/pkg/connectrpc/testdata/test.proto new file mode 100644 index 0000000000..8669f1ed5d --- /dev/null +++ b/router/pkg/connectrpc/testdata/test.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package test; + +// Mood represents an employee's mood +enum Mood { + MOOD_UNSPECIFIED = 0; + MOOD_HAPPY = 1; + MOOD_SAD = 2; + MOOD_NEUTRAL = 3; +} + +// Status represents an employee's status +enum Status { + STATUS_UNSPECIFIED = 0; + STATUS_ACTIVE = 1; + STATUS_INACTIVE = 2; +} + +// GetEmployeeRequest is the request message for GetEmployee +message GetEmployeeRequest { + string name = 1; + Mood mood = 2; + Status status = 3; + string employee_id = 4; +} + +// GetEmployeeResponse is the response message for GetEmployee +message GetEmployeeResponse { + string name = 1; + Mood mood = 2; +} + +// EmployeeService provides employee operations +service EmployeeService { + rpc GetEmployee(GetEmployeeRequest) returns (GetEmployeeResponse); +} diff --git a/router/pkg/connectrpc/vanguard_service.go b/router/pkg/connectrpc/vanguard_service.go new file mode 100644 index 0000000000..96b61f4951 --- /dev/null +++ b/router/pkg/connectrpc/vanguard_service.go @@ -0,0 +1,393 @@ +package connectrpc + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + + "connectrpc.com/connect" + "connectrpc.com/vanguard" + "go.uber.org/zap" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/dynamicpb" +) + +// VanguardServiceConfig holds configuration for creating a Vanguard service +type VanguardServiceConfig struct { + Handler *RPCHandler + ProtoLoader *ProtoLoader + Logger *zap.Logger +} + +// VanguardService wraps the RPC handler and creates Vanguard services that enable +// protocol-agnostic RPC handling. It uses the connectrpc.com/vanguard package to provide +// automatic transcoding between different RPC protocols (gRPC, gRPC-Web, Connect, and REST) +// and message formats (Protocol Buffers binary, JSON, etc.). +// +// The service acts as a protocol adapter that: +// - Accepts requests in any supported RPC protocol (gRPC, gRPC-Web, Connect, REST) +// - Transcodes incoming requests to Connect protocol with JSON encoding +// - Forwards the normalized request to the underlying RPCHandler for GraphQL execution +// - Transcodes the response back to the client's original protocol and format +// +// This allows clients to use their preferred RPC protocol while the router internally +// processes all requests uniformly as Connect+JSON, simplifying the handler implementation +// and enabling protocol interoperability. 
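//
// As an illustrative sketch (the port and the JSON field name are assumptions based on the
// package defaults and proto3 JSON naming), a raw Connect-protocol call to a transcoded method
// is just an HTTP POST with a JSON body:
//
//	resp, err := http.Post(
//	    "http://localhost:5026/employee.v1.EmployeeService/GetEmployeeById",
//	    "application/json",
//	    strings.NewReader(`{"employeeId": 1}`),
//	)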
+type VanguardService struct { + handler *RPCHandler + protoLoader *ProtoLoader + logger *zap.Logger + services []*vanguard.Service +} + +// NewVanguardService creates a new Vanguard service wrapper +func NewVanguardService(config VanguardServiceConfig) (*VanguardService, error) { + if config.Handler == nil { + return nil, fmt.Errorf("handler cannot be nil") + } + + if config.ProtoLoader == nil { + return nil, fmt.Errorf("proto loader cannot be nil") + } + + if config.Logger == nil { + config.Logger = zap.NewNop() + } + + vs := &VanguardService{ + handler: config.Handler, + protoLoader: config.ProtoLoader, + logger: config.Logger, + } + + // Register all proto services with Vanguard + if err := vs.registerServices(); err != nil { + return nil, fmt.Errorf("failed to register services: %w", err) + } + + return vs, nil +} + +// registerServices creates Vanguard services for all proto services +func (vs *VanguardService) registerServices() error { + protoServices := vs.protoLoader.GetServices() + if len(protoServices) == 0 { + return fmt.Errorf("no proto services found") + } + + // Create a custom type resolver from our Files registry + // This avoids using the global registry + files := vs.protoLoader.GetFiles() + customTypes := dynamicpb.NewTypes(files) + + vs.services = make([]*vanguard.Service, 0, len(protoServices)) + + // Collect aggregate statistics + totalMethods := 0 + uniquePackages := make(map[string]bool) + for _, serviceDef := range protoServices { + totalMethods += len(serviceDef.Methods) + uniquePackages[serviceDef.Package] = true + } + + // Log aggregate summary at Info level + vs.logger.Info("registering services", + zap.Int("package_count", len(uniquePackages)), + zap.Int("service_count", len(protoServices)), + zap.Int("total_methods", totalMethods)) + + for serviceName, serviceDef := range protoServices { + vs.logger.Debug("registering service", + zap.String("service_name", serviceName), + zap.String("full_name", serviceDef.FullName), + zap.Int("method_count", len(serviceDef.Methods))) + + // Log all methods for this service at Debug level + for _, method := range serviceDef.Methods { + vs.logger.Debug("service method", + zap.String("service", serviceName), + zap.String("method", method.Name), + zap.String("input_type", method.InputType), + zap.String("output_type", method.OutputType)) + } + + // Create an HTTP handler for this service + // The handler will receive requests at service-qualified paths like /package.Service/Method + serviceHandler := vs.createServiceHandler(serviceName, serviceDef) + + // Use NewServiceWithSchema with custom type resolver + // This avoids relying on the global registry + servicePath := "/" + serviceName + "/" + + vs.logger.Debug("creating service with custom type resolver", + zap.String("service_path", servicePath)) + + // Configure to always transcode to Connect protocol with JSON codec + // This ensures our handler always receives JSON, regardless of the incoming protocol + // Use NewServiceWithSchema to provide the schema directly with a custom type resolver + vanguardService := vanguard.NewServiceWithSchema( + serviceDef.ServiceDescriptor, + serviceHandler, + vanguard.WithTargetProtocols(vanguard.ProtocolConnect), + vanguard.WithTargetCodecs("json"), + vanguard.WithTypeResolver(customTypes), + ) + + vs.services = append(vs.services, vanguardService) + + vs.logger.Debug("registered service successfully with custom type resolver", + zap.String("service", serviceName), + zap.String("service_path", servicePath), + zap.String("target_protocol", 
"connect"), + zap.String("target_codec", "json")) + } + + return nil +} + +// createServiceHandler creates an HTTP handler for a specific proto service +// This handler is wrapped by Vanguard, which handles protocol transcoding +func (vs *VanguardService) createServiceHandler(serviceName string, serviceDef *ServiceDefinition) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Extract and validate method name from path + methodName := vs.extractMethodName(r.URL.Path, serviceName) + if methodName == "" { + // Return Connect error for invalid path + connectErr := connect.NewError(connect.CodeNotFound, fmt.Errorf("invalid path format")) + vs.writeConnectError(w, connectErr, serviceName, methodName) + return + } + + // Validate method exists in service + methodExists := false + for _, method := range serviceDef.Methods { + if method.Name == methodName { + methodExists = true + break + } + } + + if !methodExists { + // Return Connect error for method not found + connectErr := connect.NewError(connect.CodeNotFound, fmt.Errorf("method not found: %s", methodName)) + vs.writeConnectError(w, connectErr, serviceName, methodName) + return + } + + // For GET requests (Connect protocol), extract message from query parameter + // For POST requests, read from body + var requestBody []byte + var err error + + if r.Method == "GET" { + // Extract the 'message' query parameter (Connect protocol for GET requests) + messageParam := r.URL.Query().Get("message") + if messageParam == "" { + // For methods with no input parameters, use empty JSON object + requestBody = []byte("{}") + } else { + // The message parameter is already URL-decoded by r.URL.Query().Get() + requestBody = []byte(messageParam) + } + vs.logger.Debug("extracted message from GET query parameter", + zap.String("message", string(requestBody))) + } else { + // Read request body (JSON for POST requests) + requestBody, err = io.ReadAll(r.Body) + if err != nil { + vs.logger.Error("failed to read request body", zap.Error(err)) + connectErr := connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("failed to read request")) + vs.writeConnectError(w, connectErr, serviceName, methodName) + return + } + } + + // Add headers to context for forwarding to GraphQL + ctx := withRequestHeaders(r.Context(), r.Header) + + // Handle the RPC request + responseBody, err := vs.handler.HandleRPC(ctx, serviceName, methodName, requestBody) + if err != nil { + // Check if this is already a Connect error + var connectErr *connect.Error + if errors.As(err, &connectErr) { + vs.writeConnectError(w, connectErr, serviceName, methodName) + } else { + // Log the original error with full details for diagnostics + vs.logger.Error("internal error during RPC handling", + zap.String("service", serviceName), + zap.String("method", methodName), + zap.Error(err)) + + // Return a sanitized error to the client to avoid leaking internal details + connectErr := connect.NewError(connect.CodeInternal, fmt.Errorf("internal server error")) + vs.writeConnectError(w, connectErr, serviceName, methodName) + } + return + } + + // Write JSON response (will be transcoded to client's protocol by Vanguard) + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + if _, err := w.Write(responseBody); err != nil { + vs.logger.Error("failed to write response", zap.Error(err)) + } + }) +} + +// writeConnectError writes a Connect error response in JSON format +// This ensures proper error formatting for the Connect protocol +func (vs 
*VanguardService) writeConnectError(w http.ResponseWriter, connectErr *connect.Error, serviceName, methodName string) { + statusCode := ConnectCodeToHTTPStatus(connectErr.Code()) + + vs.logger.Error("RPC handler error", + zap.String("service", serviceName), + zap.String("method", methodName), + zap.String("connect_code", connectErr.Code().String()), + zap.Int("http_status", statusCode), + zap.String("error", connectErr.Message())) + + // Format error as Connect JSON error response + // Connect protocol error format: {"code": "invalid_argument", "message": "error message"} + errorResponse := map[string]any{ + "code": connectErr.Code().String(), + "message": connectErr.Message(), + } + + // Check if this error contains GraphQL errors in metadata + // If so, include them in a structured format for better error reporting + if graphqlErrorsJSON := connectErr.Meta().Values(MetaKeyGraphQLErrors); len(graphqlErrorsJSON) > 0 { + // Parse the GraphQL errors JSON from metadata + var graphqlErrors []GraphQLError + if err := json.Unmarshal([]byte(graphqlErrorsJSON[0]), &graphqlErrors); err == nil && len(graphqlErrors) > 0 { + // Include GraphQL errors in the response for better debugging + errorResponse["graphql_errors"] = graphqlErrors + + // If there are multiple GraphQL errors, update the message to indicate this + if len(graphqlErrors) > 1 { + errorResponse["message"] = fmt.Sprintf("%s (and %d more errors)", connectErr.Message(), len(graphqlErrors)-1) + } + } + } + + // Add other metadata if present (excluding graphql_errors which we handled above) + // Note: We don't add a "details" field here because Vanguard handles that internally + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(statusCode) + + if err := json.NewEncoder(w).Encode(errorResponse); err != nil { + vs.logger.Error("failed to write error response", zap.Error(err)) + } +} + +// extractMethodName extracts the method name from the request path +// Expected format: /package.Service/Method or package.Service/Method +func (vs *VanguardService) extractMethodName(path, serviceName string) string { + // Remove leading slash if present + path = strings.TrimPrefix(path, "/") + + // Expected format: package.Service/Method + // Split by the service name + parts := strings.Split(path, "/") + if len(parts) != 2 { + return "" + } + + // Verify the service name matches + if parts[0] != serviceName { + return "" + } + + return parts[1] +} + +// GetServices returns all registered Vanguard services +func (vs *VanguardService) GetServices() []*vanguard.Service { + return vs.services +} + +// GetServiceCount returns the number of registered services +func (vs *VanguardService) GetServiceCount() int { + return len(vs.services) +} + +// GetServiceNames returns the names of all registered services +func (vs *VanguardService) GetServiceNames() []string { + names := make([]string, 0, len(vs.services)) + for serviceName := range vs.protoLoader.GetServices() { + names = append(names, serviceName) + } + return names +} + +// ValidateService checks if a service exists +func (vs *VanguardService) ValidateService(serviceName string) error { + if _, ok := vs.protoLoader.GetService(serviceName); !ok { + return fmt.Errorf("service not found: %s", serviceName) + } + return nil +} + +// ValidateMethod checks if a method exists in a service +func (vs *VanguardService) ValidateMethod(serviceName, methodName string) error { + _, err := vs.protoLoader.GetMethod(serviceName, methodName) + if err != nil { + return fmt.Errorf("method not found: %w", err) + 
} + return nil +} + +// GetMethodInfo returns information about a specific method +func (vs *VanguardService) GetMethodInfo(serviceName, methodName string) (*MethodDefinition, error) { + method, err := vs.protoLoader.GetMethod(serviceName, methodName) + if err != nil { + return nil, fmt.Errorf("method not found: %w", err) + } + return method, nil +} + +// GetServiceInfo returns information about a specific service +func (vs *VanguardService) GetServiceInfo(serviceName string) (*ServiceInfo, error) { + serviceDef, ok := vs.protoLoader.GetService(serviceName) + if !ok { + return nil, fmt.Errorf("service not found: %s", serviceName) + } + + info := &ServiceInfo{ + FullName: serviceName, + ServiceName: serviceDef.ServiceName, + Methods: make([]string, 0, len(serviceDef.Methods)), + } + + for _, method := range serviceDef.Methods { + info.Methods = append(info.Methods, method.Name) + } + + return info, nil +} + +// GetFileDescriptors returns all unique file descriptors from the proto loader +func (vs *VanguardService) GetFileDescriptors() []protoreflect.FileDescriptor { + descriptors := make([]protoreflect.FileDescriptor, 0) + seen := make(map[string]bool) + + for _, service := range vs.protoLoader.GetServices() { + path := service.FileDescriptor.Path() + if !seen[path] { + seen[path] = true + descriptors = append(descriptors, service.FileDescriptor) + } + } + return descriptors +} + +// ServiceInfo contains metadata about a service +type ServiceInfo struct { + FullName string `json:"fullName"` + ServiceName string `json:"serviceName"` + Methods []string `json:"methods"` +} diff --git a/router/pkg/connectrpc/vanguard_service_test.go b/router/pkg/connectrpc/vanguard_service_test.go new file mode 100644 index 0000000000..f7a5af2c25 --- /dev/null +++ b/router/pkg/connectrpc/vanguard_service_test.go @@ -0,0 +1,376 @@ +package connectrpc + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/wundergraph/cosmo/router/pkg/schemaloader" +) + +func TestNewVanguardService(t *testing.T) { + t.Run("creates service successfully", func(t *testing.T) { + protoLoader := GetSharedProtoLoader(t, "samples/services/employee.v1") + handler := NewTestRPCHandler(t, protoLoader) + + vs, err := NewVanguardService(VanguardServiceConfig{ + Handler: handler, + ProtoLoader: protoLoader, + Logger: zap.NewNop(), + }) + + require.NoError(t, err) + assert.NotNil(t, vs) + assert.Equal(t, 1, vs.GetServiceCount(), "Should have exactly 1 service from employee_only directory") + }) + + t.Run("fails with nil handler", func(t *testing.T) { + protoLoader := GetSharedProtoLoader(t, "samples/services/employee.v1") + + _, err := NewVanguardService(VanguardServiceConfig{ + Handler: nil, + ProtoLoader: protoLoader, + Logger: zap.NewNop(), + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "handler cannot be nil") + }) + + t.Run("fails with nil proto loader", func(t *testing.T) { + handler := &RPCHandler{} + + _, err := NewVanguardService(VanguardServiceConfig{ + Handler: handler, + ProtoLoader: nil, + Logger: zap.NewNop(), + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "proto loader cannot be nil") + }) + + t.Run("uses nop logger when nil", func(t *testing.T) { + protoLoader := GetSharedProtoLoader(t, "samples/services/employee.v1") + handler := NewTestRPCHandler(t, protoLoader) + + vs, err := NewVanguardService(VanguardServiceConfig{ + Handler: handler, 
diff --git a/router/pkg/connectrpc/vanguard_service_test.go b/router/pkg/connectrpc/vanguard_service_test.go
new file mode 100644
index 0000000000..f7a5af2c25
--- /dev/null
+++ b/router/pkg/connectrpc/vanguard_service_test.go
@@ -0,0 +1,376 @@
+package connectrpc
+
+import (
+    "bytes"
+    "encoding/json"
+    "io"
+    "net/http"
+    "net/http/httptest"
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    "go.uber.org/zap"
+
+    "github.com/wundergraph/cosmo/router/pkg/schemaloader"
+)
+
+func TestNewVanguardService(t *testing.T) {
+    t.Run("creates service successfully", func(t *testing.T) {
+        protoLoader := GetSharedProtoLoader(t, "samples/services/employee.v1")
+        handler := NewTestRPCHandler(t, protoLoader)
+
+        vs, err := NewVanguardService(VanguardServiceConfig{
+            Handler:     handler,
+            ProtoLoader: protoLoader,
+            Logger:      zap.NewNop(),
+        })
+
+        require.NoError(t, err)
+        assert.NotNil(t, vs)
+        assert.Equal(t, 1, vs.GetServiceCount(), "Should have exactly 1 service from the employee.v1 samples directory")
+    })
+
+    t.Run("fails with nil handler", func(t *testing.T) {
+        protoLoader := GetSharedProtoLoader(t, "samples/services/employee.v1")
+
+        _, err := NewVanguardService(VanguardServiceConfig{
+            Handler:     nil,
+            ProtoLoader: protoLoader,
+            Logger:      zap.NewNop(),
+        })
+
+        require.Error(t, err)
+        assert.Contains(t, err.Error(), "handler cannot be nil")
+    })
+
+    t.Run("fails with nil proto loader", func(t *testing.T) {
+        handler := &RPCHandler{}
+
+        _, err := NewVanguardService(VanguardServiceConfig{
+            Handler:     handler,
+            ProtoLoader: nil,
+            Logger:      zap.NewNop(),
+        })
+
+        require.Error(t, err)
+        assert.Contains(t, err.Error(), "proto loader cannot be nil")
+    })
+
+    t.Run("uses nop logger when nil", func(t *testing.T) {
+        protoLoader := GetSharedProtoLoader(t, "samples/services/employee.v1")
+        handler := NewTestRPCHandler(t, protoLoader)
+
+        vs, err := NewVanguardService(VanguardServiceConfig{
+            Handler:     handler,
+            ProtoLoader: protoLoader,
+            Logger:      nil,
+        })
+
+        require.NoError(t, err)
+        assert.NotNil(t, vs)
+        assert.NotNil(t, vs.logger)
+    })
+
+    t.Run("fails with no proto services", func(t *testing.T) {
+        // Create empty proto loader
+        protoLoader := NewProtoLoader(zap.NewNop())
+
+        handler := &RPCHandler{}
+
+        _, err := NewVanguardService(VanguardServiceConfig{
+            Handler:     handler,
+            ProtoLoader: protoLoader,
+            Logger:      zap.NewNop(),
+        })
+
+        require.Error(t, err)
+        assert.Contains(t, err.Error(), "no proto services found")
+    })
+}
+
+func TestVanguardService_ValidateService(t *testing.T) {
+    protoLoader := GetSharedProtoLoader(t, "samples/services/employee.v1")
+    handler := NewTestRPCHandler(t, protoLoader)
+
+    vs, err := NewVanguardService(VanguardServiceConfig{
+        Handler:     handler,
+        ProtoLoader: protoLoader,
+        Logger:      zap.NewNop(),
+    })
+    require.NoError(t, err)
+
+    t.Run("validates existing service", func(t *testing.T) {
+        err := vs.ValidateService("employee.v1.EmployeeService")
+        assert.NoError(t, err)
+    })
+
+    t.Run("fails for non-existent service", func(t *testing.T) {
+        err := vs.ValidateService("example.NonExistent")
+        require.Error(t, err)
+        assert.Contains(t, err.Error(), "service not found")
+    })
+}
+
+func TestVanguardService_ValidateMethod(t *testing.T) {
+    protoLoader := GetSharedProtoLoader(t, "samples/services/employee.v1")
+    handler := NewTestRPCHandler(t, protoLoader)
+
+    vs, err := NewVanguardService(VanguardServiceConfig{
+        Handler:     handler,
+        ProtoLoader: protoLoader,
+        Logger:      zap.NewNop(),
+    })
+    require.NoError(t, err)
+
+    t.Run("validates existing method", func(t *testing.T) {
+        err := vs.ValidateMethod("employee.v1.EmployeeService", "GetEmployeeById")
+        assert.NoError(t, err)
+    })
+
+    t.Run("fails for non-existent method", func(t *testing.T) {
+        err := vs.ValidateMethod("employee.v1.EmployeeService", "NonExistent")
+        require.Error(t, err)
+        assert.Contains(t, err.Error(), "method not found")
+    })
+
+    t.Run("fails for non-existent service", func(t *testing.T) {
+        err := vs.ValidateMethod("example.NonExistent", "QueryGetUser")
+        require.Error(t, err)
+        assert.Contains(t, err.Error(), "method not found")
+    })
+}
+
+func TestVanguardService_GetMethodInfo(t *testing.T) {
+    protoLoader := GetSharedProtoLoader(t, "samples/services/employee.v1")
+    handler := NewTestRPCHandler(t, protoLoader)
+
+    vs, err := NewVanguardService(VanguardServiceConfig{
+        Handler:     handler,
+        ProtoLoader: protoLoader,
+        Logger:      zap.NewNop(),
+    })
+    require.NoError(t, err)
+
+    t.Run("gets method info successfully", func(t *testing.T) {
+        info, err := vs.GetMethodInfo("employee.v1.EmployeeService", "GetEmployeeById")
+        require.NoError(t, err)
+        assert.NotNil(t, info)
+        assert.Equal(t, "GetEmployeeById", info.Name)
+    })
+
+    t.Run("fails for non-existent method", func(t *testing.T) {
+        _, err := vs.GetMethodInfo("employee.v1.EmployeeService", "NonExistent")
+        require.Error(t, err)
+        assert.Contains(t, err.Error(), "method not found")
+    })
+}
+
+func TestVanguardService_GetServiceInfo(t *testing.T) {
+    protoLoader := GetSharedProtoLoader(t, "samples/services/employee.v1")
+    handler := NewTestRPCHandler(t, protoLoader)
+
+    vs, err := NewVanguardService(VanguardServiceConfig{
+        Handler:     handler,
+        ProtoLoader: protoLoader,
+        Logger:      zap.NewNop(),
+    })
+    require.NoError(t, err)
+
+    t.Run("gets service info successfully", func(t *testing.T) {
+        info, err := vs.GetServiceInfo("employee.v1.EmployeeService")
+        require.NoError(t, err)
+        assert.NotNil(t, info)
+        assert.Equal(t, "employee.v1.EmployeeService", info.FullName)
+        assert.Equal(t, "EmployeeService", info.ServiceName)
+        assert.Contains(t, info.Methods, "GetEmployeeById")
+    })
+
+    t.Run("fails for non-existent service", func(t *testing.T) {
+        _, err := vs.GetServiceInfo("example.NonExistent")
+        require.Error(t, err)
+        assert.Contains(t, err.Error(), "service not found")
+    })
+}
+
+func TestVanguardService_ServiceHandler(t *testing.T) {
+    protoLoader := GetSharedProtoLoader(t, "samples/services/employee.v1")
+    handler := NewTestRPCHandler(t, protoLoader)
+
+    vs, err := NewVanguardService(VanguardServiceConfig{
+        Handler:     handler,
+        ProtoLoader: protoLoader,
+        Logger:      zap.NewNop(),
+    })
+    require.NoError(t, err)
+
+    t.Run("handles valid request", func(t *testing.T) {
+        // Create a mock GraphQL server
+        graphqlServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+            w.Header().Set("Content-Type", "application/json")
+            w.WriteHeader(http.StatusOK)
+            _, _ = w.Write([]byte(`{"data":{"employee":{"id":1,"name":"Test Employee"}}}`))
+        }))
+        defer graphqlServer.Close()
+
+        // Create handler with mock server
+        protoLoader := GetSharedProtoLoader(t, "samples/services/employee.v1")
+
+        // Build operations map with service-scoped approach before creating registry
+        serviceName := "employee.v1.EmployeeService"
+        operations := map[string]map[string]*schemaloader.Operation{
+            serviceName: {
+                "GetEmployeeById": &schemaloader.Operation{
+                    Name:            "GetEmployeeById",
+                    OperationType:   "query",
+                    OperationString: "query GetEmployeeById($id: Int!) { employee(id: $id) { id name } }",
+                },
+            },
+        }
+        opRegistry := NewOperationRegistry(operations)
+
+        handler, err := NewRPCHandler(HandlerConfig{
+            GraphQLEndpoint:   graphqlServer.URL,
+            HTTPClient:        &http.Client{},
+            Logger:            zap.NewNop(),
+            OperationRegistry: opRegistry,
+            ProtoLoader:       protoLoader,
+        })
+        require.NoError(t, err)
+
+        // Get service definition
+        services := protoLoader.GetServices()
+        require.NotEmpty(t, services, "Should have at least one service")
+        var serviceDef *ServiceDefinition
+        for _, svc := range services {
+            serviceDef = svc
+            break
+        }
+        require.NotNil(t, serviceDef)
+
+        vs, err := NewVanguardService(VanguardServiceConfig{
+            Handler:     handler,
+            ProtoLoader: protoLoader,
+            Logger:      zap.NewNop(),
+        })
+        require.NoError(t, err)
+
+        // Create a test request
+        requestBody := map[string]any{
+            "id": 1,
+        }
+        requestJSON, err := json.Marshal(requestBody)
+        require.NoError(t, err)
+
+        req := httptest.NewRequest("POST", "/employee.v1.EmployeeService/GetEmployeeById", bytes.NewReader(requestJSON))
+        req.Header.Set("Content-Type", "application/json")
+
+        // Create a response recorder
+        w := httptest.NewRecorder()
+
+        // Get the service handler
+        serviceHandler := vs.createServiceHandler("employee.v1.EmployeeService", serviceDef)
+
+        // Handle the request
+        serviceHandler.ServeHTTP(w, req)
+
+        // Check response
+        assert.Equal(t, http.StatusOK, w.Code)
+        assert.Equal(t, "application/json", w.Header().Get("Content-Type"))
+
+        // Verify response body is valid JSON
+        var response map[string]any
+        err = json.Unmarshal(w.Body.Bytes(), &response)
+        assert.NoError(t, err)
+    })
+
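The 404 and 400 subtests that follow pin down how createServiceHandler treats the URL path and the request body; the handler implementation itself is not part of this diff. A rough sketch of path handling that would satisfy those assertions, with the helper name and signature invented for illustration:

package example // hypothetical sketch, inferred from the assertions below

import (
    "net/http"
    "strings"
)

// resolveRPCPath mirrors the behaviour the subtests assert on: exactly two path
// segments, the first must match the registered service, the second must be a
// known method; anything else is a 404.
func resolveRPCPath(path, wantService string, hasMethod func(string) bool) (method string, status int, msg string) {
    parts := strings.Split(strings.Trim(path, "/"), "/")
    if len(parts) != 2 || parts[0] != wantService {
        return "", http.StatusNotFound, "invalid path format"
    }
    if !hasMethod(parts[1]) {
        return "", http.StatusNotFound, "method not found"
    }
    return parts[1], http.StatusOK, ""
}

A body that cannot be read maps to http.StatusBadRequest instead, which is what the errorReader subtest at the end of this file checks.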
+    t.Run("returns 404 for unknown method name", func(t *testing.T) {
+        services := protoLoader.GetServices()
+        require.NotEmpty(t, services)
+        var serviceDef *ServiceDefinition
+        for _, svc := range services {
+            serviceDef = svc
+            break
+        }
+        require.NotNil(t, serviceDef)
+
+        // Valid service name but non-existent method
+        req := httptest.NewRequest("POST", "/employee.v1.EmployeeService/NonExistentMethod", nil)
+        w := httptest.NewRecorder()
+
+        serviceHandler := vs.createServiceHandler("employee.v1.EmployeeService", serviceDef)
+        serviceHandler.ServeHTTP(w, req)
+
+        assert.Equal(t, http.StatusNotFound, w.Code)
+        assert.Contains(t, w.Body.String(), "method not found")
+    })
+
+    t.Run("returns 404 for wrong service name with valid method", func(t *testing.T) {
+        services := protoLoader.GetServices()
+        require.NotEmpty(t, services)
+        var serviceDef *ServiceDefinition
+        for _, svc := range services {
+            serviceDef = svc
+            break
+        }
+        require.NotNil(t, serviceDef)
+
+        // Wrong service name but valid method name - the handler rejects the path before any method lookup
+        req := httptest.NewRequest("POST", "/wrong.Service/GetEmployeeById", nil)
+        w := httptest.NewRecorder()
+
+        serviceHandler := vs.createServiceHandler("employee.v1.EmployeeService", serviceDef)
+        serviceHandler.ServeHTTP(w, req)
+
+        assert.Equal(t, http.StatusNotFound, w.Code)
+        assert.Contains(t, w.Body.String(), "invalid path format")
+    })
+
+    t.Run("returns 404 for invalid path format with too many parts", func(t *testing.T) {
+        services := protoLoader.GetServices()
+        require.NotEmpty(t, services)
+        var serviceDef *ServiceDefinition
+        for _, svc := range services {
+            serviceDef = svc
+            break
+        }
+        require.NotNil(t, serviceDef)
+
+        // Path with too many segments
+        req := httptest.NewRequest("POST", "/employee.v1.EmployeeService/GetEmployeeById/extra", nil)
+        w := httptest.NewRecorder()
+
+        serviceHandler := vs.createServiceHandler("employee.v1.EmployeeService", serviceDef)
+        serviceHandler.ServeHTTP(w, req)
+
+        assert.Equal(t, http.StatusNotFound, w.Code)
+        assert.Contains(t, w.Body.String(), "invalid path format")
+    })
+
+    t.Run("handles request body read error", func(t *testing.T) {
+        services := protoLoader.GetServices()
+        require.NotEmpty(t, services)
+        var serviceDef *ServiceDefinition
+        for _, svc := range services {
+            serviceDef = svc
+            break
+        }
+        require.NotNil(t, serviceDef)
+
+        // Create a request with a body that will error on read
+        req := httptest.NewRequest("POST", "/employee.v1.EmployeeService/GetEmployeeById", &errorReader{})
+        w := httptest.NewRecorder()
+
+        serviceHandler := vs.createServiceHandler("employee.v1.EmployeeService", serviceDef)
+        serviceHandler.ServeHTTP(w, req)
+
+        assert.Equal(t, http.StatusBadRequest, w.Code)
+    })
+}
+
+// errorReader is a reader that always returns an error
+type errorReader struct{}
+
+func (e *errorReader) Read(p []byte) (n int, err error) {
+    return 0, io.ErrUnexpectedEOF
+}
diff --git a/router/pkg/mcpserver/server.go b/router/pkg/mcpserver/server.go
index 075ba20c42..72e4c82445 100644
--- a/router/pkg/mcpserver/server.go
+++ b/router/pkg/mcpserver/server.go
@@ -17,6 +17,7 @@ import (
     "github.com/mark3labs/mcp-go/mcp"
     "github.com/mark3labs/mcp-go/server"
     "github.com/santhosh-tekuri/jsonschema/v6"
+    "github.com/wundergraph/cosmo/router/internal/headers"
     "github.com/wundergraph/cosmo/router/pkg/cors"
     "github.com/wundergraph/cosmo/router/pkg/schemaloader"
     "github.com/wundergraph/graphql-go-tools/v2/pkg/ast"
@@ -39,28 +40,6 @@ func requestHeadersFromRequest(ctx context.Context, r *http.Request) context.Con
     return withRequestHeaders(ctx, headers)
 }

-var skippedHeaders = map[string]struct{}{
-    "Connection":               {},
-    "Keep-Alive":               {},
-    "Proxy-Authenticate":       {},
-    "Proxy-Authorization":      {},
-    "Te":                       {},
-    "Trailer":                  {},
-    "Transfer-Encoding":        {},
-    "Upgrade":                  {},
-    "Host":                     {},
-    "Content-Length":           {},
-    "Content-Type":             {},
-    "Accept":                   {},
-    "Accept-Encoding":          {},
-    "Accept-Charset":           {},
-    "Alt-Svc":                  {},
-    "Proxy-Connection":         {},
-    "Sec-Websocket-Extensions": {},
-    "Sec-Websocket-Key":        {},
-    "Sec-Websocket-Protocol":   {},
-    "Sec-Websocket-Version":    {},
-}

 // headersFromContext extracts the request headers from the context.
@@ -731,14 +710,14 @@ func (s *GraphQLSchemaServer) executeGraphQLQuery(ctx context.Context, query str

     // Forward all headers from the original MCP request to the GraphQL server
     // The router's header forwarding rules will then determine what gets sent to subgraphs
-    headers, err := headersFromContext(ctx)
+    reqHeaders, err := headersFromContext(ctx)
     if err != nil {
         s.logger.Debug("failed to get headers from context", zap.Error(err))
     } else {
         // Copy all headers from the MCP request
-        for key, values := range headers {
+        for key, values := range reqHeaders {
             // Skip headers that should not be forwarded
-            if _, ok := skippedHeaders[key]; ok {
+            if _, ok := headers.SkippedHeaders[key]; ok {
                 continue
             }
             for _, value := range values {
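The hunk above drops the server-local skippedHeaders map in favour of a shared headers.SkippedHeaders set from router/internal/headers, so the MCP server and other router components can agree on which hop-by-hop and negotiation headers are never forwarded. Assuming the shared set keeps the map-of-struct{} shape the diff indexes into, a reusable copy helper consulting it might look like this sketch (the helper itself is illustrative and only compiles inside the router module, since the package is internal; only the SkippedHeaders lookup comes from the diff):

package example // sketch; assumes router/internal/headers exposes SkippedHeaders as used above

import (
    "net/http"

    "github.com/wundergraph/cosmo/router/internal/headers"
)

// copyForwardableHeaders copies src headers onto an outgoing request while
// skipping everything listed in the shared headers.SkippedHeaders set.
func copyForwardableHeaders(dst *http.Request, src http.Header) {
    for key, values := range src {
        if _, skip := headers.SkippedHeaders[key]; skip {
            continue
        }
        for _, value := range values {
            dst.Header.Add(key, value)
        }
    }
}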
"Proxy-Connection": {}, - "Sec-Websocket-Extensions": {}, - "Sec-Websocket-Key": {}, - "Sec-Websocket-Protocol": {}, - "Sec-Websocket-Version": {}, -} // headersFromContext extracts the request headers from the context. func headersFromContext(ctx context.Context) (http.Header, error) { @@ -731,14 +710,14 @@ func (s *GraphQLSchemaServer) executeGraphQLQuery(ctx context.Context, query str // Forward all headers from the original MCP request to the GraphQL server // The router's header forwarding rules will then determine what gets sent to subgraphs - headers, err := headersFromContext(ctx) + reqHeaders, err := headersFromContext(ctx) if err != nil { s.logger.Debug("failed to get headers from context", zap.Error(err)) } else { // Copy all headers from the MCP request - for key, values := range headers { + for key, values := range reqHeaders { // Skip headers that should not be forwarded - if _, ok := skippedHeaders[key]; ok { + if _, ok := headers.SkippedHeaders[key]; ok { continue } for _, value := range values { diff --git a/router/pkg/schemaloader/loader.go b/router/pkg/schemaloader/loader.go index cd3f53dad8..fbb0f7dcae 100644 --- a/router/pkg/schemaloader/loader.go +++ b/router/pkg/schemaloader/loader.go @@ -80,7 +80,7 @@ func (l *OperationLoader) LoadOperationsFromDirectory(dirPath string) ([]Operati } // Extract the operation name and type - opName, opType, err := getOperationNameAndType(&opDoc) + opName, opType, err := GetOperationNameAndType(&opDoc) if err != nil { l.Logger.Error("Failed to extract MCP operation name and type", zap.String("operation", opName), zap.String("file", path), zap.Error(err)) return nil @@ -160,8 +160,8 @@ func parseOperation(path string, operation string) (ast.Document, error) { return opDoc, nil } -// getOperationNameAndType extracts the name and type of the first operation in a document -func getOperationNameAndType(doc *ast.Document) (string, string, error) { +// GetOperationNameAndType extracts the name and type of the first operation in a document +func GetOperationNameAndType(doc *ast.Document) (string, string, error) { for _, ref := range doc.RootNodes { if ref.Kind == ast.NodeKindOperationDefinition { opDef := doc.OperationDefinitions[ref.Ref]