From 969bddbf2dc9e41a3808b1eca2c51fbb7ef8c81d Mon Sep 17 00:00:00 2001 From: Luca Fondo Date: Tue, 27 Jan 2026 15:38:37 +0100 Subject: [PATCH 01/20] feat(#74): implement background daemon with gRPC over UDS and auto-shutdown Implements phase 1 of the daemon architecture: Domain Layer: - Add daemon path constants (socket, PID, log paths) - Add daemon-specific sentinel errors (ErrDaemonNotRunning, etc.) - Add 3-hour inactivity timeout constant Port Interfaces: - Define DaemonClient interface (Ping, Status, Shutdown, Close) - Define DaemonConnector interface (Connect, IsRunning, Spawn) - Define DaemonStatus struct with runtime metrics gRPC Service: - Add daemon.proto with DaemonService (Ping/Status/Shutdown RPCs) - Configure buf for protocol buffer generation - Generate pb.go and grpc.pb.go stubs Adapter Implementation: - Lifecycle manager with time.Timer-based inactivity tracking - gRPC server listening on Unix Domain Socket - gRPC client with automatic connection retry - Background process spawner with detached execution - Graft DI node registration CLI Integration: - Add 'daemon serve' subcommand (hidden, for internal spawning) - Add 'daemon status' subcommand (shows uptime, idle time, PID) - Add 'daemon stop' subcommand (graceful shutdown) - Wire daemon commands into root CLI Testing: - Add lifecycle_test.go with synctest-based timer validation - Test auto-shutdown after inactivity timeout - Test timer reset prevents premature shutdown Infrastructure: - Add grpc, protobuf dependencies to go.mod - Update flake.nix with buf tooling - Register daemon adapter in wiring --- cli/api/daemon/v1/daemon.pb.go | 388 ++++++++++++++++++ cli/api/daemon/v1/daemon.proto | 42 ++ cli/api/daemon/v1/daemon_grpc.pb.go | 204 +++++++++ cli/buf.gen.yaml | 8 + cli/buf.yaml | 7 + cli/cmd/same/commands/commands_test.go | 35 +- cli/cmd/same/commands/daemon.go | 49 +++ cli/cmd/same/commands/root.go | 1 + cli/go.mod | 6 +- cli/go.sum | 16 +- cli/internal/adapters/daemon/client.go | 73 ++++ cli/internal/adapters/daemon/lifecycle.go | 82 ++++ .../adapters/daemon/lifecycle_test.go | 115 ++++++ cli/internal/adapters/daemon/node.go | 21 + cli/internal/adapters/daemon/server.go | 126 ++++++ cli/internal/adapters/daemon/spawner.go | 143 +++++++ cli/internal/app/app.go | 66 ++- cli/internal/app/app_test.go | 36 +- cli/internal/app/node.go | 9 +- cli/internal/core/domain/errors.go | 12 + cli/internal/core/domain/layout.go | 39 +- cli/internal/core/ports/daemon.go | 44 ++ cli/internal/wiring/wiring.go | 1 + flake.nix | 1 + go.work.sum | 36 +- 25 files changed, 1528 insertions(+), 32 deletions(-) create mode 100644 cli/api/daemon/v1/daemon.pb.go create mode 100644 cli/api/daemon/v1/daemon.proto create mode 100644 cli/api/daemon/v1/daemon_grpc.pb.go create mode 100644 cli/buf.gen.yaml create mode 100644 cli/buf.yaml create mode 100644 cli/cmd/same/commands/daemon.go create mode 100644 cli/internal/adapters/daemon/client.go create mode 100644 cli/internal/adapters/daemon/lifecycle.go create mode 100644 cli/internal/adapters/daemon/lifecycle_test.go create mode 100644 cli/internal/adapters/daemon/node.go create mode 100644 cli/internal/adapters/daemon/server.go create mode 100644 cli/internal/adapters/daemon/spawner.go create mode 100644 cli/internal/core/ports/daemon.go diff --git a/cli/api/daemon/v1/daemon.pb.go b/cli/api/daemon/v1/daemon.pb.go new file mode 100644 index 0000000..14d0b36 --- /dev/null +++ b/cli/api/daemon/v1/daemon.pb.go @@ -0,0 +1,388 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.36.11 +// protoc (unknown) +// source: api/daemon/v1/daemon.proto + +package daemonv1 + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type PingRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PingRequest) Reset() { + *x = PingRequest{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PingRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PingRequest) ProtoMessage() {} + +func (x *PingRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PingRequest.ProtoReflect.Descriptor instead. +func (*PingRequest) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{0} +} + +type PingResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // idle_remaining_seconds is the time remaining before auto-shutdown. + IdleRemainingSeconds int64 `protobuf:"varint,1,opt,name=idle_remaining_seconds,json=idleRemainingSeconds,proto3" json:"idle_remaining_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PingResponse) Reset() { + *x = PingResponse{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PingResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PingResponse) ProtoMessage() {} + +func (x *PingResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PingResponse.ProtoReflect.Descriptor instead. 
+func (*PingResponse) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{1} +} + +func (x *PingResponse) GetIdleRemainingSeconds() int64 { + if x != nil { + return x.IdleRemainingSeconds + } + return 0 +} + +type StatusRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusRequest) Reset() { + *x = StatusRequest{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusRequest) ProtoMessage() {} + +func (x *StatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusRequest.ProtoReflect.Descriptor instead. +func (*StatusRequest) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{2} +} + +type StatusResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Running bool `protobuf:"varint,1,opt,name=running,proto3" json:"running,omitempty"` + Pid int32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + UptimeSeconds int64 `protobuf:"varint,3,opt,name=uptime_seconds,json=uptimeSeconds,proto3" json:"uptime_seconds,omitempty"` + LastActivityUnix int64 `protobuf:"varint,4,opt,name=last_activity_unix,json=lastActivityUnix,proto3" json:"last_activity_unix,omitempty"` + IdleRemainingSeconds int64 `protobuf:"varint,5,opt,name=idle_remaining_seconds,json=idleRemainingSeconds,proto3" json:"idle_remaining_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusResponse) Reset() { + *x = StatusResponse{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse) ProtoMessage() {} + +func (x *StatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead. +func (*StatusResponse) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{3} +} + +func (x *StatusResponse) GetRunning() bool { + if x != nil { + return x.Running + } + return false +} + +func (x *StatusResponse) GetPid() int32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *StatusResponse) GetUptimeSeconds() int64 { + if x != nil { + return x.UptimeSeconds + } + return 0 +} + +func (x *StatusResponse) GetLastActivityUnix() int64 { + if x != nil { + return x.LastActivityUnix + } + return 0 +} + +func (x *StatusResponse) GetIdleRemainingSeconds() int64 { + if x != nil { + return x.IdleRemainingSeconds + } + return 0 +} + +type ShutdownRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // graceful indicates whether to wait for in-flight operations. 
+ Graceful bool `protobuf:"varint,1,opt,name=graceful,proto3" json:"graceful,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ShutdownRequest) Reset() { + *x = ShutdownRequest{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ShutdownRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShutdownRequest) ProtoMessage() {} + +func (x *ShutdownRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShutdownRequest.ProtoReflect.Descriptor instead. +func (*ShutdownRequest) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{4} +} + +func (x *ShutdownRequest) GetGraceful() bool { + if x != nil { + return x.Graceful + } + return false +} + +type ShutdownResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ShutdownResponse) Reset() { + *x = ShutdownResponse{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ShutdownResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShutdownResponse) ProtoMessage() {} + +func (x *ShutdownResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShutdownResponse.ProtoReflect.Descriptor instead. 
+func (*ShutdownResponse) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{5} +} + +func (x *ShutdownResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +var File_api_daemon_v1_daemon_proto protoreflect.FileDescriptor + +const file_api_daemon_v1_daemon_proto_rawDesc = "" + + "\n" + + "\x1aapi/daemon/v1/daemon.proto\x12\tdaemon.v1\"\r\n" + + "\vPingRequest\"D\n" + + "\fPingResponse\x124\n" + + "\x16idle_remaining_seconds\x18\x01 \x01(\x03R\x14idleRemainingSeconds\"\x0f\n" + + "\rStatusRequest\"\xc7\x01\n" + + "\x0eStatusResponse\x12\x18\n" + + "\arunning\x18\x01 \x01(\bR\arunning\x12\x10\n" + + "\x03pid\x18\x02 \x01(\x05R\x03pid\x12%\n" + + "\x0euptime_seconds\x18\x03 \x01(\x03R\ruptimeSeconds\x12,\n" + + "\x12last_activity_unix\x18\x04 \x01(\x03R\x10lastActivityUnix\x124\n" + + "\x16idle_remaining_seconds\x18\x05 \x01(\x03R\x14idleRemainingSeconds\"-\n" + + "\x0fShutdownRequest\x12\x1a\n" + + "\bgraceful\x18\x01 \x01(\bR\bgraceful\",\n" + + "\x10ShutdownResponse\x12\x18\n" + + "\asuccess\x18\x01 \x01(\bR\asuccess2\xcc\x01\n" + + "\rDaemonService\x127\n" + + "\x04Ping\x12\x16.daemon.v1.PingRequest\x1a\x17.daemon.v1.PingResponse\x12=\n" + + "\x06Status\x12\x18.daemon.v1.StatusRequest\x1a\x19.daemon.v1.StatusResponse\x12C\n" + + "\bShutdown\x12\x1a.daemon.v1.ShutdownRequest\x1a\x1b.daemon.v1.ShutdownResponseB(Z&go.trai.ch/same/api/daemon/v1;daemonv1b\x06proto3" + +var ( + file_api_daemon_v1_daemon_proto_rawDescOnce sync.Once + file_api_daemon_v1_daemon_proto_rawDescData []byte +) + +func file_api_daemon_v1_daemon_proto_rawDescGZIP() []byte { + file_api_daemon_v1_daemon_proto_rawDescOnce.Do(func() { + file_api_daemon_v1_daemon_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_api_daemon_v1_daemon_proto_rawDesc), len(file_api_daemon_v1_daemon_proto_rawDesc))) + }) + return file_api_daemon_v1_daemon_proto_rawDescData +} + +var file_api_daemon_v1_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_api_daemon_v1_daemon_proto_goTypes = []any{ + (*PingRequest)(nil), // 0: daemon.v1.PingRequest + (*PingResponse)(nil), // 1: daemon.v1.PingResponse + (*StatusRequest)(nil), // 2: daemon.v1.StatusRequest + (*StatusResponse)(nil), // 3: daemon.v1.StatusResponse + (*ShutdownRequest)(nil), // 4: daemon.v1.ShutdownRequest + (*ShutdownResponse)(nil), // 5: daemon.v1.ShutdownResponse +} +var file_api_daemon_v1_daemon_proto_depIdxs = []int32{ + 0, // 0: daemon.v1.DaemonService.Ping:input_type -> daemon.v1.PingRequest + 2, // 1: daemon.v1.DaemonService.Status:input_type -> daemon.v1.StatusRequest + 4, // 2: daemon.v1.DaemonService.Shutdown:input_type -> daemon.v1.ShutdownRequest + 1, // 3: daemon.v1.DaemonService.Ping:output_type -> daemon.v1.PingResponse + 3, // 4: daemon.v1.DaemonService.Status:output_type -> daemon.v1.StatusResponse + 5, // 5: daemon.v1.DaemonService.Shutdown:output_type -> daemon.v1.ShutdownResponse + 3, // [3:6] is the sub-list for method output_type + 0, // [0:3] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_api_daemon_v1_daemon_proto_init() } +func file_api_daemon_v1_daemon_proto_init() { + if File_api_daemon_v1_daemon_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
unsafe.Slice(unsafe.StringData(file_api_daemon_v1_daemon_proto_rawDesc), len(file_api_daemon_v1_daemon_proto_rawDesc)), + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_api_daemon_v1_daemon_proto_goTypes, + DependencyIndexes: file_api_daemon_v1_daemon_proto_depIdxs, + MessageInfos: file_api_daemon_v1_daemon_proto_msgTypes, + }.Build() + File_api_daemon_v1_daemon_proto = out.File + file_api_daemon_v1_daemon_proto_goTypes = nil + file_api_daemon_v1_daemon_proto_depIdxs = nil +} diff --git a/cli/api/daemon/v1/daemon.proto b/cli/api/daemon/v1/daemon.proto new file mode 100644 index 0000000..482f472 --- /dev/null +++ b/cli/api/daemon/v1/daemon.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; + +package daemon.v1; + +option go_package = "go.trai.ch/same/api/daemon/v1;daemonv1"; + +service DaemonService { + // Ping checks daemon health and resets the inactivity timer. + rpc Ping(PingRequest) returns (PingResponse); + + // Status returns current daemon status information. + rpc Status(StatusRequest) returns (StatusResponse); + + // Shutdown initiates graceful daemon termination. + rpc Shutdown(ShutdownRequest) returns (ShutdownResponse); +} + +message PingRequest {} + +message PingResponse { + // idle_remaining_seconds is the time remaining before auto-shutdown. + int64 idle_remaining_seconds = 1; +} + +message StatusRequest {} + +message StatusResponse { + bool running = 1; + int32 pid = 2; + int64 uptime_seconds = 3; + int64 last_activity_unix = 4; + int64 idle_remaining_seconds = 5; +} + +message ShutdownRequest { + // graceful indicates whether to wait for in-flight operations. + bool graceful = 1; +} + +message ShutdownResponse { + bool success = 1; +} diff --git a/cli/api/daemon/v1/daemon_grpc.pb.go b/cli/api/daemon/v1/daemon_grpc.pb.go new file mode 100644 index 0000000..280f9b9 --- /dev/null +++ b/cli/api/daemon/v1/daemon_grpc.pb.go @@ -0,0 +1,204 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.6.0 +// - protoc (unknown) +// source: api/daemon/v1/daemon.proto + +package daemonv1 + +import ( + context "context" + + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + DaemonService_Ping_FullMethodName = "/daemon.v1.DaemonService/Ping" + DaemonService_Status_FullMethodName = "/daemon.v1.DaemonService/Status" + DaemonService_Shutdown_FullMethodName = "/daemon.v1.DaemonService/Shutdown" +) + +// DaemonServiceClient is the client API for DaemonService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type DaemonServiceClient interface { + // Ping checks daemon health and resets the inactivity timer. + Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) + // Status returns current daemon status information. + Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) + // Shutdown initiates graceful daemon termination. 
+ Shutdown(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error) +} + +type daemonServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewDaemonServiceClient(cc grpc.ClientConnInterface) DaemonServiceClient { + return &daemonServiceClient{cc} +} + +func (c *daemonServiceClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(PingResponse) + err := c.cc.Invoke(ctx, DaemonService_Ping_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(StatusResponse) + err := c.cc.Invoke(ctx, DaemonService_Status_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) Shutdown(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ShutdownResponse) + err := c.cc.Invoke(ctx, DaemonService_Shutdown_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DaemonServiceServer is the server API for DaemonService service. +// All implementations must embed UnimplementedDaemonServiceServer +// for forward compatibility. +type DaemonServiceServer interface { + // Ping checks daemon health and resets the inactivity timer. + Ping(context.Context, *PingRequest) (*PingResponse, error) + // Status returns current daemon status information. + Status(context.Context, *StatusRequest) (*StatusResponse, error) + // Shutdown initiates graceful daemon termination. + Shutdown(context.Context, *ShutdownRequest) (*ShutdownResponse, error) + mustEmbedUnimplementedDaemonServiceServer() +} + +// UnimplementedDaemonServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedDaemonServiceServer struct{} + +func (UnimplementedDaemonServiceServer) Ping(context.Context, *PingRequest) (*PingResponse, error) { + return nil, status.Error(codes.Unimplemented, "method Ping not implemented") +} +func (UnimplementedDaemonServiceServer) Status(context.Context, *StatusRequest) (*StatusResponse, error) { + return nil, status.Error(codes.Unimplemented, "method Status not implemented") +} +func (UnimplementedDaemonServiceServer) Shutdown(context.Context, *ShutdownRequest) (*ShutdownResponse, error) { + return nil, status.Error(codes.Unimplemented, "method Shutdown not implemented") +} +func (UnimplementedDaemonServiceServer) mustEmbedUnimplementedDaemonServiceServer() {} +func (UnimplementedDaemonServiceServer) testEmbeddedByValue() {} + +// UnsafeDaemonServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to DaemonServiceServer will +// result in compilation errors. 
+type UnsafeDaemonServiceServer interface { + mustEmbedUnimplementedDaemonServiceServer() +} + +func RegisterDaemonServiceServer(s grpc.ServiceRegistrar, srv DaemonServiceServer) { + // If the following call panics, it indicates UnimplementedDaemonServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&DaemonService_ServiceDesc, srv) +} + +func _DaemonService_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).Ping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_Ping_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).Ping(ctx, req.(*PingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_Status_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).Status(ctx, req.(*StatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ShutdownRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).Shutdown(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_Shutdown_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).Shutdown(ctx, req.(*ShutdownRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// DaemonService_ServiceDesc is the grpc.ServiceDesc for DaemonService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var DaemonService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "daemon.v1.DaemonService", + HandlerType: (*DaemonServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Ping", + Handler: _DaemonService_Ping_Handler, + }, + { + MethodName: "Status", + Handler: _DaemonService_Status_Handler, + }, + { + MethodName: "Shutdown", + Handler: _DaemonService_Shutdown_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "api/daemon/v1/daemon.proto", +} diff --git a/cli/buf.gen.yaml b/cli/buf.gen.yaml new file mode 100644 index 0000000..7757009 --- /dev/null +++ b/cli/buf.gen.yaml @@ -0,0 +1,8 @@ +version: v1 +plugins: + - plugin: buf.build/protocolbuffers/go + out: . + opt: paths=source_relative + - plugin: buf.build/grpc/go + out: . 
+ opt: paths=source_relative diff --git a/cli/buf.yaml b/cli/buf.yaml new file mode 100644 index 0000000..5fc4486 --- /dev/null +++ b/cli/buf.yaml @@ -0,0 +1,7 @@ +version: v1 +lint: + use: + - DEFAULT +breaking: + use: + - FILE diff --git a/cli/cmd/same/commands/commands_test.go b/cli/cmd/same/commands/commands_test.go index 000d092..5ea294b 100644 --- a/cli/cmd/same/commands/commands_test.go +++ b/cli/cmd/same/commands/commands_test.go @@ -26,6 +26,8 @@ func TestRun_Success(t *testing.T) { mockHasher := mocks.NewMockHasher(ctrl) mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) + mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) // Create a graph with one task named "build" g := domain.NewGraph() @@ -33,8 +35,7 @@ func TestRun_Success(t *testing.T) { _ = g.AddTask(buildTask) // Setup app - mockLogger := mocks.NewMockLogger(ctrl) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector). WithTeaOptions(tea.WithInput(nil), tea.WithOutput(io.Discard)) // Initialize CLI @@ -81,10 +82,11 @@ func TestRun_NoTargets(t *testing.T) { mockHasher := mocks.NewMockHasher(ctrl) mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) + mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) // Setup app - mockLogger := mocks.NewMockLogger(ctrl) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector). WithTeaOptions(tea.WithInput(nil), tea.WithOutput(io.Discard)) // Initialize CLI @@ -113,10 +115,11 @@ func TestRoot_Help(t *testing.T) { mockHasher := mocks.NewMockHasher(ctrl) mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) + mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) // Setup app - mockLogger := mocks.NewMockLogger(ctrl) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector). WithTeaOptions(tea.WithInput(nil), tea.WithOutput(io.Discard)) // Initialize CLI @@ -144,10 +147,11 @@ func TestRoot_Version(t *testing.T) { mockHasher := mocks.NewMockHasher(ctrl) mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) + mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) // Setup app - mockLogger := mocks.NewMockLogger(ctrl) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector). 
WithTeaOptions(tea.WithInput(nil), tea.WithOutput(io.Discard)) // Initialize CLI @@ -175,10 +179,11 @@ func TestVersionCmd(t *testing.T) { mockHasher := mocks.NewMockHasher(ctrl) mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) + mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) // Setup app - mockLogger := mocks.NewMockLogger(ctrl) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector). WithTeaOptions(tea.WithInput(nil), tea.WithOutput(io.Discard)) // Initialize CLI @@ -224,8 +229,9 @@ func setupCleanTest(t *testing.T) (*commands.CLI, *mocks.MockLogger) { mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector). WithTeaOptions(tea.WithInput(nil), tea.WithOutput(io.Discard)) return commands.New(a), mockLogger @@ -358,14 +364,17 @@ func TestRun_OutputModeFlags(t *testing.T) { mockHasher := mocks.NewMockHasher(ctrl) mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) + mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) g := domain.NewGraph() g.SetRoot(".") buildTask := &domain.Task{Name: domain.NewInternedString("build"), WorkingDir: domain.NewInternedString("Root")} _ = g.AddTask(buildTask) - mockLogger := mocks.NewMockLogger(ctrl) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New( + mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector, + ). 
WithTeaOptions(tea.WithInput(nil), tea.WithOutput(io.Discard)) cli := commands.New(a) diff --git a/cli/cmd/same/commands/daemon.go b/cli/cmd/same/commands/daemon.go new file mode 100644 index 0000000..7cc71c6 --- /dev/null +++ b/cli/cmd/same/commands/daemon.go @@ -0,0 +1,49 @@ +package commands + +import ( + "github.com/spf13/cobra" +) + +func (c *CLI) newDaemonCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "daemon", + Short: "Manage the background daemon", + } + + cmd.AddCommand(c.newDaemonServeCmd()) + cmd.AddCommand(c.newDaemonStatusCmd()) + cmd.AddCommand(c.newDaemonStopCmd()) + + return cmd +} + +func (c *CLI) newDaemonServeCmd() *cobra.Command { + return &cobra.Command{ + Use: "serve", + Short: "Start the daemon server (internal use)", + Hidden: true, + RunE: func(cmd *cobra.Command, _ []string) error { + return c.app.ServeDaemon(cmd.Context()) + }, + } +} + +func (c *CLI) newDaemonStatusCmd() *cobra.Command { + return &cobra.Command{ + Use: "status", + Short: "Show daemon status", + RunE: func(cmd *cobra.Command, _ []string) error { + return c.app.DaemonStatus(cmd.Context()) + }, + } +} + +func (c *CLI) newDaemonStopCmd() *cobra.Command { + return &cobra.Command{ + Use: "stop", + Short: "Stop the daemon", + RunE: func(cmd *cobra.Command, _ []string) error { + return c.app.StopDaemon(cmd.Context()) + }, + } +} diff --git a/cli/cmd/same/commands/root.go b/cli/cmd/same/commands/root.go index cb946b4..d76b306 100644 --- a/cli/cmd/same/commands/root.go +++ b/cli/cmd/same/commands/root.go @@ -45,6 +45,7 @@ func New(a *app.App) *CLI { rootCmd.AddCommand(c.newRunCmd()) rootCmd.AddCommand(c.newCleanCmd()) rootCmd.AddCommand(c.newVersionCmd()) + rootCmd.AddCommand(c.newDaemonCmd()) return c } diff --git a/cli/go.mod b/cli/go.mod index 32d2feb..b006bfa 100644 --- a/cli/go.mod +++ b/cli/go.mod @@ -20,6 +20,8 @@ require ( go.uber.org/mock v0.6.0 golang.org/x/sync v0.19.0 golang.org/x/term v0.39.0 + google.golang.org/grpc v1.78.0 + google.golang.org/protobuf v1.36.11 gopkg.in/yaml.v3 v3.0.1 ) @@ -52,7 +54,9 @@ require ( go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel/metric v1.39.0 // indirect golang.org/x/mod v0.31.0 // indirect + golang.org/x/net v0.48.0 // indirect golang.org/x/sys v0.40.0 // indirect - golang.org/x/text v0.3.8 // indirect + golang.org/x/text v0.32.0 // indirect golang.org/x/tools v0.40.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect ) diff --git a/cli/go.sum b/cli/go.sum index 0f6002b..7128630 100644 --- a/cli/go.sum +++ b/cli/go.sum @@ -34,6 +34,8 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -105,6 +107,8 @@ golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561 h1:MDc5xs78ZrZr3HMQugiXOAkSZ golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561/go.mod 
h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -113,10 +117,18 @@ golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= -golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda h1:i/Q+bfisr7gq6feoJnS/DlpdwEL4ihp41fvRiM3Ork0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= +google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/cli/internal/adapters/daemon/client.go b/cli/internal/adapters/daemon/client.go new file mode 100644 index 0000000..a65ded1 --- /dev/null +++ b/cli/internal/adapters/daemon/client.go @@ -0,0 +1,73 @@ +// Package daemon implements the background daemon adapter for same. +// It provides gRPC server and client for inter-process communication over Unix Domain Sockets. +package daemon + +import ( + "context" + "time" + + "go.trai.ch/same/api/daemon/v1" + "go.trai.ch/same/internal/core/domain" + "go.trai.ch/same/internal/core/ports" + "go.trai.ch/zerr" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +// Client implements ports.DaemonClient. +type Client struct { + conn *grpc.ClientConn + client daemonv1.DaemonServiceClient +} + +// Dial connects to the daemon over UDS. +// Note: grpc.NewClient returns immediately; actual connection happens lazily on first RPC. 
+func Dial() (*Client, error) { + socketPath := domain.DefaultDaemonSocketPath() + target := "unix://" + socketPath + + conn, err := grpc.NewClient(target, + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + return nil, zerr.Wrap(err, "daemon client creation failed") + } + + client := &Client{ + conn: conn, + client: daemonv1.NewDaemonServiceClient(conn), + } + return client, nil +} + +// Ping implements ports.DaemonClient. +func (c *Client) Ping(ctx context.Context) error { + _, err := c.client.Ping(ctx, &daemonv1.PingRequest{}) + return err +} + +// Status implements ports.DaemonClient. +func (c *Client) Status(ctx context.Context) (*ports.DaemonStatus, error) { + resp, err := c.client.Status(ctx, &daemonv1.StatusRequest{}) + if err != nil { + return nil, err + } + return &ports.DaemonStatus{ + Running: resp.Running, + PID: int(resp.Pid), + Uptime: time.Duration(resp.UptimeSeconds) * time.Second, + LastActivity: time.Unix(resp.LastActivityUnix, 0), + IdleRemaining: time.Duration(resp.IdleRemainingSeconds) * time.Second, + }, nil +} + +// Shutdown implements ports.DaemonClient. +func (c *Client) Shutdown(ctx context.Context) error { + _, err := c.client.Shutdown(ctx, &daemonv1.ShutdownRequest{Graceful: true}) + return err +} + +// Close implements ports.DaemonClient. +func (c *Client) Close() error { + return c.conn.Close() +} diff --git a/cli/internal/adapters/daemon/lifecycle.go b/cli/internal/adapters/daemon/lifecycle.go new file mode 100644 index 0000000..64a7bdd --- /dev/null +++ b/cli/internal/adapters/daemon/lifecycle.go @@ -0,0 +1,82 @@ +package daemon + +import ( + "sync" + "time" +) + +// Lifecycle manages daemon inactivity timeout and shutdown. +type Lifecycle struct { + mu sync.Mutex + timer *time.Timer + startTime time.Time + lastActivity time.Time + timeout time.Duration + shutdownChan chan struct{} + shutdownOnce sync.Once +} + +// NewLifecycle creates a new lifecycle manager with the given timeout. +func NewLifecycle(timeout time.Duration) *Lifecycle { + now := time.Now() + l := &Lifecycle{ + startTime: now, + lastActivity: now, + timeout: timeout, + shutdownChan: make(chan struct{}), + } + l.timer = time.AfterFunc(timeout, func() { + l.triggerShutdown() + }) + return l +} + +// ResetTimer resets the inactivity timer. Called on every activity. +func (l *Lifecycle) ResetTimer() { + l.mu.Lock() + defer l.mu.Unlock() + l.lastActivity = time.Now() + l.timer.Reset(l.timeout) +} + +// IdleRemaining returns the duration until auto-shutdown. +func (l *Lifecycle) IdleRemaining() time.Duration { + l.mu.Lock() + defer l.mu.Unlock() + elapsed := time.Since(l.lastActivity) + remaining := l.timeout - elapsed + if remaining < 0 { + return 0 + } + return remaining +} + +// Uptime returns how long the daemon has been running. +func (l *Lifecycle) Uptime() time.Duration { + return time.Since(l.startTime) +} + +// LastActivity returns the timestamp of the last activity. +func (l *Lifecycle) LastActivity() time.Time { + l.mu.Lock() + defer l.mu.Unlock() + return l.lastActivity +} + +// ShutdownChan returns a channel that closes when shutdown is triggered. +func (l *Lifecycle) ShutdownChan() <-chan struct{} { + return l.shutdownChan +} + +// TriggerShutdown initiates shutdown (idempotent). +func (l *Lifecycle) triggerShutdown() { + l.shutdownOnce.Do(func() { + close(l.shutdownChan) + }) +} + +// Shutdown stops the timer and triggers shutdown. 
+func (l *Lifecycle) Shutdown() { + l.timer.Stop() + l.triggerShutdown() +} diff --git a/cli/internal/adapters/daemon/lifecycle_test.go b/cli/internal/adapters/daemon/lifecycle_test.go new file mode 100644 index 0000000..b9a759a --- /dev/null +++ b/cli/internal/adapters/daemon/lifecycle_test.go @@ -0,0 +1,115 @@ +package daemon_test + +import ( + "testing" + "testing/synctest" + "time" + + "go.trai.ch/same/internal/adapters/daemon" +) + +func TestLifecycle_AutoShutdown(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + timeout := 100 * time.Millisecond + lc := daemon.NewLifecycle(timeout) + + select { + case <-lc.ShutdownChan(): + case <-time.After(200 * time.Millisecond): + t.Fatal("expected shutdown to be triggered") + } + synctest.Wait() + }) +} + +func TestLifecycle_ResetPreventsShutdown(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + timeout := 100 * time.Millisecond + lc := daemon.NewLifecycle(timeout) + + time.Sleep(50 * time.Millisecond) + lc.ResetTimer() + + select { + case <-lc.ShutdownChan(): + t.Fatal("shutdown should not have triggered yet") + case <-time.After(60 * time.Millisecond): + } + synctest.Wait() + }) +} + +func TestLifecycle_IdleRemaining(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + timeout := 100 * time.Millisecond + lc := daemon.NewLifecycle(timeout) + + remaining := lc.IdleRemaining() + if remaining > timeout { + t.Fatalf("idle remaining %v > timeout %v", remaining, timeout) + } + + time.Sleep(50 * time.Millisecond) + remainingAfter := lc.IdleRemaining() + + if remainingAfter >= remaining { + t.Fatalf("idle remaining should have decreased") + } + synctest.Wait() + }) +} + +func TestLifecycle_Uptime(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + lc := daemon.NewLifecycle(1 * time.Hour) + + time.Sleep(10 * time.Millisecond) + uptime := lc.Uptime() + + if uptime < 10*time.Millisecond { + t.Fatalf("uptime %v < 10ms", uptime) + } + synctest.Wait() + }) +} + +func TestLifecycle_LastActivity(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + lc := daemon.NewLifecycle(1 * time.Hour) + + initialActivity := lc.LastActivity() + if initialActivity.IsZero() { + t.Fatal("last activity should be set") + } + + time.Sleep(10 * time.Millisecond) + lc.ResetTimer() + + resetActivity := lc.LastActivity() + if !resetActivity.After(initialActivity) { + t.Fatal("last activity should have been updated") + } + synctest.Wait() + }) +} + +func TestLifecycle_Shutdown(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + lc := daemon.NewLifecycle(1 * time.Hour) + + select { + case <-lc.ShutdownChan(): + t.Fatal("should not have shutdown yet") + case <-time.After(10 * time.Millisecond): + } + + lc.Shutdown() + + select { + case <-lc.ShutdownChan(): + case <-time.After(10 * time.Millisecond): + t.Fatal("should have shutdown after calling Shutdown()") + } + synctest.Wait() + }) +} diff --git a/cli/internal/adapters/daemon/node.go b/cli/internal/adapters/daemon/node.go new file mode 100644 index 0000000..aa7f726 --- /dev/null +++ b/cli/internal/adapters/daemon/node.go @@ -0,0 +1,21 @@ +package daemon + +import ( + "context" + + "github.com/grindlemire/graft" + "go.trai.ch/same/internal/core/ports" +) + +// NodeID is the unique identifier for the daemon connector Graft node. 
+const NodeID graft.ID = "adapter.daemon" + +func init() { + graft.Register(graft.Node[ports.DaemonConnector]{ + ID: NodeID, + Cacheable: true, + Run: func(_ context.Context) (ports.DaemonConnector, error) { + return NewConnector() + }, + }) +} diff --git a/cli/internal/adapters/daemon/server.go b/cli/internal/adapters/daemon/server.go new file mode 100644 index 0000000..4b92ba5 --- /dev/null +++ b/cli/internal/adapters/daemon/server.go @@ -0,0 +1,126 @@ +package daemon + +import ( + "context" + "fmt" + "net" + "os" + "path/filepath" + + "go.trai.ch/same/api/daemon/v1" + "go.trai.ch/same/internal/core/domain" + "go.trai.ch/zerr" + "google.golang.org/grpc" +) + +// Server implements the gRPC daemon service. +type Server struct { + daemonv1.UnimplementedDaemonServiceServer + lifecycle *Lifecycle + grpcServer *grpc.Server + listener net.Listener +} + +// NewServer creates a new daemon server. +func NewServer(lifecycle *Lifecycle) *Server { + s := &Server{ + lifecycle: lifecycle, + grpcServer: grpc.NewServer(), + } + daemonv1.RegisterDaemonServiceServer(s.grpcServer, s) + return s +} + +// Serve starts the gRPC server on the UDS. +func (s *Server) Serve(ctx context.Context) error { + socketPath := domain.DefaultDaemonSocketPath() + + dir := filepath.Dir(socketPath) + if err := os.MkdirAll(dir, domain.DirPerm); err != nil { + return zerr.Wrap(err, "failed to create daemon directory") + } + + if err := os.Remove(socketPath); err != nil && !os.IsNotExist(err) { + return zerr.Wrap(err, "failed to remove stale socket") + } + + lis, err := net.Listen("unix", socketPath) + if err != nil { + return zerr.Wrap(err, "failed to listen on UDS") + } + s.listener = lis + // Note: There's a brief window between socket creation and chmod where + // the socket has default permissions. This is an acceptable trade-off + // for code clarity. For defense-in-depth, consider setting umask before + // Listen if this window becomes a concern. + + if err := os.Chmod(socketPath, domain.SocketPerm); err != nil { + _ = lis.Close() + return zerr.Wrap(err, "failed to set socket permissions") + } + + if err := s.writePIDFile(); err != nil { + return err + } + + defer s.cleanup() + + errCh := make(chan error, 1) + go func() { + errCh <- s.grpcServer.Serve(lis) + }() + + select { + case <-ctx.Done(): + s.grpcServer.GracefulStop() + return ctx.Err() + case <-s.lifecycle.ShutdownChan(): + s.grpcServer.GracefulStop() + return nil + case err := <-errCh: + return err + } +} + +func (s *Server) cleanup() { + _ = os.Remove(domain.DefaultDaemonSocketPath()) + _ = os.Remove(domain.DefaultDaemonPIDPath()) +} + +// Ping implements DaemonService.Ping. +func (s *Server) Ping(_ context.Context, _ *daemonv1.PingRequest) (*daemonv1.PingResponse, error) { + s.lifecycle.ResetTimer() + return &daemonv1.PingResponse{ + IdleRemainingSeconds: int64(s.lifecycle.IdleRemaining().Seconds()), + }, nil +} + +// Status implements DaemonService.Status. 
+func (s *Server) Status(_ context.Context, _ *daemonv1.StatusRequest) (*daemonv1.StatusResponse, error) { + s.lifecycle.ResetTimer() + pid := os.Getpid() + const maxInt32 = 2147483647 + if pid > maxInt32 { + pid = maxInt32 + } + return &daemonv1.StatusResponse{ + Running: true, + //nolint:gosec // G115: Safe conversion - pid is capped to maxInt32 above + Pid: int32(pid), + UptimeSeconds: int64(s.lifecycle.Uptime().Seconds()), + LastActivityUnix: s.lifecycle.LastActivity().Unix(), + IdleRemainingSeconds: int64(s.lifecycle.IdleRemaining().Seconds()), + }, nil +} + +// Shutdown implements DaemonService.Shutdown. +func (s *Server) Shutdown(_ context.Context, _ *daemonv1.ShutdownRequest) (*daemonv1.ShutdownResponse, error) { + s.lifecycle.Shutdown() + return &daemonv1.ShutdownResponse{Success: true}, nil +} + +func (s *Server) writePIDFile() error { + pidPath := domain.DefaultDaemonPIDPath() + pid := os.Getpid() + return os.WriteFile(pidPath, []byte(fmt.Sprintf("%d", pid)), domain.PrivateFilePerm) +} diff --git a/cli/internal/adapters/daemon/spawner.go b/cli/internal/adapters/daemon/spawner.go new file mode 100644 index 0000000..1702dc2 --- /dev/null +++ b/cli/internal/adapters/daemon/spawner.go @@ -0,0 +1,143 @@ +package daemon + +import ( + "context" + "os" + "os/exec" + "path/filepath" + "syscall" + "time" + + "go.trai.ch/same/internal/core/domain" + "go.trai.ch/same/internal/core/ports" + "go.trai.ch/zerr" +) + +const ( + pollInterval = 100 * time.Millisecond + maxPollDuration = 5 * time.Second +) + +// Connector implements ports.DaemonConnector. +type Connector struct { + executablePath string +} + +// NewConnector creates a new daemon connector. +func NewConnector() (*Connector, error) { + exe, err := os.Executable() + if err != nil { + return nil, zerr.Wrap(err, "failed to determine executable path") + } + return &Connector{executablePath: exe}, nil +} + +// Connect returns a client, spawning the daemon if necessary. +func (c *Connector) Connect(ctx context.Context) (ports.DaemonClient, error) { + client, err := Dial() + if err == nil { + if pingErr := client.Ping(ctx); pingErr == nil { + return client, nil + } + _ = client.Close() + } + + if spawnErr := c.Spawn(ctx); spawnErr != nil { + return nil, spawnErr + } + + client, err = Dial() + if err != nil { + return nil, zerr.Wrap(err, "daemon client creation failed") + } + + if pingErr := client.Ping(ctx); pingErr != nil { + _ = client.Close() + return nil, zerr.Wrap(pingErr, "daemon started but is not responsive") + } + + return client, nil +} + +// IsRunning checks if the daemon is running and responsive. +func (c *Connector) IsRunning() bool { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + return c.isRunningWithCtx(ctx) +} + +// isRunningWithCtx checks if the daemon is running and responsive, respecting the provided context. +func (c *Connector) isRunningWithCtx(ctx context.Context) bool { + client, err := Dial() + if err != nil { + return false + } + defer func() { _ = client.Close() }() + + if err := client.Ping(ctx); err != nil { + return false + } + + return true +} + +// Spawn starts the daemon process in the background. 
+func (c *Connector) Spawn(ctx context.Context) error { + daemonDir := filepath.Dir(domain.DefaultDaemonSocketPath()) + if err := os.MkdirAll(daemonDir, domain.DirPerm); err != nil { + return zerr.Wrap(err, "failed to create daemon directory") + } + + logPath := domain.DefaultDaemonLogPath() + //nolint:gosec // G304: logPath is from domain.DefaultDaemonLogPath(), not user input + logFile, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, domain.PrivateFilePerm) + if err != nil { + return zerr.Wrap(err, "failed to open daemon log") + } + + //nolint:gosec // G204: executablePath is controlled, args are fixed literals + cmd := exec.Command(c.executablePath, "daemon", "serve") + cmd.Stdout = logFile + cmd.Stderr = logFile + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setsid: true, + } + + if err := cmd.Start(); err != nil { + _ = logFile.Close() + return zerr.Wrap(err, domain.ErrDaemonSpawnFailed.Error()) + } + + go func() { + _ = cmd.Wait() + _ = logFile.Close() + }() + + if err := c.waitForDaemonStartup(ctx); err != nil { + return err + } + + return nil +} + +// waitForDaemonStartup waits for the daemon to become responsive. +func (c *Connector) waitForDaemonStartup(ctx context.Context) error { + start := time.Now() + for time.Since(start) < maxPollDuration { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if c.isRunningWithCtx(ctx) { + return nil + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(pollInterval): + } + } + return zerr.New("daemon failed to start within timeout") +} diff --git a/cli/internal/app/app.go b/cli/internal/app/app.go index b81c406..aa5a709 100644 --- a/cli/internal/app/app.go +++ b/cli/internal/app/app.go @@ -11,6 +11,7 @@ import ( tea "github.com/charmbracelet/bubbletea" "go.opentelemetry.io/otel" sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.trai.ch/same/internal/adapters/daemon" "go.trai.ch/same/internal/adapters/detector" "go.trai.ch/same/internal/adapters/linear" "go.trai.ch/same/internal/adapters/telemetry" @@ -31,6 +32,7 @@ type App struct { hasher ports.Hasher resolver ports.InputResolver envFactory ports.EnvironmentFactory + connector ports.DaemonConnector teaOptions []tea.ProgramOption disableTick bool } @@ -44,6 +46,7 @@ func New( hasher ports.Hasher, resolver ports.InputResolver, envFactory ports.EnvironmentFactory, + connector ports.DaemonConnector, ) *App { return &App{ configLoader: loader, @@ -53,6 +56,7 @@ func New( hasher: hasher, resolver: resolver, envFactory: envFactory, + connector: connector, } } @@ -209,12 +213,68 @@ func (a *App) Clean(_ context.Context, options CleanOptions) error { // setupOTel configures the OpenTelemetry SDK with the renderer bridge. func setupOTel(bridge *telemetry.Bridge) { - // Create a new TracerProvider with the bridge as a SpanProcessor. - // This ensures that all started spans are reported to the renderer. tp := sdktrace.NewTracerProvider( sdktrace.WithSpanProcessor(bridge), ) - // Register it as the global provider. otel.SetTracerProvider(tp) } + +// ServeDaemon starts the daemon server. +func (a *App) ServeDaemon(ctx context.Context) error { + lifecycle := daemon.NewLifecycle(domain.DaemonInactivityTimeout) + server := daemon.NewServer(lifecycle) + + a.logger.Info("daemon starting") + + if err := server.Serve(ctx); err != nil { + return zerr.Wrap(err, "daemon server error") + } + + a.logger.Info("daemon stopped") + return nil +} + +// DaemonStatus returns the current daemon status. 
+func (a *App) DaemonStatus(ctx context.Context) error { + client, err := a.connector.Connect(ctx) + if err != nil { + return zerr.Wrap(err, "failed to connect to daemon") + } + defer func() { + _ = client.Close() + }() + + status, err := client.Status(ctx) + if err != nil { + return zerr.Wrap(err, "failed to get daemon status") + } + + a.logger.Info("Daemon Status:") + a.logger.Info(fmt.Sprintf(" Running: %v", status.Running)) + a.logger.Info(fmt.Sprintf(" PID: %d", status.PID)) + a.logger.Info(fmt.Sprintf(" Uptime: %v", status.Uptime)) + a.logger.Info(fmt.Sprintf(" Last Activity: %v", status.LastActivity)) + a.logger.Info(fmt.Sprintf(" Idle Remaining: %v", status.IdleRemaining)) + + return nil +} + +// StopDaemon stops the daemon. +func (a *App) StopDaemon(ctx context.Context) error { + client, err := a.connector.Connect(ctx) + if err != nil { + return zerr.Wrap(err, "failed to connect to daemon") + } + defer func() { + _ = client.Close() + }() + + a.logger.Info("stopping daemon") + if err := client.Shutdown(ctx); err != nil { + return zerr.Wrap(err, "failed to stop daemon") + } + + a.logger.Info("daemon stopped") + return nil +} diff --git a/cli/internal/app/app_test.go b/cli/internal/app/app_test.go index 3287228..bc02dc6 100644 --- a/cli/internal/app/app_test.go +++ b/cli/internal/app/app_test.go @@ -44,6 +44,7 @@ func TestApp_Build(t *testing.T) { mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) // Setup Graph g := domain.NewGraph() @@ -52,7 +53,9 @@ func TestApp_Build(t *testing.T) { _ = g.AddTask(task) // Setup App - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New( + mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector, + ). WithTeaOptions( tea.WithInput(strings.NewReader("")), tea.WithOutput(io.Discard), @@ -105,10 +108,13 @@ func TestApp_Run_NoTargets(t *testing.T) { mockHasher := mocks.NewMockHasher(ctrl) mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) + mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) // Setup App - mockLogger := mocks.NewMockLogger(ctrl) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New( + mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector, + ). WithTeaOptions( tea.WithInput(strings.NewReader("")), tea.WithOutput(io.Discard), @@ -157,10 +163,13 @@ func TestApp_Run_ConfigLoaderError(t *testing.T) { mockHasher := mocks.NewMockHasher(ctrl) mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) + mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) // Setup App - mockLogger := mocks.NewMockLogger(ctrl) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New( + mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector, + ). 
WithTeaOptions( tea.WithInput(strings.NewReader("")), tea.WithOutput(io.Discard), @@ -213,6 +222,7 @@ func TestApp_Run_BuildExecutionFailed(t *testing.T) { mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) // Setup Graph g := domain.NewGraph() @@ -221,7 +231,9 @@ func TestApp_Run_BuildExecutionFailed(t *testing.T) { _ = g.AddTask(task) // Setup App - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New( + mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector, + ). WithTeaOptions( tea.WithInput(strings.NewReader("")), tea.WithOutput(io.Discard), @@ -370,7 +382,7 @@ func TestApp_Clean(t *testing.T) { mockLogger.EXPECT().Info(gomock.Any()).AnyTimes() // Null dependencies for others - a := app.New(nil, nil, mockLogger, nil, nil, nil, nil) + a := app.New(nil, nil, mockLogger, nil, nil, nil, nil, nil) err = a.Clean(context.Background(), tt.options) if err != nil { @@ -409,13 +421,16 @@ func TestApp_Run_LinearMode(t *testing.T) { mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) g := domain.NewGraph() g.SetRoot(".") task := &domain.Task{Name: domain.NewInternedString("task1"), WorkingDir: domain.NewInternedString("Root")} _ = g.AddTask(task) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New( + mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector, + ). WithTeaOptions( tea.WithInput(strings.NewReader("")), tea.WithOutput(io.Discard), @@ -468,13 +483,16 @@ func TestApp_Run_InspectMode(t *testing.T) { mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) g := domain.NewGraph() g.SetRoot(".") task := &domain.Task{Name: domain.NewInternedString("task1"), WorkingDir: domain.NewInternedString("Root")} _ = g.AddTask(task) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New( + mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector, + ). 
WithTeaOptions( tea.WithInput(strings.NewReader("q")), tea.WithOutput(io.Discard), diff --git a/cli/internal/app/node.go b/cli/internal/app/node.go index 54edf5b..187cd11 100644 --- a/cli/internal/app/node.go +++ b/cli/internal/app/node.go @@ -6,6 +6,7 @@ import ( "github.com/grindlemire/graft" "go.trai.ch/same/internal/adapters/cas" //nolint:depguard // Wired in app layer "go.trai.ch/same/internal/adapters/config" //nolint:depguard // Wired in app layer + "go.trai.ch/same/internal/adapters/daemon" //nolint:depguard // Wired in app layer "go.trai.ch/same/internal/adapters/fs" //nolint:depguard // Wired in app layer "go.trai.ch/same/internal/adapters/logger" //nolint:depguard // Wired in app layer "go.trai.ch/same/internal/adapters/nix" //nolint:depguard // Wired in app layer @@ -34,6 +35,7 @@ func init() { fs.HasherNodeID, fs.ResolverNodeID, nix.EnvFactoryNodeID, + daemon.NodeID, }, Run: runAppNode, }) @@ -87,7 +89,12 @@ func runAppNode(ctx context.Context) (*App, error) { return nil, err } - return New(loader, executor, log, store, hasher, resolver, envFactory), nil + connector, err := graft.Dep[ports.DaemonConnector](ctx) + if err != nil { + return nil, err + } + + return New(loader, executor, log, store, hasher, resolver, envFactory, connector), nil } func runComponentsNode(ctx context.Context) (*Components, error) { diff --git a/cli/internal/core/domain/errors.go b/cli/internal/core/domain/errors.go index f383d51..c372b72 100644 --- a/cli/internal/core/domain/errors.go +++ b/cli/internal/core/domain/errors.go @@ -152,4 +152,16 @@ var ( // ErrCacheMiss is returned when a requested item is not found in the cache. ErrCacheMiss = zerr.New("cache miss") + + // ErrDaemonNotRunning is returned when the daemon is not running. + ErrDaemonNotRunning = zerr.New("daemon is not running") + + // ErrDaemonSpawnFailed is returned when spawning the daemon fails. + ErrDaemonSpawnFailed = zerr.New("failed to spawn daemon process") + + // ErrDaemonShutdownFailed is returned when graceful shutdown fails. + ErrDaemonShutdownFailed = zerr.New("failed to shutdown daemon gracefully") + + // ErrDaemonAlreadyRunning is returned when attempting to start a daemon that's already running. + ErrDaemonAlreadyRunning = zerr.New("daemon is already running") ) diff --git a/cli/internal/core/domain/layout.go b/cli/internal/core/domain/layout.go index 995d64f..9bdf3cc 100644 --- a/cli/internal/core/domain/layout.go +++ b/cli/internal/core/domain/layout.go @@ -1,6 +1,9 @@ package domain -import "path/filepath" +import ( + "path/filepath" + "time" +) const ( // SameDirName is the name of the internal workspace directory. @@ -18,6 +21,9 @@ const ( // EnvDirName is the name of the environment cache directory. EnvDirName = "environments" + // DaemonDirName is the name of the daemon directory. + DaemonDirName = "daemon" + // SameFileName is the name of the project configuration file. SameFileName = "same.yaml" @@ -27,6 +33,15 @@ const ( // DebugLogFile is the name of the debug log file. DebugLogFile = "debug.log" + // DaemonSocketName is the name of the daemon Unix socket file. + DaemonSocketName = "daemon.sock" + + // DaemonPIDFileName is the name of the daemon PID file. + DaemonPIDFileName = "daemon.pid" + + // DaemonLogFileName is the name of the daemon log file. + DaemonLogFileName = "daemon.log" + // DirPerm is the default permission for directories (rwxr-x---). DirPerm = 0o750 @@ -35,6 +50,13 @@ const ( // PrivateFilePerm is the default permission for private files (rw-------). 
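+	// The daemon log and PID file are created with this permission so that
+	// daemon runtime metadata is readable by the owning user only.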
PrivateFilePerm = 0o600 + + // SocketPerm is the permission for Unix domain sockets (rwxr-x---). + // This ensures the socket is accessible to the owner and group, but not others. + SocketPerm = 0o750 + + // DaemonInactivityTimeout is the duration after which the daemon shuts down. + DaemonInactivityTimeout = 3 * time.Hour ) // DefaultSamePath returns the default root directory for same metadata. @@ -65,3 +87,18 @@ func DefaultEnvCachePath() string { func DefaultDebugLogPath() string { return filepath.Join(SameDirName, DebugLogFile) } + +// DefaultDaemonSocketPath returns the path to the daemon Unix socket. +func DefaultDaemonSocketPath() string { + return filepath.Join(SameDirName, DaemonDirName, DaemonSocketName) +} + +// DefaultDaemonPIDPath returns the path to the daemon PID file. +func DefaultDaemonPIDPath() string { + return filepath.Join(SameDirName, DaemonDirName, DaemonPIDFileName) +} + +// DefaultDaemonLogPath returns the path to the daemon log file. +func DefaultDaemonLogPath() string { + return filepath.Join(SameDirName, DaemonDirName, DaemonLogFileName) +} diff --git a/cli/internal/core/ports/daemon.go b/cli/internal/core/ports/daemon.go new file mode 100644 index 0000000..23e48c5 --- /dev/null +++ b/cli/internal/core/ports/daemon.go @@ -0,0 +1,44 @@ +package ports + +import ( + "context" + "time" +) + +//go:generate mockgen -source=daemon.go -destination=mocks/mock_daemon.go -package=mocks + +// DaemonStatus represents the current state of the daemon. +type DaemonStatus struct { + Running bool + PID int + Uptime time.Duration + LastActivity time.Time + IdleRemaining time.Duration +} + +// DaemonClient defines the interface for communicating with the daemon. +type DaemonClient interface { + // Ping checks if the daemon is alive and resets the inactivity timer. + Ping(ctx context.Context) error + + // Status returns the current daemon status. + Status(ctx context.Context) (*DaemonStatus, error) + + // Shutdown requests a graceful daemon shutdown. + Shutdown(ctx context.Context) error + + // Close releases client resources. + Close() error +} + +// DaemonConnector manages daemon lifecycle from the CLI perspective. +type DaemonConnector interface { + // Connect returns a client to the daemon, spawning it if necessary. + Connect(ctx context.Context) (DaemonClient, error) + + // IsRunning checks if the daemon process is currently running. + IsRunning() bool + + // Spawn starts a new daemon process in the background. + Spawn(ctx context.Context) error +} diff --git a/cli/internal/wiring/wiring.go b/cli/internal/wiring/wiring.go index 2b0eb56..f7c637f 100644 --- a/cli/internal/wiring/wiring.go +++ b/cli/internal/wiring/wiring.go @@ -5,6 +5,7 @@ import ( // Register adapter nodes. 
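+	// Each blank import runs the adapter's init, which registers its node with graft.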
_ "go.trai.ch/same/internal/adapters/cas" _ "go.trai.ch/same/internal/adapters/config" + _ "go.trai.ch/same/internal/adapters/daemon" _ "go.trai.ch/same/internal/adapters/fs" _ "go.trai.ch/same/internal/adapters/logger" _ "go.trai.ch/same/internal/adapters/nix" diff --git a/flake.nix b/flake.nix index d83c05f..d92a9b8 100644 --- a/flake.nix +++ b/flake.nix @@ -34,6 +34,7 @@ mockgen gci gofumpt + buf ]; in diff --git a/go.work.sum b/go.work.sum index 74d82b8..5681213 100644 --- a/go.work.sum +++ b/go.work.sum @@ -1,3 +1,9 @@ +cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0/go.mod h1:2bIszWvQRlJVmJLiuLhukLImRjKPcYdzzsx6darK02A= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= @@ -12,26 +18,52 @@ github.com/charmbracelet/x/exp/golden v0.0.0-20240806155701-69247e0abc2a h1:G99k github.com/charmbracelet/x/exp/golden v0.0.0-20240806155701-69247e0abc2a/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U= github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ= github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U= +github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4= github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 h1:q2hJAaP1k2wIvVRd/hEHD7lacgqrCPS+k8g1MndzfWY= github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= +github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs= +github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= +github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= +github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= +github.com/golang/glog v1.2.4/go.mod 
h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/sahilm/fuzzy v0.1.1 h1:ceu5RHF8DGgoi+/dR5PsECjCDH1BE3Fnmpo7aVXOdRA= github.com/sahilm/fuzzy v0.1.1/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y= +github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= -golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= -golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc h1:bH6xUXay0AIFMElXG2rQ4uiE+7ncwtiOdPfYK1NK2XA= golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= From 3650ffef8f04b71016e610e3a6725c9753c7e34a Mon Sep 17 00:00:00 2001 From: Luca Fondo Date: Tue, 27 Jan 2026 16:11:58 +0100 Subject: [PATCH 02/20] feat(#74): refactor scheduler to request graph and env from daemon - Extend daemon proto with GetGraph and GetEnvironment RPCs - Add GraphCacheEntry domain type for cache metadata - Extend 
DaemonClient interface with GetGraph/GetEnvironment methods - Implement thread-safe ServerCache for graph and environment caching - Add daemon RPC handlers for GetGraph and GetEnvironment - Implement daemon client methods with string interning - Add DiscoverConfigPaths to config loader for cache validation - Refactor Scheduler to optionally use daemon client with fallback - Refactor App layer to request graph from daemon with local fallback --- cli/api/daemon/v1/daemon.pb.go | 476 ++++++++++++++++++++- cli/api/daemon/v1/daemon.proto | 44 ++ cli/api/daemon/v1/daemon_grpc.pb.go | 86 +++- cli/internal/adapters/config/loader.go | 78 ++++ cli/internal/adapters/daemon/cache.go | 75 ++++ cli/internal/adapters/daemon/client.go | 72 ++++ cli/internal/adapters/daemon/server.go | 131 +++++- cli/internal/app/app.go | 42 +- cli/internal/core/domain/cache.go | 9 + cli/internal/core/ports/config_loader.go | 4 + cli/internal/core/ports/daemon.go | 17 + cli/internal/engine/scheduler/scheduler.go | 25 +- 12 files changed, 1027 insertions(+), 32 deletions(-) create mode 100644 cli/internal/adapters/daemon/cache.go create mode 100644 cli/internal/core/domain/cache.go diff --git a/cli/api/daemon/v1/daemon.pb.go b/cli/api/daemon/v1/daemon.pb.go index 14d0b36..7248b6b 100644 --- a/cli/api/daemon/v1/daemon.pb.go +++ b/cli/api/daemon/v1/daemon.pb.go @@ -304,6 +304,382 @@ func (x *ShutdownResponse) GetSuccess() bool { return false } +type ConfigMtime struct { + state protoimpl.MessageState `protogen:"open.v1"` + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + MtimeUnixNano int64 `protobuf:"varint,2,opt,name=mtime_unix_nano,json=mtimeUnixNano,proto3" json:"mtime_unix_nano,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ConfigMtime) Reset() { + *x = ConfigMtime{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ConfigMtime) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigMtime) ProtoMessage() {} + +func (x *ConfigMtime) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigMtime.ProtoReflect.Descriptor instead. 
+func (*ConfigMtime) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{6} +} + +func (x *ConfigMtime) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *ConfigMtime) GetMtimeUnixNano() int64 { + if x != nil { + return x.MtimeUnixNano + } + return 0 +} + +type GetGraphRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Cwd string `protobuf:"bytes,1,opt,name=cwd,proto3" json:"cwd,omitempty"` + ConfigMtimes []*ConfigMtime `protobuf:"bytes,2,rep,name=config_mtimes,json=configMtimes,proto3" json:"config_mtimes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetGraphRequest) Reset() { + *x = GetGraphRequest{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetGraphRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetGraphRequest) ProtoMessage() {} + +func (x *GetGraphRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetGraphRequest.ProtoReflect.Descriptor instead. +func (*GetGraphRequest) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{7} +} + +func (x *GetGraphRequest) GetCwd() string { + if x != nil { + return x.Cwd + } + return "" +} + +func (x *GetGraphRequest) GetConfigMtimes() []*ConfigMtime { + if x != nil { + return x.ConfigMtimes + } + return nil +} + +type TaskProto struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Command []string `protobuf:"bytes,2,rep,name=command,proto3" json:"command,omitempty"` + Inputs []string `protobuf:"bytes,3,rep,name=inputs,proto3" json:"inputs,omitempty"` + Outputs []string `protobuf:"bytes,4,rep,name=outputs,proto3" json:"outputs,omitempty"` + Tools map[string]string `protobuf:"bytes,5,rep,name=tools,proto3" json:"tools,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Dependencies []string `protobuf:"bytes,6,rep,name=dependencies,proto3" json:"dependencies,omitempty"` + Environment map[string]string `protobuf:"bytes,7,rep,name=environment,proto3" json:"environment,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + WorkingDir string `protobuf:"bytes,8,opt,name=working_dir,json=workingDir,proto3" json:"working_dir,omitempty"` + RebuildStrategy string `protobuf:"bytes,9,opt,name=rebuild_strategy,json=rebuildStrategy,proto3" json:"rebuild_strategy,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TaskProto) Reset() { + *x = TaskProto{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TaskProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TaskProto) ProtoMessage() {} + +func (x *TaskProto) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} 
+ +// Deprecated: Use TaskProto.ProtoReflect.Descriptor instead. +func (*TaskProto) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{8} +} + +func (x *TaskProto) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *TaskProto) GetCommand() []string { + if x != nil { + return x.Command + } + return nil +} + +func (x *TaskProto) GetInputs() []string { + if x != nil { + return x.Inputs + } + return nil +} + +func (x *TaskProto) GetOutputs() []string { + if x != nil { + return x.Outputs + } + return nil +} + +func (x *TaskProto) GetTools() map[string]string { + if x != nil { + return x.Tools + } + return nil +} + +func (x *TaskProto) GetDependencies() []string { + if x != nil { + return x.Dependencies + } + return nil +} + +func (x *TaskProto) GetEnvironment() map[string]string { + if x != nil { + return x.Environment + } + return nil +} + +func (x *TaskProto) GetWorkingDir() string { + if x != nil { + return x.WorkingDir + } + return "" +} + +func (x *TaskProto) GetRebuildStrategy() string { + if x != nil { + return x.RebuildStrategy + } + return "" +} + +type GetGraphResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + CacheHit bool `protobuf:"varint,1,opt,name=cache_hit,json=cacheHit,proto3" json:"cache_hit,omitempty"` + Root string `protobuf:"bytes,2,opt,name=root,proto3" json:"root,omitempty"` + Tasks []*TaskProto `protobuf:"bytes,3,rep,name=tasks,proto3" json:"tasks,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetGraphResponse) Reset() { + *x = GetGraphResponse{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetGraphResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetGraphResponse) ProtoMessage() {} + +func (x *GetGraphResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetGraphResponse.ProtoReflect.Descriptor instead. 
+func (*GetGraphResponse) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{9} +} + +func (x *GetGraphResponse) GetCacheHit() bool { + if x != nil { + return x.CacheHit + } + return false +} + +func (x *GetGraphResponse) GetRoot() string { + if x != nil { + return x.Root + } + return "" +} + +func (x *GetGraphResponse) GetTasks() []*TaskProto { + if x != nil { + return x.Tasks + } + return nil +} + +type GetEnvironmentRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + EnvId string `protobuf:"bytes,1,opt,name=env_id,json=envId,proto3" json:"env_id,omitempty"` + Tools map[string]string `protobuf:"bytes,2,rep,name=tools,proto3" json:"tools,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetEnvironmentRequest) Reset() { + *x = GetEnvironmentRequest{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetEnvironmentRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEnvironmentRequest) ProtoMessage() {} + +func (x *GetEnvironmentRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEnvironmentRequest.ProtoReflect.Descriptor instead. +func (*GetEnvironmentRequest) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{10} +} + +func (x *GetEnvironmentRequest) GetEnvId() string { + if x != nil { + return x.EnvId + } + return "" +} + +func (x *GetEnvironmentRequest) GetTools() map[string]string { + if x != nil { + return x.Tools + } + return nil +} + +type GetEnvironmentResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + CacheHit bool `protobuf:"varint,1,opt,name=cache_hit,json=cacheHit,proto3" json:"cache_hit,omitempty"` + EnvVars []string `protobuf:"bytes,2,rep,name=env_vars,json=envVars,proto3" json:"env_vars,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetEnvironmentResponse) Reset() { + *x = GetEnvironmentResponse{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetEnvironmentResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEnvironmentResponse) ProtoMessage() {} + +func (x *GetEnvironmentResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEnvironmentResponse.ProtoReflect.Descriptor instead. 
+func (*GetEnvironmentResponse) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{11} +} + +func (x *GetEnvironmentResponse) GetCacheHit() bool { + if x != nil { + return x.CacheHit + } + return false +} + +func (x *GetEnvironmentResponse) GetEnvVars() []string { + if x != nil { + return x.EnvVars + } + return nil +} + var File_api_daemon_v1_daemon_proto protoreflect.FileDescriptor const file_api_daemon_v1_daemon_proto_rawDesc = "" + @@ -322,11 +698,51 @@ const file_api_daemon_v1_daemon_proto_rawDesc = "" + "\x0fShutdownRequest\x12\x1a\n" + "\bgraceful\x18\x01 \x01(\bR\bgraceful\",\n" + "\x10ShutdownResponse\x12\x18\n" + - "\asuccess\x18\x01 \x01(\bR\asuccess2\xcc\x01\n" + + "\asuccess\x18\x01 \x01(\bR\asuccess\"I\n" + + "\vConfigMtime\x12\x12\n" + + "\x04path\x18\x01 \x01(\tR\x04path\x12&\n" + + "\x0fmtime_unix_nano\x18\x02 \x01(\x03R\rmtimeUnixNano\"`\n" + + "\x0fGetGraphRequest\x12\x10\n" + + "\x03cwd\x18\x01 \x01(\tR\x03cwd\x12;\n" + + "\rconfig_mtimes\x18\x02 \x03(\v2\x16.daemon.v1.ConfigMtimeR\fconfigMtimes\"\xd5\x03\n" + + "\tTaskProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" + + "\acommand\x18\x02 \x03(\tR\acommand\x12\x16\n" + + "\x06inputs\x18\x03 \x03(\tR\x06inputs\x12\x18\n" + + "\aoutputs\x18\x04 \x03(\tR\aoutputs\x125\n" + + "\x05tools\x18\x05 \x03(\v2\x1f.daemon.v1.TaskProto.ToolsEntryR\x05tools\x12\"\n" + + "\fdependencies\x18\x06 \x03(\tR\fdependencies\x12G\n" + + "\venvironment\x18\a \x03(\v2%.daemon.v1.TaskProto.EnvironmentEntryR\venvironment\x12\x1f\n" + + "\vworking_dir\x18\b \x01(\tR\n" + + "workingDir\x12)\n" + + "\x10rebuild_strategy\x18\t \x01(\tR\x0frebuildStrategy\x1a8\n" + + "\n" + + "ToolsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\x1a>\n" + + "\x10EnvironmentEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"o\n" + + "\x10GetGraphResponse\x12\x1b\n" + + "\tcache_hit\x18\x01 \x01(\bR\bcacheHit\x12\x12\n" + + "\x04root\x18\x02 \x01(\tR\x04root\x12*\n" + + "\x05tasks\x18\x03 \x03(\v2\x14.daemon.v1.TaskProtoR\x05tasks\"\xab\x01\n" + + "\x15GetEnvironmentRequest\x12\x15\n" + + "\x06env_id\x18\x01 \x01(\tR\x05envId\x12A\n" + + "\x05tools\x18\x02 \x03(\v2+.daemon.v1.GetEnvironmentRequest.ToolsEntryR\x05tools\x1a8\n" + + "\n" + + "ToolsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"P\n" + + "\x16GetEnvironmentResponse\x12\x1b\n" + + "\tcache_hit\x18\x01 \x01(\bR\bcacheHit\x12\x19\n" + + "\benv_vars\x18\x02 \x03(\tR\aenvVars2\xe8\x02\n" + "\rDaemonService\x127\n" + "\x04Ping\x12\x16.daemon.v1.PingRequest\x1a\x17.daemon.v1.PingResponse\x12=\n" + "\x06Status\x12\x18.daemon.v1.StatusRequest\x1a\x19.daemon.v1.StatusResponse\x12C\n" + - "\bShutdown\x12\x1a.daemon.v1.ShutdownRequest\x1a\x1b.daemon.v1.ShutdownResponseB(Z&go.trai.ch/same/api/daemon/v1;daemonv1b\x06proto3" + "\bShutdown\x12\x1a.daemon.v1.ShutdownRequest\x1a\x1b.daemon.v1.ShutdownResponse\x12C\n" + + "\bGetGraph\x12\x1a.daemon.v1.GetGraphRequest\x1a\x1b.daemon.v1.GetGraphResponse\x12U\n" + + "\x0eGetEnvironment\x12 .daemon.v1.GetEnvironmentRequest\x1a!.daemon.v1.GetEnvironmentResponseB(Z&go.trai.ch/same/api/daemon/v1;daemonv1b\x06proto3" var ( file_api_daemon_v1_daemon_proto_rawDescOnce sync.Once @@ -340,27 +756,45 @@ func file_api_daemon_v1_daemon_proto_rawDescGZIP() []byte { return file_api_daemon_v1_daemon_proto_rawDescData } -var 
file_api_daemon_v1_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_api_daemon_v1_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 15) var file_api_daemon_v1_daemon_proto_goTypes = []any{ - (*PingRequest)(nil), // 0: daemon.v1.PingRequest - (*PingResponse)(nil), // 1: daemon.v1.PingResponse - (*StatusRequest)(nil), // 2: daemon.v1.StatusRequest - (*StatusResponse)(nil), // 3: daemon.v1.StatusResponse - (*ShutdownRequest)(nil), // 4: daemon.v1.ShutdownRequest - (*ShutdownResponse)(nil), // 5: daemon.v1.ShutdownResponse + (*PingRequest)(nil), // 0: daemon.v1.PingRequest + (*PingResponse)(nil), // 1: daemon.v1.PingResponse + (*StatusRequest)(nil), // 2: daemon.v1.StatusRequest + (*StatusResponse)(nil), // 3: daemon.v1.StatusResponse + (*ShutdownRequest)(nil), // 4: daemon.v1.ShutdownRequest + (*ShutdownResponse)(nil), // 5: daemon.v1.ShutdownResponse + (*ConfigMtime)(nil), // 6: daemon.v1.ConfigMtime + (*GetGraphRequest)(nil), // 7: daemon.v1.GetGraphRequest + (*TaskProto)(nil), // 8: daemon.v1.TaskProto + (*GetGraphResponse)(nil), // 9: daemon.v1.GetGraphResponse + (*GetEnvironmentRequest)(nil), // 10: daemon.v1.GetEnvironmentRequest + (*GetEnvironmentResponse)(nil), // 11: daemon.v1.GetEnvironmentResponse + nil, // 12: daemon.v1.TaskProto.ToolsEntry + nil, // 13: daemon.v1.TaskProto.EnvironmentEntry + nil, // 14: daemon.v1.GetEnvironmentRequest.ToolsEntry } var file_api_daemon_v1_daemon_proto_depIdxs = []int32{ - 0, // 0: daemon.v1.DaemonService.Ping:input_type -> daemon.v1.PingRequest - 2, // 1: daemon.v1.DaemonService.Status:input_type -> daemon.v1.StatusRequest - 4, // 2: daemon.v1.DaemonService.Shutdown:input_type -> daemon.v1.ShutdownRequest - 1, // 3: daemon.v1.DaemonService.Ping:output_type -> daemon.v1.PingResponse - 3, // 4: daemon.v1.DaemonService.Status:output_type -> daemon.v1.StatusResponse - 5, // 5: daemon.v1.DaemonService.Shutdown:output_type -> daemon.v1.ShutdownResponse - 3, // [3:6] is the sub-list for method output_type - 0, // [0:3] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 6, // 0: daemon.v1.GetGraphRequest.config_mtimes:type_name -> daemon.v1.ConfigMtime + 12, // 1: daemon.v1.TaskProto.tools:type_name -> daemon.v1.TaskProto.ToolsEntry + 13, // 2: daemon.v1.TaskProto.environment:type_name -> daemon.v1.TaskProto.EnvironmentEntry + 8, // 3: daemon.v1.GetGraphResponse.tasks:type_name -> daemon.v1.TaskProto + 14, // 4: daemon.v1.GetEnvironmentRequest.tools:type_name -> daemon.v1.GetEnvironmentRequest.ToolsEntry + 0, // 5: daemon.v1.DaemonService.Ping:input_type -> daemon.v1.PingRequest + 2, // 6: daemon.v1.DaemonService.Status:input_type -> daemon.v1.StatusRequest + 4, // 7: daemon.v1.DaemonService.Shutdown:input_type -> daemon.v1.ShutdownRequest + 7, // 8: daemon.v1.DaemonService.GetGraph:input_type -> daemon.v1.GetGraphRequest + 10, // 9: daemon.v1.DaemonService.GetEnvironment:input_type -> daemon.v1.GetEnvironmentRequest + 1, // 10: daemon.v1.DaemonService.Ping:output_type -> daemon.v1.PingResponse + 3, // 11: daemon.v1.DaemonService.Status:output_type -> daemon.v1.StatusResponse + 5, // 12: daemon.v1.DaemonService.Shutdown:output_type -> daemon.v1.ShutdownResponse + 9, // 13: daemon.v1.DaemonService.GetGraph:output_type -> daemon.v1.GetGraphResponse + 11, // 14: daemon.v1.DaemonService.GetEnvironment:output_type -> daemon.v1.GetEnvironmentResponse + 10, // [10:15] is the sub-list for method 
output_type + 5, // [5:10] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name } func init() { file_api_daemon_v1_daemon_proto_init() } @@ -374,7 +808,7 @@ func file_api_daemon_v1_daemon_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_api_daemon_v1_daemon_proto_rawDesc), len(file_api_daemon_v1_daemon_proto_rawDesc)), NumEnums: 0, - NumMessages: 6, + NumMessages: 15, NumExtensions: 0, NumServices: 1, }, diff --git a/cli/api/daemon/v1/daemon.proto b/cli/api/daemon/v1/daemon.proto index 482f472..1df8659 100644 --- a/cli/api/daemon/v1/daemon.proto +++ b/cli/api/daemon/v1/daemon.proto @@ -13,6 +13,12 @@ service DaemonService { // Shutdown initiates graceful daemon termination. rpc Shutdown(ShutdownRequest) returns (ShutdownResponse); + + // GetGraph returns the parsed task graph, using mtime for cache invalidation. + rpc GetGraph(GetGraphRequest) returns (GetGraphResponse); + + // GetEnvironment returns resolved Nix environment variables for a toolset. + rpc GetEnvironment(GetEnvironmentRequest) returns (GetEnvironmentResponse); } message PingRequest {} @@ -40,3 +46,41 @@ message ShutdownRequest { message ShutdownResponse { bool success = 1; } + +message ConfigMtime { + string path = 1; + int64 mtime_unix_nano = 2; +} + +message GetGraphRequest { + string cwd = 1; + repeated ConfigMtime config_mtimes = 2; +} + +message TaskProto { + string name = 1; + repeated string command = 2; + repeated string inputs = 3; + repeated string outputs = 4; + map tools = 5; + repeated string dependencies = 6; + map environment = 7; + string working_dir = 8; + string rebuild_strategy = 9; +} + +message GetGraphResponse { + bool cache_hit = 1; + string root = 2; + repeated TaskProto tasks = 3; +} + +message GetEnvironmentRequest { + string env_id = 1; + map tools = 2; +} + +message GetEnvironmentResponse { + bool cache_hit = 1; + repeated string env_vars = 2; +} diff --git a/cli/api/daemon/v1/daemon_grpc.pb.go b/cli/api/daemon/v1/daemon_grpc.pb.go index 280f9b9..6d5f50f 100644 --- a/cli/api/daemon/v1/daemon_grpc.pb.go +++ b/cli/api/daemon/v1/daemon_grpc.pb.go @@ -20,9 +20,11 @@ import ( const _ = grpc.SupportPackageIsVersion9 const ( - DaemonService_Ping_FullMethodName = "/daemon.v1.DaemonService/Ping" - DaemonService_Status_FullMethodName = "/daemon.v1.DaemonService/Status" - DaemonService_Shutdown_FullMethodName = "/daemon.v1.DaemonService/Shutdown" + DaemonService_Ping_FullMethodName = "/daemon.v1.DaemonService/Ping" + DaemonService_Status_FullMethodName = "/daemon.v1.DaemonService/Status" + DaemonService_Shutdown_FullMethodName = "/daemon.v1.DaemonService/Shutdown" + DaemonService_GetGraph_FullMethodName = "/daemon.v1.DaemonService/GetGraph" + DaemonService_GetEnvironment_FullMethodName = "/daemon.v1.DaemonService/GetEnvironment" ) // DaemonServiceClient is the client API for DaemonService service. @@ -35,6 +37,10 @@ type DaemonServiceClient interface { Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) // Shutdown initiates graceful daemon termination. Shutdown(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error) + // GetGraph returns the parsed task graph, using mtime for cache invalidation. 
+ GetGraph(ctx context.Context, in *GetGraphRequest, opts ...grpc.CallOption) (*GetGraphResponse, error) + // GetEnvironment returns resolved Nix environment variables for a toolset. + GetEnvironment(ctx context.Context, in *GetEnvironmentRequest, opts ...grpc.CallOption) (*GetEnvironmentResponse, error) } type daemonServiceClient struct { @@ -75,6 +81,26 @@ func (c *daemonServiceClient) Shutdown(ctx context.Context, in *ShutdownRequest, return out, nil } +func (c *daemonServiceClient) GetGraph(ctx context.Context, in *GetGraphRequest, opts ...grpc.CallOption) (*GetGraphResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetGraphResponse) + err := c.cc.Invoke(ctx, DaemonService_GetGraph_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) GetEnvironment(ctx context.Context, in *GetEnvironmentRequest, opts ...grpc.CallOption) (*GetEnvironmentResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetEnvironmentResponse) + err := c.cc.Invoke(ctx, DaemonService_GetEnvironment_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + // DaemonServiceServer is the server API for DaemonService service. // All implementations must embed UnimplementedDaemonServiceServer // for forward compatibility. @@ -85,6 +111,10 @@ type DaemonServiceServer interface { Status(context.Context, *StatusRequest) (*StatusResponse, error) // Shutdown initiates graceful daemon termination. Shutdown(context.Context, *ShutdownRequest) (*ShutdownResponse, error) + // GetGraph returns the parsed task graph, using mtime for cache invalidation. + GetGraph(context.Context, *GetGraphRequest) (*GetGraphResponse, error) + // GetEnvironment returns resolved Nix environment variables for a toolset. 
+ GetEnvironment(context.Context, *GetEnvironmentRequest) (*GetEnvironmentResponse, error) mustEmbedUnimplementedDaemonServiceServer() } @@ -104,6 +134,12 @@ func (UnimplementedDaemonServiceServer) Status(context.Context, *StatusRequest) func (UnimplementedDaemonServiceServer) Shutdown(context.Context, *ShutdownRequest) (*ShutdownResponse, error) { return nil, status.Error(codes.Unimplemented, "method Shutdown not implemented") } +func (UnimplementedDaemonServiceServer) GetGraph(context.Context, *GetGraphRequest) (*GetGraphResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetGraph not implemented") +} +func (UnimplementedDaemonServiceServer) GetEnvironment(context.Context, *GetEnvironmentRequest) (*GetEnvironmentResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetEnvironment not implemented") +} func (UnimplementedDaemonServiceServer) mustEmbedUnimplementedDaemonServiceServer() {} func (UnimplementedDaemonServiceServer) testEmbeddedByValue() {} @@ -179,6 +215,42 @@ func _DaemonService_Shutdown_Handler(srv interface{}, ctx context.Context, dec f return interceptor(ctx, in, info, handler) } +func _DaemonService_GetGraph_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetGraphRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).GetGraph(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_GetGraph_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).GetGraph(ctx, req.(*GetGraphRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_GetEnvironment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetEnvironmentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).GetEnvironment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_GetEnvironment_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).GetEnvironment(ctx, req.(*GetEnvironmentRequest)) + } + return interceptor(ctx, in, info, handler) +} + // DaemonService_ServiceDesc is the grpc.ServiceDesc for DaemonService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -198,6 +270,14 @@ var DaemonService_ServiceDesc = grpc.ServiceDesc{ MethodName: "Shutdown", Handler: _DaemonService_Shutdown_Handler, }, + { + MethodName: "GetGraph", + Handler: _DaemonService_GetGraph_Handler, + }, + { + MethodName: "GetEnvironment", + Handler: _DaemonService_GetEnvironment_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "api/daemon/v1/daemon.proto", diff --git a/cli/internal/adapters/config/loader.go b/cli/internal/adapters/config/loader.go index 402311d..bfc4e60 100644 --- a/cli/internal/adapters/config/loader.go +++ b/cli/internal/adapters/config/loader.go @@ -519,3 +519,81 @@ func validateRebuildStrategy(value string) (domain.RebuildStrategy, error) { return "", domain.ErrInvalidRebuildStrategy } } + +// DiscoverConfigPaths finds same.yaml and same.work.yaml paths from cwd. 
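+// It must observe exactly the files Load would read; otherwise the daemon's
+// mtime-based cache validation could return stale graphs.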
+// Returns paths and their mtimes for cache validation. +// It walks up the directory tree and finds all config files that would be loaded +// for a workspace (including workspace file and all project files). +func (l *Loader) DiscoverConfigPaths(cwd string) (map[string]int64, error) { + paths := make(map[string]int64) + + // First, find the workspace or standalone config + currentDir := cwd + var standaloneCandidate string + + for { + workfilePath := filepath.Join(currentDir, domain.WorkFileName) + if info, err := os.Stat(workfilePath); err == nil { + // Found workspace file, add it + paths[workfilePath] = info.ModTime().UnixNano() + + // For workspace mode, also find all project same.yaml files + if err := l.discoverWorkspaceProjectPaths(currentDir, paths); err != nil { + return nil, zerr.Wrap(err, "failed to discover project paths") + } + + return paths, nil + } + + if standaloneCandidate == "" { + samefilePath := filepath.Join(currentDir, domain.SameFileName) + if info, err := os.Stat(samefilePath); err == nil { + standaloneCandidate = samefilePath + paths[samefilePath] = info.ModTime().UnixNano() + } + } + + parentDir := filepath.Dir(currentDir) + if parentDir == currentDir { + // Reached root + break + } + currentDir = parentDir + } + + if standaloneCandidate != "" { + // Standalone mode, only one config file + return paths, nil + } + + return nil, zerr.With(domain.ErrConfigNotFound, "cwd", cwd) +} + +// discoverWorkspaceProjectPaths finds all same.yaml files in workspace projects. +func (l *Loader) discoverWorkspaceProjectPaths(workspaceRoot string, paths map[string]int64) error { + workfilePath := filepath.Join(workspaceRoot, domain.WorkFileName) + //nolint:gosec // G304: Path is constructed from validated workspace root, safe for use + workfileData, readErr := os.ReadFile(workfilePath) + if readErr != nil { + return zerr.Wrap(readErr, "failed to read workfile") + } + + var workfile Workfile + if unmarshalErr := yaml.Unmarshal(workfileData, &workfile); unmarshalErr != nil { + return zerr.Wrap(unmarshalErr, "failed to parse workfile") + } + + projectPaths, resolveErr := l.resolveProjectPaths(workspaceRoot, workfile.Projects) + if resolveErr != nil { + return resolveErr + } + + for _, projectPath := range projectPaths { + sameYamlPath := filepath.Join(projectPath, domain.SameFileName) + if info, statErr := os.Stat(sameYamlPath); statErr == nil { + paths[sameYamlPath] = info.ModTime().UnixNano() + } + } + + return nil +} diff --git a/cli/internal/adapters/daemon/cache.go b/cli/internal/adapters/daemon/cache.go new file mode 100644 index 0000000..2945724 --- /dev/null +++ b/cli/internal/adapters/daemon/cache.go @@ -0,0 +1,75 @@ +// Package daemon provides the daemon server and client implementations. +package daemon + +import ( + "sync" + + "go.trai.ch/same/internal/core/domain" +) + +// ServerCache holds thread-safe in-memory caches for the daemon server. +type ServerCache struct { + mu sync.RWMutex + graphCache map[string]*domain.GraphCacheEntry // cwd -> entry + envCache map[string][]string // envID -> env vars +} + +// NewServerCache creates a new ServerCache instance. +func NewServerCache() *ServerCache { + return &ServerCache{ + graphCache: make(map[string]*domain.GraphCacheEntry), + envCache: make(map[string][]string), + } +} + +// GetGraph retrieves a cached graph for the given cwd and validates mtimes. +// Returns the graph and true if cache hit and valid, nil and false otherwise. 
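+// For example, an entry stored with {a.yaml: 100, b.yaml: 200} only matches a
+// client reporting exactly those two paths with those timestamps; an extra
+// path, a missing path, or any drifted mtime forces a fresh load.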
+func (c *ServerCache) GetGraph(cwd string, clientMtimes map[string]int64) (*domain.Graph, bool) { + c.mu.RLock() + defer c.mu.RUnlock() + + entry, exists := c.graphCache[cwd] + if !exists { + return nil, false + } + + // Validate mtimes match + if len(clientMtimes) != len(entry.Mtimes) { + return nil, false + } + + for path, clientMtime := range clientMtimes { + storedMtime, ok := entry.Mtimes[path] + if !ok || clientMtime != storedMtime { + return nil, false + } + } + + return entry.Graph, true +} + +// SetGraph stores a graph in the cache with its mtimes. +func (c *ServerCache) SetGraph(cwd string, entry *domain.GraphCacheEntry) { + c.mu.Lock() + defer c.mu.Unlock() + + c.graphCache[cwd] = entry +} + +// GetEnv retrieves cached environment variables for the given envID. +// Returns the env vars and true if cache hit, nil and false otherwise. +func (c *ServerCache) GetEnv(envID string) ([]string, bool) { + c.mu.RLock() + defer c.mu.RUnlock() + + env, exists := c.envCache[envID] + return env, exists +} + +// SetEnv stores environment variables in the cache. +func (c *ServerCache) SetEnv(envID string, env []string) { + c.mu.Lock() + defer c.mu.Unlock() + + c.envCache[envID] = env +} diff --git a/cli/internal/adapters/daemon/client.go b/cli/internal/adapters/daemon/client.go index a65ded1..172102a 100644 --- a/cli/internal/adapters/daemon/client.go +++ b/cli/internal/adapters/daemon/client.go @@ -67,6 +67,78 @@ func (c *Client) Shutdown(ctx context.Context) error { return err } +// GetGraph implements ports.DaemonClient. +func (c *Client) GetGraph(ctx context.Context, cwd string, configMtimes map[string]int64) (*domain.Graph, bool, error) { + // Build request + req := &daemonv1.GetGraphRequest{ + Cwd: cwd, + } + for path, mtime := range configMtimes { + req.ConfigMtimes = append(req.ConfigMtimes, &daemonv1.ConfigMtime{ + Path: path, + MtimeUnixNano: mtime, + }) + } + + // Call gRPC + resp, err := c.client.GetGraph(ctx, req) + if err != nil { + return nil, false, zerr.Wrap(err, "GetGraph RPC failed") + } + + // Convert response to domain.Graph + graph := domain.NewGraph() + for _, taskProto := range resp.Tasks { + task := &domain.Task{ + Name: domain.NewInternedString(taskProto.Name), + Command: taskProto.Command, + Inputs: c.stringsToInternedStrings(taskProto.Inputs), + Outputs: c.stringsToInternedStrings(taskProto.Outputs), + Tools: taskProto.Tools, + Dependencies: c.stringsToInternedStrings(taskProto.Dependencies), + Environment: taskProto.Environment, + WorkingDir: domain.NewInternedString(taskProto.WorkingDir), + RebuildStrategy: domain.RebuildStrategy(taskProto.RebuildStrategy), + } + if err := graph.AddTask(task); err != nil { + return nil, false, zerr.Wrap(err, "failed to add task to graph") + } + } + + // Set root (important: must be set after all tasks are added) + graph.SetRoot(resp.Root) + + return graph, resp.CacheHit, nil +} + +// GetEnvironment implements ports.DaemonClient. +func (c *Client) GetEnvironment( + ctx context.Context, + envID string, + tools map[string]string, +) (envVars []string, cacheHit bool, err error) { + req := &daemonv1.GetEnvironmentRequest{ + EnvId: envID, + Tools: tools, + } + + resp, err := c.client.GetEnvironment(ctx, req) + if err != nil { + return nil, false, zerr.Wrap(err, "GetEnvironment RPC failed") + } + + return resp.EnvVars, resp.CacheHit, nil +} + +// stringsToInternedStrings converts a slice of strings to InternedString. 
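+// Interning deduplicates the repeated path and task-name strings that arrive
+// over the wire while the graph is rebuilt.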
+func (c *Client) stringsToInternedStrings(strs []string) []domain.InternedString { + result := make([]domain.InternedString, len(strs)) + for i, s := range strs { + result[i] = domain.NewInternedString(s) + } + return result +} + // Close implements ports.DaemonClient. func (c *Client) Close() error { return c.conn.Close() diff --git a/cli/internal/adapters/daemon/server.go b/cli/internal/adapters/daemon/server.go index 4b92ba5..2f395c2 100644 --- a/cli/internal/adapters/daemon/server.go +++ b/cli/internal/adapters/daemon/server.go @@ -9,6 +9,7 @@ import ( "go.trai.ch/same/api/daemon/v1" "go.trai.ch/same/internal/core/domain" + "go.trai.ch/same/internal/core/ports" "go.trai.ch/zerr" "google.golang.org/grpc" ) @@ -16,9 +17,12 @@ import ( // Server implements the gRPC daemon service. type Server struct { daemonv1.UnimplementedDaemonServiceServer - lifecycle *Lifecycle - grpcServer *grpc.Server - listener net.Listener + lifecycle *Lifecycle + cache *ServerCache + configLoader ports.ConfigLoader + envFactory ports.EnvironmentFactory + grpcServer *grpc.Server + listener net.Listener } // NewServer creates a new daemon server. @@ -31,6 +35,23 @@ func NewServer(lifecycle *Lifecycle) *Server { return s } +// NewServerWithDeps creates a new daemon server with dependencies for handling graph and environment requests. +func NewServerWithDeps( + lifecycle *Lifecycle, + configLoader ports.ConfigLoader, + envFactory ports.EnvironmentFactory, +) *Server { + s := &Server{ + lifecycle: lifecycle, + cache: NewServerCache(), + configLoader: configLoader, + envFactory: envFactory, + grpcServer: grpc.NewServer(), + } + daemonv1.RegisterDaemonServiceServer(s.grpcServer, s) + return s +} + // Serve starts the gRPC server on the UDS. func (s *Server) Serve(ctx context.Context) error { socketPath := domain.DefaultDaemonSocketPath() @@ -124,3 +145,107 @@ func (s *Server) writePIDFile() error { pid := os.Getpid() return os.WriteFile(pidPath, []byte(fmt.Sprintf("%d", pid)), domain.PrivateFilePerm) } + +// GetGraph implements DaemonService.GetGraph. +// +//nolint:revive // ctx is used to satisfy the interface but not actively used in this method +func (s *Server) GetGraph(ctx context.Context, req *daemonv1.GetGraphRequest) (*daemonv1.GetGraphResponse, error) { + // Convert proto mtimes to map + clientMtimes := make(map[string]int64) + for _, mtime := range req.ConfigMtimes { + clientMtimes[mtime.Path] = mtime.MtimeUnixNano + } + + // Reset inactivity timer + s.lifecycle.ResetTimer() + + // Check cache + if graph, cacheHit := s.cache.GetGraph(req.Cwd, clientMtimes); cacheHit { + return s.graphToResponse(graph, true), nil + } + + // Cache miss or stale, load the graph + graph, err := s.configLoader.Load(req.Cwd) + if err != nil { + return nil, zerr.Wrap(err, "failed to load graph") + } + + // Store in cache + entry := &domain.GraphCacheEntry{ + Graph: graph, + ConfigPaths: make([]string, 0, len(clientMtimes)), + Mtimes: clientMtimes, + } + for path := range clientMtimes { + entry.ConfigPaths = append(entry.ConfigPaths, path) + } + s.cache.SetGraph(req.Cwd, entry) + + return s.graphToResponse(graph, false), nil +} + +// GetEnvironment implements DaemonService.GetEnvironment. 
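+// It resets the inactivity timer, answers from the env cache when the envID is
+// known, and otherwise resolves the toolset via the environment factory before
+// caching the result.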
+func (s *Server) GetEnvironment( + ctx context.Context, + req *daemonv1.GetEnvironmentRequest, +) (*daemonv1.GetEnvironmentResponse, error) { + // Reset inactivity timer + s.lifecycle.ResetTimer() + + // Check cache + if envVars, cacheHit := s.cache.GetEnv(req.EnvId); cacheHit { + return &daemonv1.GetEnvironmentResponse{ + CacheHit: true, + EnvVars: envVars, + }, nil + } + + // Cache miss, resolve environment + envVars, err := s.envFactory.GetEnvironment(ctx, req.Tools) + if err != nil { + return nil, zerr.Wrap(err, "failed to get environment") + } + + // Store in cache + s.cache.SetEnv(req.EnvId, envVars) + + return &daemonv1.GetEnvironmentResponse{ + CacheHit: false, + EnvVars: envVars, + }, nil +} + +// graphToResponse converts a domain.Graph to a GetGraphResponse proto message. +func (s *Server) graphToResponse(graph *domain.Graph, cacheHit bool) *daemonv1.GetGraphResponse { + resp := &daemonv1.GetGraphResponse{ + CacheHit: cacheHit, + Root: graph.Root(), + } + + // Convert tasks + for task := range graph.Walk() { + taskProto := &daemonv1.TaskProto{ + Name: task.Name.String(), + Command: task.Command, + Inputs: s.internedStringsToStrings(task.Inputs), + Outputs: s.internedStringsToStrings(task.Outputs), + Tools: task.Tools, + Dependencies: s.internedStringsToStrings(task.Dependencies), + Environment: task.Environment, + WorkingDir: task.WorkingDir.String(), + RebuildStrategy: string(task.RebuildStrategy), + } + resp.Tasks = append(resp.Tasks, taskProto) + } + + return resp +} + +// internedStringsToStrings converts a slice of InternedString to plain strings. +func (s *Server) internedStringsToStrings(interned []domain.InternedString) []string { + result := make([]string, len(interned)) + for i, str := range interned { + result[i] = str.String() + } + return result +} diff --git a/cli/internal/app/app.go b/cli/internal/app/app.go index aa5a709..bec8e9b 100644 --- a/cli/internal/app/app.go +++ b/cli/internal/app/app.go @@ -85,10 +85,39 @@ type RunOptions struct { // //nolint:cyclop // orchestration function func (a *App) Run(ctx context.Context, targetNames []string, opts RunOptions) error { - // 1. Load the graph - graph, err := a.configLoader.Load(".") - if err != nil { - return zerr.Wrap(err, "failed to load configuration") + // 1. Connect to daemon (if available) and load graph from daemon or fallback to local + var graph *domain.Graph + var client ports.DaemonClient + var err error + + client, clientErr := a.connector.Connect(ctx) + if clientErr == nil { + // Daemon is available, try to get graph from daemon + defer func() { + _ = client.Close() + }() + + // Discover config paths and mtimes + mtimes, mtimeErr := a.configLoader.DiscoverConfigPaths(".") + if mtimeErr != nil { + return zerr.Wrap(mtimeErr, "failed to discover config paths") + } + + // Try to get graph from daemon + graph, _, err = client.GetGraph(ctx, ".", mtimes) + if err != nil { + // Fallback to local loading if daemon fails + if graph, err = a.configLoader.Load("."); err != nil { + return zerr.Wrap(err, "failed to load configuration") + } + } + } else { + // Daemon not available, use local loading + var loadErr error + graph, loadErr = a.configLoader.Load(".") + if loadErr != nil { + return zerr.Wrap(loadErr, "failed to load configuration") + } } // 2. Validate targets @@ -139,6 +168,11 @@ func (a *App) Run(ctx context.Context, targetNames []string, opts RunOptions) er a.envFactory, ) + // Pass daemon client to scheduler if available + if client != nil { + sched.WithDaemon(client) + } + // 6. 
Run Renderer and Scheduler concurrently g, ctx := errgroup.WithContext(ctx) diff --git a/cli/internal/core/domain/cache.go b/cli/internal/core/domain/cache.go new file mode 100644 index 0000000..b69c649 --- /dev/null +++ b/cli/internal/core/domain/cache.go @@ -0,0 +1,9 @@ +// Package domain contains core domain types for caching. +package domain + +// GraphCacheEntry holds a cached graph with its validation metadata. +type GraphCacheEntry struct { + Graph *Graph + ConfigPaths []string // Paths to same.yaml / same.work.yaml + Mtimes map[string]int64 // path -> mtime in UnixNano +} diff --git a/cli/internal/core/ports/config_loader.go b/cli/internal/core/ports/config_loader.go index e617d17..4e51006 100644 --- a/cli/internal/core/ports/config_loader.go +++ b/cli/internal/core/ports/config_loader.go @@ -8,4 +8,8 @@ import "go.trai.ch/same/internal/core/domain" type ConfigLoader interface { // Load reads the configuration from the given working directory and returns the task graph. Load(cwd string) (*domain.Graph, error) + + // DiscoverConfigPaths finds configuration file paths and their modification times. + // Returns a map of config file paths to their mtime in UnixNano. + DiscoverConfigPaths(cwd string) (map[string]int64, error) } diff --git a/cli/internal/core/ports/daemon.go b/cli/internal/core/ports/daemon.go index 23e48c5..5a10737 100644 --- a/cli/internal/core/ports/daemon.go +++ b/cli/internal/core/ports/daemon.go @@ -3,6 +3,8 @@ package ports import ( "context" "time" + + "go.trai.ch/same/internal/core/domain" ) //go:generate mockgen -source=daemon.go -destination=mocks/mock_daemon.go -package=mocks @@ -27,6 +29,21 @@ type DaemonClient interface { // Shutdown requests a graceful daemon shutdown. Shutdown(ctx context.Context) error + // GetGraph retrieves the task graph from the daemon. + // configMtimes is a map of config file paths to their mtime (UnixNano). + GetGraph( + ctx context.Context, + cwd string, + configMtimes map[string]int64, + ) (graph *domain.Graph, cacheHit bool, err error) + + // GetEnvironment retrieves resolved Nix environment variables. + GetEnvironment( + ctx context.Context, + envID string, + tools map[string]string, + ) (envVars []string, cacheHit bool, err error) + // Close releases client resources. Close() error } diff --git a/cli/internal/engine/scheduler/scheduler.go b/cli/internal/engine/scheduler/scheduler.go index 1c6548b..f0078d1 100644 --- a/cli/internal/engine/scheduler/scheduler.go +++ b/cli/internal/engine/scheduler/scheduler.go @@ -39,6 +39,7 @@ type Scheduler struct { resolver ports.InputResolver tracer ports.Tracer envFactory ports.EnvironmentFactory + daemon ports.DaemonClient mu sync.RWMutex taskStatus map[domain.InternedString]TaskStatus @@ -67,6 +68,12 @@ func NewScheduler( return s } +// WithDaemon sets the daemon client for the scheduler and returns itself for chaining. +func (s *Scheduler) WithDaemon(daemon ports.DaemonClient) *Scheduler { + s.daemon = daemon + return s +} + // initTaskStatuses initializes the status of tasks in the graph to Pending. func (s *Scheduler) initTaskStatuses(tasks []domain.InternedString) { s.mu.Lock() @@ -267,6 +274,8 @@ func (state *schedulerRunState) runExecutionLoop() error { } // prepareEnvironments resolves all required environments concurrently. 
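+// When a daemon client is configured it is consulted first; any daemon error
+// falls back to the local environment factory.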
+// +//nolint:cyclop // complexity due to daemon fallback logic func (state *schedulerRunState) prepareEnvironments(ctx context.Context) error { // Identify unique environment IDs needed for this run neededEnvIDs := make(map[string]map[string]string) // envID -> tools map (sample) @@ -311,7 +320,21 @@ func (state *schedulerRunState) prepareEnvironments(ctx context.Context) error { return nil } - env, err := state.s.envFactory.GetEnvironment(ctx, item.tools) + var env []string + var err error + + // Try to use daemon client if available + if state.s.daemon != nil { + env, _, err = state.s.daemon.GetEnvironment(ctx, item.id, item.tools) + if err != nil { + // Fallback to local factory on daemon error + env, err = state.s.envFactory.GetEnvironment(ctx, item.tools) + } + } else { + // Use local factory when daemon is not available + env, err = state.s.envFactory.GetEnvironment(ctx, item.tools) + } + if err != nil { return zerr.Wrap(err, "failed to hydrate environment") } From 8957662208dd85c4a5409c60c79322b66178940f Mon Sep 17 00:00:00 2001 From: Luca Fondo Date: Tue, 27 Jan 2026 16:21:04 +0100 Subject: [PATCH 03/20] fix(#74): address code review findings Must Fix: - Add nil pointer guards in GetGraph/GetEnvironment RPC handlers Prevents panic when Server is created via NewServer() without deps Should Fix: - Fix daemon client availability check: verify both err and client - Add Graph.Validate() call after deserialization to compute dependencies - Use boolean flag for clearer daemon availability tracking - Document ctx parameter limitation in GetGraph (no cancellation propagation) - Document ServerCache mtime validation assumes shared filesystem view --- cli/internal/adapters/daemon/cache.go | 6 ++++++ cli/internal/adapters/daemon/client.go | 5 +++++ cli/internal/adapters/daemon/server.go | 17 ++++++++++++++++- cli/internal/app/app.go | 6 ++++-- 4 files changed, 31 insertions(+), 3 deletions(-) diff --git a/cli/internal/adapters/daemon/cache.go b/cli/internal/adapters/daemon/cache.go index 2945724..4f1dc28 100644 --- a/cli/internal/adapters/daemon/cache.go +++ b/cli/internal/adapters/daemon/cache.go @@ -8,6 +8,12 @@ import ( ) // ServerCache holds thread-safe in-memory caches for the daemon server. +// +// Cache Validation Assumption: +// The cache validation logic trusts client-provided mtime values and compares them +// against stored mtimes without verifying actual file mtimes on the server. +// This design assumes the daemon and client share the same filesystem view, which +// is valid for local Unix-socket daemons but would need revision for remote scenarios. 
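+//
+// For illustration only (mtimesMatch is a hypothetical helper, not part of this
+// patch), validation under this assumption reduces to comparing the
+// client-reported map against the stored one:
+//
+//	func mtimesMatch(stored, client map[string]int64) bool {
+//		if len(stored) != len(client) {
+//			return false
+//		}
+//		for path, mtime := range stored {
+//			if client[path] != mtime {
+//				return false
+//			}
+//		}
+//		return true
+//	}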
type ServerCache struct { mu sync.RWMutex graphCache map[string]*domain.GraphCacheEntry // cwd -> entry diff --git a/cli/internal/adapters/daemon/client.go b/cli/internal/adapters/daemon/client.go index 172102a..6fb97cd 100644 --- a/cli/internal/adapters/daemon/client.go +++ b/cli/internal/adapters/daemon/client.go @@ -108,6 +108,11 @@ func (c *Client) GetGraph(ctx context.Context, cwd string, configMtimes map[stri // Set root (important: must be set after all tasks are added) graph.SetRoot(resp.Root) + // Validate the graph to compute executionOrder and dependents + if err := graph.Validate(); err != nil { + return nil, false, zerr.Wrap(err, "failed to validate reconstructed graph") + } + return graph, resp.CacheHit, nil } diff --git a/cli/internal/adapters/daemon/server.go b/cli/internal/adapters/daemon/server.go index 2f395c2..798e969 100644 --- a/cli/internal/adapters/daemon/server.go +++ b/cli/internal/adapters/daemon/server.go @@ -12,6 +12,8 @@ import ( "go.trai.ch/same/internal/core/ports" "go.trai.ch/zerr" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) // Server implements the gRPC daemon service. @@ -147,9 +149,17 @@ func (s *Server) writePIDFile() error { } // GetGraph implements DaemonService.GetGraph. +// Note: ctx parameter satisfies the gRPC interface but is not currently used for cancellation +// because configLoader.Load() does not accept context. Future enhancement: add context support +// to ConfigLoader.Load() for proper cancellation propagation. // -//nolint:revive // ctx is used to satisfy the interface but not actively used in this method +//nolint:revive // ctx satisfies gRPC interface; see above note for future improvement func (s *Server) GetGraph(ctx context.Context, req *daemonv1.GetGraphRequest) (*daemonv1.GetGraphResponse, error) { + // Guard: ensure server is configured for graph operations + if s.cache == nil || s.configLoader == nil { + return nil, status.Error(codes.FailedPrecondition, "server not configured for graph operations") + } + // Convert proto mtimes to map clientMtimes := make(map[string]int64) for _, mtime := range req.ConfigMtimes { @@ -189,6 +199,11 @@ func (s *Server) GetEnvironment( ctx context.Context, req *daemonv1.GetEnvironmentRequest, ) (*daemonv1.GetEnvironmentResponse, error) { + // Guard: ensure server is configured for environment operations + if s.cache == nil || s.envFactory == nil { + return nil, status.Error(codes.FailedPrecondition, "server not configured for environment operations") + } + // Reset inactivity timer s.lifecycle.ResetTimer() diff --git a/cli/internal/app/app.go b/cli/internal/app/app.go index bec8e9b..2763e9e 100644 --- a/cli/internal/app/app.go +++ b/cli/internal/app/app.go @@ -88,11 +88,13 @@ func (a *App) Run(ctx context.Context, targetNames []string, opts RunOptions) er // 1. 
Connect to daemon (if available) and load graph from daemon or fallback to local var graph *domain.Graph var client ports.DaemonClient + var daemonAvailable bool var err error client, clientErr := a.connector.Connect(ctx) - if clientErr == nil { + if clientErr == nil && client != nil { // Daemon is available, try to get graph from daemon + daemonAvailable = true defer func() { _ = client.Close() }() @@ -169,7 +171,7 @@ func (a *App) Run(ctx context.Context, targetNames []string, opts RunOptions) er ) // Pass daemon client to scheduler if available - if client != nil { + if daemonAvailable { sched.WithDaemon(client) } From 1dec8b934e6a44280f24d12757581106eb4ff66c Mon Sep 17 00:00:00 2001 From: Luca Fondo Date: Tue, 27 Jan 2026 16:29:29 +0100 Subject: [PATCH 04/20] refactor(#74): address non-blocking review suggestions 1. Consistent guard order: Both GetGraph and GetEnvironment now check cache first (line 159, 203), then their specific dependencies in struct declaration order (configLoader, envFactory respectively). 2. Explicit return semantics: GetGraph now uses named return values (graph *domain.Graph, cacheHit bool, err error) to match GetEnvironment's pattern, making the three-value return (result, cacheHit, error) explicit. --- cli/internal/adapters/daemon/client.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cli/internal/adapters/daemon/client.go b/cli/internal/adapters/daemon/client.go index 6fb97cd..7c4880b 100644 --- a/cli/internal/adapters/daemon/client.go +++ b/cli/internal/adapters/daemon/client.go @@ -68,7 +68,11 @@ func (c *Client) Shutdown(ctx context.Context) error { } // GetGraph implements ports.DaemonClient. -func (c *Client) GetGraph(ctx context.Context, cwd string, configMtimes map[string]int64) (*domain.Graph, bool, error) { +func (c *Client) GetGraph( + ctx context.Context, + cwd string, + configMtimes map[string]int64, +) (graph *domain.Graph, cacheHit bool, err error) { // Build request req := &daemonv1.GetGraphRequest{ Cwd: cwd, @@ -87,7 +91,7 @@ func (c *Client) GetGraph(ctx context.Context, cwd string, configMtimes map[stri } // Convert response to domain.Graph - graph := domain.NewGraph() + graph = domain.NewGraph() for _, taskProto := range resp.Tasks { task := &domain.Task{ Name: domain.NewInternedString(taskProto.Name), From 9af4eee7f3a7e048565b5db1438a81235783e947 Mon Sep 17 00:00:00 2001 From: Luca Fondo Date: Wed, 28 Jan 2026 00:10:48 +0100 Subject: [PATCH 05/20] feat(#74): implement proactive input hashing via file-system watching Add background file-watching service in daemon that proactively computes and caches input hashes, eliminating blocking hash computation during runs. 
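As a minimal, self-contained sketch of the resulting watch -> debounce ->
invalidate loop (it uses only the fsnotify API this patch depends on; the 50ms
window matches the debouncer below, everything else is illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		panic(err)
	}
	defer w.Close()
	if err := w.Add("."); err != nil { // one directory; the real watcher walks recursively
		panic(err)
	}

	pending := map[string]struct{}{}
	timer := time.NewTimer(time.Hour)
	timer.Stop()

	for {
		select {
		case ev := <-w.Events:
			pending[ev.Name] = struct{}{}      // coalesce duplicate events per path
			timer.Reset(50 * time.Millisecond) // restart the debounce window
		case <-timer.C:
			paths := make([]string, 0, len(pending))
			for p := range pending {
				paths = append(paths, p)
			}
			pending = map[string]struct{}{}
			fmt.Println("invalidate:", paths) // the daemon rehashes affected tasks here
		}
	}
}

The individual changes: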
- Add watcher adapter using fsnotify for recursive directory monitoring - Implement debouncer to coalesce rapid file changes (50ms window) - Add hash cache with path-to-task index for efficient invalidation - Extend gRPC API with GetInputHash RPC (Ready/Pending/Unknown states) - Wire watcher service into daemon server lifecycle - Use unique.Handle[string] for path interning to reduce memory --- cli/api/daemon/v1/daemon.pb.go | 266 ++++++++++++++++--- cli/api/daemon/v1/daemon.proto | 19 ++ cli/api/daemon/v1/daemon_grpc.pb.go | 40 +++ cli/cmd/same/commands/commands_test.go | 5 + cli/go.mod | 1 + cli/go.sum | 2 + cli/internal/adapters/daemon/client.go | 34 +++ cli/internal/adapters/daemon/server.go | 50 ++++ cli/internal/adapters/watcher/debouncer.go | 100 +++++++ cli/internal/adapters/watcher/hash_cache.go | 277 ++++++++++++++++++++ cli/internal/adapters/watcher/node.go | 65 +++++ cli/internal/adapters/watcher/watcher.go | 186 +++++++++++++ cli/internal/app/app_test.go | 6 + cli/internal/core/domain/hash_cache.go | 16 ++ cli/internal/core/ports/daemon.go | 7 + cli/internal/core/ports/input_hash_cache.go | 35 +++ cli/internal/core/ports/watcher.go | 39 +++ 17 files changed, 1108 insertions(+), 40 deletions(-) create mode 100644 cli/internal/adapters/watcher/debouncer.go create mode 100644 cli/internal/adapters/watcher/hash_cache.go create mode 100644 cli/internal/adapters/watcher/node.go create mode 100644 cli/internal/adapters/watcher/watcher.go create mode 100644 cli/internal/core/domain/hash_cache.go create mode 100644 cli/internal/core/ports/input_hash_cache.go create mode 100644 cli/internal/core/ports/watcher.go diff --git a/cli/api/daemon/v1/daemon.pb.go b/cli/api/daemon/v1/daemon.pb.go index 7248b6b..290e321 100644 --- a/cli/api/daemon/v1/daemon.pb.go +++ b/cli/api/daemon/v1/daemon.pb.go @@ -22,6 +22,55 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type GetInputHashResponse_State int32 + +const ( + GetInputHashResponse_READY GetInputHashResponse_State = 0 + GetInputHashResponse_PENDING GetInputHashResponse_State = 1 + GetInputHashResponse_UNKNOWN GetInputHashResponse_State = 2 +) + +// Enum value maps for GetInputHashResponse_State. +var ( + GetInputHashResponse_State_name = map[int32]string{ + 0: "READY", + 1: "PENDING", + 2: "UNKNOWN", + } + GetInputHashResponse_State_value = map[string]int32{ + "READY": 0, + "PENDING": 1, + "UNKNOWN": 2, + } +) + +func (x GetInputHashResponse_State) Enum() *GetInputHashResponse_State { + p := new(GetInputHashResponse_State) + *p = x + return p +} + +func (x GetInputHashResponse_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GetInputHashResponse_State) Descriptor() protoreflect.EnumDescriptor { + return file_api_daemon_v1_daemon_proto_enumTypes[0].Descriptor() +} + +func (GetInputHashResponse_State) Type() protoreflect.EnumType { + return &file_api_daemon_v1_daemon_proto_enumTypes[0] +} + +func (x GetInputHashResponse_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GetInputHashResponse_State.Descriptor instead. 
+func (GetInputHashResponse_State) EnumDescriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{13, 0} +} + type PingRequest struct { state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields @@ -680,6 +729,118 @@ func (x *GetEnvironmentResponse) GetEnvVars() []string { return nil } +type GetInputHashRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + TaskName string `protobuf:"bytes,1,opt,name=task_name,json=taskName,proto3" json:"task_name,omitempty"` + Root string `protobuf:"bytes,2,opt,name=root,proto3" json:"root,omitempty"` + Environment map[string]string `protobuf:"bytes,3,rep,name=environment,proto3" json:"environment,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetInputHashRequest) Reset() { + *x = GetInputHashRequest{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetInputHashRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetInputHashRequest) ProtoMessage() {} + +func (x *GetInputHashRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetInputHashRequest.ProtoReflect.Descriptor instead. +func (*GetInputHashRequest) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{12} +} + +func (x *GetInputHashRequest) GetTaskName() string { + if x != nil { + return x.TaskName + } + return "" +} + +func (x *GetInputHashRequest) GetRoot() string { + if x != nil { + return x.Root + } + return "" +} + +func (x *GetInputHashRequest) GetEnvironment() map[string]string { + if x != nil { + return x.Environment + } + return nil +} + +type GetInputHashResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + State GetInputHashResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=daemon.v1.GetInputHashResponse_State" json:"state,omitempty"` + Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetInputHashResponse) Reset() { + *x = GetInputHashResponse{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetInputHashResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetInputHashResponse) ProtoMessage() {} + +func (x *GetInputHashResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetInputHashResponse.ProtoReflect.Descriptor instead. 
+func (*GetInputHashResponse) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{13} +} + +func (x *GetInputHashResponse) GetState() GetInputHashResponse_State { + if x != nil { + return x.State + } + return GetInputHashResponse_READY +} + +func (x *GetInputHashResponse) GetHash() string { + if x != nil { + return x.Hash + } + return "" +} + var File_api_daemon_v1_daemon_proto protoreflect.FileDescriptor const file_api_daemon_v1_daemon_proto_rawDesc = "" + @@ -736,13 +897,28 @@ const file_api_daemon_v1_daemon_proto_rawDesc = "" + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"P\n" + "\x16GetEnvironmentResponse\x12\x1b\n" + "\tcache_hit\x18\x01 \x01(\bR\bcacheHit\x12\x19\n" + - "\benv_vars\x18\x02 \x03(\tR\aenvVars2\xe8\x02\n" + + "\benv_vars\x18\x02 \x03(\tR\aenvVars\"\xd9\x01\n" + + "\x13GetInputHashRequest\x12\x1b\n" + + "\ttask_name\x18\x01 \x01(\tR\btaskName\x12\x12\n" + + "\x04root\x18\x02 \x01(\tR\x04root\x12Q\n" + + "\venvironment\x18\x03 \x03(\v2/.daemon.v1.GetInputHashRequest.EnvironmentEntryR\venvironment\x1a>\n" + + "\x10EnvironmentEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\x95\x01\n" + + "\x14GetInputHashResponse\x12;\n" + + "\x05state\x18\x01 \x01(\x0e2%.daemon.v1.GetInputHashResponse.StateR\x05state\x12\x12\n" + + "\x04hash\x18\x02 \x01(\tR\x04hash\",\n" + + "\x05State\x12\t\n" + + "\x05READY\x10\x00\x12\v\n" + + "\aPENDING\x10\x01\x12\v\n" + + "\aUNKNOWN\x10\x022\xb9\x03\n" + "\rDaemonService\x127\n" + "\x04Ping\x12\x16.daemon.v1.PingRequest\x1a\x17.daemon.v1.PingResponse\x12=\n" + "\x06Status\x12\x18.daemon.v1.StatusRequest\x1a\x19.daemon.v1.StatusResponse\x12C\n" + "\bShutdown\x12\x1a.daemon.v1.ShutdownRequest\x1a\x1b.daemon.v1.ShutdownResponse\x12C\n" + "\bGetGraph\x12\x1a.daemon.v1.GetGraphRequest\x1a\x1b.daemon.v1.GetGraphResponse\x12U\n" + - "\x0eGetEnvironment\x12 .daemon.v1.GetEnvironmentRequest\x1a!.daemon.v1.GetEnvironmentResponseB(Z&go.trai.ch/same/api/daemon/v1;daemonv1b\x06proto3" + "\x0eGetEnvironment\x12 .daemon.v1.GetEnvironmentRequest\x1a!.daemon.v1.GetEnvironmentResponse\x12O\n" + + "\fGetInputHash\x12\x1e.daemon.v1.GetInputHashRequest\x1a\x1f.daemon.v1.GetInputHashResponseB(Z&go.trai.ch/same/api/daemon/v1;daemonv1b\x06proto3" var ( file_api_daemon_v1_daemon_proto_rawDescOnce sync.Once @@ -756,45 +932,54 @@ func file_api_daemon_v1_daemon_proto_rawDescGZIP() []byte { return file_api_daemon_v1_daemon_proto_rawDescData } -var file_api_daemon_v1_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_api_daemon_v1_daemon_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_api_daemon_v1_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 18) var file_api_daemon_v1_daemon_proto_goTypes = []any{ - (*PingRequest)(nil), // 0: daemon.v1.PingRequest - (*PingResponse)(nil), // 1: daemon.v1.PingResponse - (*StatusRequest)(nil), // 2: daemon.v1.StatusRequest - (*StatusResponse)(nil), // 3: daemon.v1.StatusResponse - (*ShutdownRequest)(nil), // 4: daemon.v1.ShutdownRequest - (*ShutdownResponse)(nil), // 5: daemon.v1.ShutdownResponse - (*ConfigMtime)(nil), // 6: daemon.v1.ConfigMtime - (*GetGraphRequest)(nil), // 7: daemon.v1.GetGraphRequest - (*TaskProto)(nil), // 8: daemon.v1.TaskProto - (*GetGraphResponse)(nil), // 9: daemon.v1.GetGraphResponse - (*GetEnvironmentRequest)(nil), // 10: daemon.v1.GetEnvironmentRequest - (*GetEnvironmentResponse)(nil), // 11: daemon.v1.GetEnvironmentResponse - nil, // 12: daemon.v1.TaskProto.ToolsEntry - 
nil, // 13: daemon.v1.TaskProto.EnvironmentEntry - nil, // 14: daemon.v1.GetEnvironmentRequest.ToolsEntry + (GetInputHashResponse_State)(0), // 0: daemon.v1.GetInputHashResponse.State + (*PingRequest)(nil), // 1: daemon.v1.PingRequest + (*PingResponse)(nil), // 2: daemon.v1.PingResponse + (*StatusRequest)(nil), // 3: daemon.v1.StatusRequest + (*StatusResponse)(nil), // 4: daemon.v1.StatusResponse + (*ShutdownRequest)(nil), // 5: daemon.v1.ShutdownRequest + (*ShutdownResponse)(nil), // 6: daemon.v1.ShutdownResponse + (*ConfigMtime)(nil), // 7: daemon.v1.ConfigMtime + (*GetGraphRequest)(nil), // 8: daemon.v1.GetGraphRequest + (*TaskProto)(nil), // 9: daemon.v1.TaskProto + (*GetGraphResponse)(nil), // 10: daemon.v1.GetGraphResponse + (*GetEnvironmentRequest)(nil), // 11: daemon.v1.GetEnvironmentRequest + (*GetEnvironmentResponse)(nil), // 12: daemon.v1.GetEnvironmentResponse + (*GetInputHashRequest)(nil), // 13: daemon.v1.GetInputHashRequest + (*GetInputHashResponse)(nil), // 14: daemon.v1.GetInputHashResponse + nil, // 15: daemon.v1.TaskProto.ToolsEntry + nil, // 16: daemon.v1.TaskProto.EnvironmentEntry + nil, // 17: daemon.v1.GetEnvironmentRequest.ToolsEntry + nil, // 18: daemon.v1.GetInputHashRequest.EnvironmentEntry } var file_api_daemon_v1_daemon_proto_depIdxs = []int32{ - 6, // 0: daemon.v1.GetGraphRequest.config_mtimes:type_name -> daemon.v1.ConfigMtime - 12, // 1: daemon.v1.TaskProto.tools:type_name -> daemon.v1.TaskProto.ToolsEntry - 13, // 2: daemon.v1.TaskProto.environment:type_name -> daemon.v1.TaskProto.EnvironmentEntry - 8, // 3: daemon.v1.GetGraphResponse.tasks:type_name -> daemon.v1.TaskProto - 14, // 4: daemon.v1.GetEnvironmentRequest.tools:type_name -> daemon.v1.GetEnvironmentRequest.ToolsEntry - 0, // 5: daemon.v1.DaemonService.Ping:input_type -> daemon.v1.PingRequest - 2, // 6: daemon.v1.DaemonService.Status:input_type -> daemon.v1.StatusRequest - 4, // 7: daemon.v1.DaemonService.Shutdown:input_type -> daemon.v1.ShutdownRequest - 7, // 8: daemon.v1.DaemonService.GetGraph:input_type -> daemon.v1.GetGraphRequest - 10, // 9: daemon.v1.DaemonService.GetEnvironment:input_type -> daemon.v1.GetEnvironmentRequest - 1, // 10: daemon.v1.DaemonService.Ping:output_type -> daemon.v1.PingResponse - 3, // 11: daemon.v1.DaemonService.Status:output_type -> daemon.v1.StatusResponse - 5, // 12: daemon.v1.DaemonService.Shutdown:output_type -> daemon.v1.ShutdownResponse - 9, // 13: daemon.v1.DaemonService.GetGraph:output_type -> daemon.v1.GetGraphResponse - 11, // 14: daemon.v1.DaemonService.GetEnvironment:output_type -> daemon.v1.GetEnvironmentResponse - 10, // [10:15] is the sub-list for method output_type - 5, // [5:10] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name + 7, // 0: daemon.v1.GetGraphRequest.config_mtimes:type_name -> daemon.v1.ConfigMtime + 15, // 1: daemon.v1.TaskProto.tools:type_name -> daemon.v1.TaskProto.ToolsEntry + 16, // 2: daemon.v1.TaskProto.environment:type_name -> daemon.v1.TaskProto.EnvironmentEntry + 9, // 3: daemon.v1.GetGraphResponse.tasks:type_name -> daemon.v1.TaskProto + 17, // 4: daemon.v1.GetEnvironmentRequest.tools:type_name -> daemon.v1.GetEnvironmentRequest.ToolsEntry + 18, // 5: daemon.v1.GetInputHashRequest.environment:type_name -> daemon.v1.GetInputHashRequest.EnvironmentEntry + 0, // 6: daemon.v1.GetInputHashResponse.state:type_name -> daemon.v1.GetInputHashResponse.State + 1, // 7: 
daemon.v1.DaemonService.Ping:input_type -> daemon.v1.PingRequest
+	3,  // 8: daemon.v1.DaemonService.Status:input_type -> daemon.v1.StatusRequest
+	5,  // 9: daemon.v1.DaemonService.Shutdown:input_type -> daemon.v1.ShutdownRequest
+	8,  // 10: daemon.v1.DaemonService.GetGraph:input_type -> daemon.v1.GetGraphRequest
+	11, // 11: daemon.v1.DaemonService.GetEnvironment:input_type -> daemon.v1.GetEnvironmentRequest
+	13, // 12: daemon.v1.DaemonService.GetInputHash:input_type -> daemon.v1.GetInputHashRequest
+	2,  // 13: daemon.v1.DaemonService.Ping:output_type -> daemon.v1.PingResponse
+	4,  // 14: daemon.v1.DaemonService.Status:output_type -> daemon.v1.StatusResponse
+	6,  // 15: daemon.v1.DaemonService.Shutdown:output_type -> daemon.v1.ShutdownResponse
+	10, // 16: daemon.v1.DaemonService.GetGraph:output_type -> daemon.v1.GetGraphResponse
+	12, // 17: daemon.v1.DaemonService.GetEnvironment:output_type -> daemon.v1.GetEnvironmentResponse
+	14, // 18: daemon.v1.DaemonService.GetInputHash:output_type -> daemon.v1.GetInputHashResponse
+	13, // [13:19] is the sub-list for method output_type
+	7,  // [7:13] is the sub-list for method input_type
+	7,  // [7:7] is the sub-list for extension type_name
+	7,  // [7:7] is the sub-list for extension extendee
+	0,  // [0:7] is the sub-list for field type_name
 }

 func init() { file_api_daemon_v1_daemon_proto_init() }
@@ -807,13 +992,14 @@ func file_api_daemon_v1_daemon_proto_init() {
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: unsafe.Slice(unsafe.StringData(file_api_daemon_v1_daemon_proto_rawDesc), len(file_api_daemon_v1_daemon_proto_rawDesc)),
-			NumEnums:      0,
-			NumMessages:   15,
+			NumEnums:      1,
+			NumMessages:   18,
 			NumExtensions: 0,
 			NumServices:   1,
 		},
 		GoTypes:           file_api_daemon_v1_daemon_proto_goTypes,
 		DependencyIndexes: file_api_daemon_v1_daemon_proto_depIdxs,
+		EnumInfos:         file_api_daemon_v1_daemon_proto_enumTypes,
 		MessageInfos:      file_api_daemon_v1_daemon_proto_msgTypes,
 	}.Build()
 	File_api_daemon_v1_daemon_proto = out.File
diff --git a/cli/api/daemon/v1/daemon.proto b/cli/api/daemon/v1/daemon.proto
index 1df8659..0fe1727 100644
--- a/cli/api/daemon/v1/daemon.proto
+++ b/cli/api/daemon/v1/daemon.proto
@@ -19,6 +19,9 @@ service DaemonService {

   // GetEnvironment returns resolved Nix environment variables for a toolset.
   rpc GetEnvironment(GetEnvironmentRequest) returns (GetEnvironmentResponse);
+
+  // GetInputHash returns the cached or pending input hash for a task.
+  rpc GetInputHash(GetInputHashRequest) returns (GetInputHashResponse);
 }

 message PingRequest {}
@@ -84,3 +87,19 @@ message GetEnvironmentResponse {
   bool cache_hit = 1;
   repeated string env_vars = 2;
 }
+
+message GetInputHashRequest {
+  string task_name = 1;
+  string root = 2;
+  map<string, string> environment = 3;
+}
+
+message GetInputHashResponse {
+  enum State {
+    READY = 0;
+    PENDING = 1;
+    UNKNOWN = 2;
+  }
+  State state = 1;
+  string hash = 2;
+}
diff --git a/cli/api/daemon/v1/daemon_grpc.pb.go b/cli/api/daemon/v1/daemon_grpc.pb.go
index 6d5f50f..47467fc 100644
--- a/cli/api/daemon/v1/daemon_grpc.pb.go
+++ b/cli/api/daemon/v1/daemon_grpc.pb.go
@@ -25,6 +25,7 @@ const (
 	DaemonService_Shutdown_FullMethodName       = "/daemon.v1.DaemonService/Shutdown"
 	DaemonService_GetGraph_FullMethodName       = "/daemon.v1.DaemonService/GetGraph"
 	DaemonService_GetEnvironment_FullMethodName = "/daemon.v1.DaemonService/GetEnvironment"
+	DaemonService_GetInputHash_FullMethodName   = "/daemon.v1.DaemonService/GetInputHash"
 )

 // DaemonServiceClient is the client API for DaemonService service.
@@ -41,6 +42,8 @@ type DaemonServiceClient interface { GetGraph(ctx context.Context, in *GetGraphRequest, opts ...grpc.CallOption) (*GetGraphResponse, error) // GetEnvironment returns resolved Nix environment variables for a toolset. GetEnvironment(ctx context.Context, in *GetEnvironmentRequest, opts ...grpc.CallOption) (*GetEnvironmentResponse, error) + // GetInputHash returns the cached or pending input hash for a task. + GetInputHash(ctx context.Context, in *GetInputHashRequest, opts ...grpc.CallOption) (*GetInputHashResponse, error) } type daemonServiceClient struct { @@ -101,6 +104,16 @@ func (c *daemonServiceClient) GetEnvironment(ctx context.Context, in *GetEnviron return out, nil } +func (c *daemonServiceClient) GetInputHash(ctx context.Context, in *GetInputHashRequest, opts ...grpc.CallOption) (*GetInputHashResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetInputHashResponse) + err := c.cc.Invoke(ctx, DaemonService_GetInputHash_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + // DaemonServiceServer is the server API for DaemonService service. // All implementations must embed UnimplementedDaemonServiceServer // for forward compatibility. @@ -115,6 +128,8 @@ type DaemonServiceServer interface { GetGraph(context.Context, *GetGraphRequest) (*GetGraphResponse, error) // GetEnvironment returns resolved Nix environment variables for a toolset. GetEnvironment(context.Context, *GetEnvironmentRequest) (*GetEnvironmentResponse, error) + // GetInputHash returns the cached or pending input hash for a task. + GetInputHash(context.Context, *GetInputHashRequest) (*GetInputHashResponse, error) mustEmbedUnimplementedDaemonServiceServer() } @@ -140,6 +155,9 @@ func (UnimplementedDaemonServiceServer) GetGraph(context.Context, *GetGraphReque func (UnimplementedDaemonServiceServer) GetEnvironment(context.Context, *GetEnvironmentRequest) (*GetEnvironmentResponse, error) { return nil, status.Error(codes.Unimplemented, "method GetEnvironment not implemented") } +func (UnimplementedDaemonServiceServer) GetInputHash(context.Context, *GetInputHashRequest) (*GetInputHashResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetInputHash not implemented") +} func (UnimplementedDaemonServiceServer) mustEmbedUnimplementedDaemonServiceServer() {} func (UnimplementedDaemonServiceServer) testEmbeddedByValue() {} @@ -251,6 +269,24 @@ func _DaemonService_GetEnvironment_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } +func _DaemonService_GetInputHash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetInputHashRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).GetInputHash(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_GetInputHash_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).GetInputHash(ctx, req.(*GetInputHashRequest)) + } + return interceptor(ctx, in, info, handler) +} + // DaemonService_ServiceDesc is the grpc.ServiceDesc for DaemonService service. 
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -278,6 +314,10 @@ var DaemonService_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetEnvironment", Handler: _DaemonService_GetEnvironment_Handler, }, + { + MethodName: "GetInputHash", + Handler: _DaemonService_GetInputHash_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "api/daemon/v1/daemon.proto", diff --git a/cli/cmd/same/commands/commands_test.go b/cli/cmd/same/commands/commands_test.go index 5ea294b..162a15b 100644 --- a/cli/cmd/same/commands/commands_test.go +++ b/cli/cmd/same/commands/commands_test.go @@ -2,6 +2,7 @@ package commands_test import ( "context" + "errors" "io" "os" "path/filepath" @@ -42,6 +43,9 @@ func TestRun_Success(t *testing.T) { cli := commands.New(a) // Setup strict expectations in the correct sequence + // 0. Daemon connection fails (daemon not available, fallback to local) + mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) + // 1. Loader.Load is called first mockLoader.EXPECT().Load(".").Return(g, nil).Times(1) @@ -379,6 +383,7 @@ func TestRun_OutputModeFlags(t *testing.T) { cli := commands.New(a) + mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) mockLoader.EXPECT().Load(".").Return(g, nil).Times(1) mockResolver.EXPECT().ResolveInputs(gomock.Any(), ".").Return([]string{}, nil).Times(1) mockHasher.EXPECT().ComputeInputHash(gomock.Any(), gomock.Any(), gomock.Any()).Return("hash123", nil).Times(1) diff --git a/cli/go.mod b/cli/go.mod index b006bfa..8afbf35 100644 --- a/cli/go.mod +++ b/cli/go.mod @@ -7,6 +7,7 @@ require ( github.com/charmbracelet/bubbletea v1.3.10 github.com/charmbracelet/lipgloss v1.1.0 github.com/creack/pty v1.1.24 + github.com/fsnotify/fsnotify v1.9.0 github.com/grindlemire/graft v0.2.3 github.com/muesli/termenv v0.16.0 github.com/rogpeppe/go-internal v1.14.1 diff --git a/cli/go.sum b/cli/go.sum index 7128630..6631da7 100644 --- a/cli/go.sum +++ b/cli/go.sum @@ -29,6 +29,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4= github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= diff --git a/cli/internal/adapters/daemon/client.go b/cli/internal/adapters/daemon/client.go index 7c4880b..9b639d8 100644 --- a/cli/internal/adapters/daemon/client.go +++ b/cli/internal/adapters/daemon/client.go @@ -139,6 +139,40 @@ func (c *Client) GetEnvironment( return resp.EnvVars, resp.CacheHit, nil } +// GetInputHash implements ports.DaemonClient. 
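+// Callers should treat any state other than HashReady as a cache miss and fall
+// back to hashing locally, mirroring the GetGraph/GetEnvironment fallbacks.
+// A hypothetical call site (computeLocally is illustrative, not in this patch):
+//
+//	res, err := client.GetInputHash(ctx, name, root, env)
+//	if err != nil || res.State != ports.HashReady {
+//		hash = computeLocally(task)
+//	} else {
+//		hash = res.Hash
+//	}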
+func (c *Client) GetInputHash( + ctx context.Context, + taskName, root string, + env map[string]string, +) (ports.InputHashResult, error) { + req := &daemonv1.GetInputHashRequest{ + TaskName: taskName, + Root: root, + Environment: env, + } + + resp, err := c.client.GetInputHash(ctx, req) + if err != nil { + return ports.InputHashResult{State: ports.HashUnknown}, zerr.Wrap(err, "GetInputHash RPC failed") + } + + // Convert the proto enum to the ports.InputHashState + var state ports.InputHashState + switch resp.State { + case daemonv1.GetInputHashResponse_READY: + state = ports.HashReady + case daemonv1.GetInputHashResponse_PENDING: + state = ports.HashPending + default: + state = ports.HashUnknown + } + + return ports.InputHashResult{ + State: state, + Hash: resp.Hash, + }, nil +} + // stringsToInternedStrings converts a slice of strings to InternedString. func (c *Client) stringsToInternedStrings(strs []string) []domain.InternedString { result := make([]domain.InternedString, len(strs)) diff --git a/cli/internal/adapters/daemon/server.go b/cli/internal/adapters/daemon/server.go index 798e969..8dad80d 100644 --- a/cli/internal/adapters/daemon/server.go +++ b/cli/internal/adapters/daemon/server.go @@ -8,6 +8,7 @@ import ( "path/filepath" "go.trai.ch/same/api/daemon/v1" + "go.trai.ch/same/internal/adapters/watcher" "go.trai.ch/same/internal/core/domain" "go.trai.ch/same/internal/core/ports" "go.trai.ch/zerr" @@ -23,10 +24,18 @@ type Server struct { cache *ServerCache configLoader ports.ConfigLoader envFactory ports.EnvironmentFactory + watcherSvc *WatcherService grpcServer *grpc.Server listener net.Listener } +// WatcherService bundles the watcher, debouncer, and hash cache together. +type WatcherService struct { + Watcher ports.Watcher + Debouncer *watcher.Debouncer + HashCache ports.InputHashCache +} + // NewServer creates a new daemon server. func NewServer(lifecycle *Lifecycle) *Server { s := &Server{ @@ -264,3 +273,44 @@ func (s *Server) internedStringsToStrings(interned []domain.InternedString) []st } return result } + +// SetWatcherService sets the watcher service for the server. +// This must be called before Serve if the watcher service is needed. +func (s *Server) SetWatcherService(watcherSvc *WatcherService) { + s.watcherSvc = watcherSvc +} + +// GetInputHash implements DaemonService.GetInputHash. +// +//nolint:revive,unparam // ctx satisfies gRPC interface requirement +func (s *Server) GetInputHash( + ctx context.Context, + req *daemonv1.GetInputHashRequest, +) (*daemonv1.GetInputHashResponse, error) { + s.lifecycle.ResetTimer() + + // Guard: ensure watcher service is configured + if s.watcherSvc == nil { + return nil, status.Error(codes.FailedPrecondition, "watcher service not initialized") + } + + // Get the hash result from the cache using the request's context. + // This avoids race conditions by passing root/env directly. 
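+	// The lookup below is a pure cache read: a miss never triggers hashing
+	// inline; recomputation happens in the background rehash worker.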
+	result := s.watcherSvc.HashCache.GetInputHash(req.TaskName, req.Root, req.Environment)
+
+	// Convert the ports.InputHashState to the proto enum
+	var state daemonv1.GetInputHashResponse_State
+	switch result.State {
+	case ports.HashReady:
+		state = daemonv1.GetInputHashResponse_READY
+	case ports.HashPending:
+		state = daemonv1.GetInputHashResponse_PENDING
+	default:
+		state = daemonv1.GetInputHashResponse_UNKNOWN
+	}
+
+	return &daemonv1.GetInputHashResponse{
+		State: state,
+		Hash:  result.Hash,
+	}, nil
+}
diff --git a/cli/internal/adapters/watcher/debouncer.go b/cli/internal/adapters/watcher/debouncer.go
new file mode 100644
index 0000000..17bb9ce
--- /dev/null
+++ b/cli/internal/adapters/watcher/debouncer.go
@@ -0,0 +1,100 @@
+// Package watcher implements file system watching for proactive input hashing.
+package watcher
+
+import (
+	"sync"
+	"time"
+	"unique"
+)
+
+// Debouncer coalesces rapid file system events into batched invalidations.
+type Debouncer struct {
+	mu       sync.Mutex
+	pending  map[unique.Handle[string]]struct{}
+	timer    *time.Timer
+	window   time.Duration
+	callback func(paths []string)
+}
+
+// NewDebouncer creates a new debouncer with the given time window and callback.
+func NewDebouncer(window time.Duration, callback func(paths []string)) *Debouncer {
+	return &Debouncer{
+		pending:  make(map[unique.Handle[string]]struct{}),
+		window:   window,
+		callback: callback,
+	}
+}
+
+// Add adds a file path to the pending events set.
+func (d *Debouncer) Add(path string) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	// Add the path to the pending set using an interned handle for deduplication.
+	handle := unique.Make(path)
+	d.pending[handle] = struct{}{}
+
+	// Reset the timer if it exists, or create a new one.
+	if d.timer != nil {
+		d.timer.Stop()
+	}
+	d.timer = time.AfterFunc(d.window, d.fire)
+}
+
+// fire is called when the debounce window expires.
+func (d *Debouncer) fire() {
+	d.mu.Lock()
+
+	// Check if there's anything to process (protects against race with Flush).
+	if len(d.pending) == 0 {
+		d.timer = nil
+		d.mu.Unlock()
+		return
+	}
+
+	// Convert the pending set to a slice of paths.
+	paths := make([]string, 0, len(d.pending))
+	for handle := range d.pending {
+		paths = append(paths, handle.Value())
+	}
+
+	// Clear the pending set and timer.
+	d.pending = make(map[unique.Handle[string]]struct{})
+	d.timer = nil
+	d.mu.Unlock()
+
+	// Call the callback with the coalesced paths (asynchronously, so the timer
+	// goroutine is never blocked; Flush, by contrast, runs the callback synchronously).
+	if len(paths) > 0 && d.callback != nil {
+		go d.callback(paths)
+	}
+}
+
+// Flush immediately triggers the debounce callback with all pending paths.
+// This method blocks until the callback completes, making it suitable for
+// graceful shutdown scenarios where work must finish before proceeding.
+func (d *Debouncer) Flush() {
+	d.mu.Lock()
+	if d.timer != nil {
+		if !d.timer.Stop() {
+			// Timer already fired, let it complete rather than processing twice.
+			d.mu.Unlock()
+			return
+		}
+		d.timer = nil
+	}
+
+	// Extract paths to process.
+	paths := make([]string, 0, len(d.pending))
+	for handle := range d.pending {
+		paths = append(paths, handle.Value())
+	}
+	d.pending = make(map[unique.Handle[string]]struct{})
+	d.mu.Unlock()
+
+	// Call the callback synchronously (blocks until complete).
+	// This differs from fire() which is async, but is intentional for
+	// flush scenarios where completion is required before proceeding.
+ if len(paths) > 0 && d.callback != nil { + d.callback(paths) + } +} diff --git a/cli/internal/adapters/watcher/hash_cache.go b/cli/internal/adapters/watcher/hash_cache.go new file mode 100644 index 0000000..0495339 --- /dev/null +++ b/cli/internal/adapters/watcher/hash_cache.go @@ -0,0 +1,277 @@ +package watcher + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "sort" + "sync" + "time" + "unique" + + "go.trai.ch/same/internal/core/domain" + "go.trai.ch/same/internal/core/ports" +) + +// PendingRehash represents a cache entry that needs to be recomputed. +type PendingRehash struct { + TaskName string + Root string + Env map[string]string +} + +// HashCache implements ports.InputHashCache with background rehashing. +type HashCache struct { + mu sync.RWMutex + entries map[unique.Handle[string]]*domain.TaskHashEntry + pathToTasks map[unique.Handle[string]][]cacheEntry + cacheKeyContext map[unique.Handle[string]]PendingRehash // Maps cache key to its context for invalidation + pendingRehashes []PendingRehash // Track what needs rehashing with full context + pendingKeys map[unique.Handle[string]]struct{} // O(1) pending lookup set + tasks map[unique.Handle[string]]*domain.Task // Full task definitions + hasher ports.Hasher + resolver ports.InputResolver +} + +// cacheEntry links a path to a cache key for invalidation. +type cacheEntry struct { + cacheKey unique.Handle[string] +} + +// NewHashCache creates a new hash cache. +func NewHashCache(hasher ports.Hasher, resolver ports.InputResolver) *HashCache { + return &HashCache{ + entries: make(map[unique.Handle[string]]*domain.TaskHashEntry), + pathToTasks: make(map[unique.Handle[string]][]cacheEntry), + cacheKeyContext: make(map[unique.Handle[string]]PendingRehash), + pendingRehashes: make([]PendingRehash, 0), + pendingKeys: make(map[unique.Handle[string]]struct{}), + tasks: make(map[unique.Handle[string]]*domain.Task), + hasher: hasher, + resolver: resolver, + } +} + +// copyEnv creates a deep copy of an environment map to prevent shared reference bugs. +func copyEnv(env map[string]string) map[string]string { + if env == nil { + return nil + } + copied := make(map[string]string, len(env)) + for k, v := range env { + copied[k] = v + } + return copied +} + +// makeCacheKey creates a unique cache key from task name, root, and environment. +// This ensures different contexts don't collide in the cache. +// Uses a truncated SHA-256 hash (64 bits) of the environment for space efficiency. +// Collision probability is negligible for typical daemon workloads. +func (h *HashCache) makeCacheKey(taskName, root string, env map[string]string) unique.Handle[string] { + // Sort environment keys for deterministic hashing + keys := make([]string, 0, len(env)) + for k := range env { + keys = append(keys, k) + } + sort.Strings(keys) + + // Build environment string + envStr := "" + for _, k := range keys { + envStr += fmt.Sprintf("%s=%s;", k, env[k]) + } + + // Hash the environment to keep key size reasonable + envHash := sha256.Sum256([]byte(envStr)) + envHashStr := hex.EncodeToString(envHash[:8]) // Use first 8 bytes (64 bits) + + // Combine task name, root, and env hash + cacheKey := fmt.Sprintf("%s|%s|%s", taskName, root, envHashStr) + return unique.Make(cacheKey) +} + +// GetInputHash returns the current hash state and value for the given task. 
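+// The three outcomes map directly onto the cache's internal state: a key in
+// pendingKeys yields HashPending, a key in entries yields HashReady, and
+// anything else yields HashUnknown.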
+func (h *HashCache) GetInputHash(taskName, root string, env map[string]string) ports.InputHashResult { + h.mu.RLock() + defer h.mu.RUnlock() + + cacheKey := h.makeCacheKey(taskName, root, env) + + // Check if this specific context is pending rehash using O(1) set lookup. + if _, pending := h.pendingKeys[cacheKey]; pending { + return ports.InputHashResult{State: ports.HashPending} + } + + // Check if we have a cached entry. + if entry, ok := h.entries[cacheKey]; ok { + return ports.InputHashResult{ + State: ports.HashReady, + Hash: entry.Hash, + } + } + + // Task is not cached yet. + return ports.InputHashResult{State: ports.HashUnknown} +} + +// Invalidate marks cached hashes for tasks affected by the changed paths. +// For each affected cache entry, we delete it and add it to the pending list for background rehashing. +func (h *HashCache) Invalidate(paths []string) { + h.mu.Lock() + defer h.mu.Unlock() + + // For each changed path, find all cache entries that depend on it. + for _, path := range paths { + pathHandle := unique.Make(path) + if entries, ok := h.pathToTasks[pathHandle]; ok { + for _, entry := range entries { + // Look up the full context for this cache key + if context, ok := h.cacheKeyContext[entry.cacheKey]; ok { + // Add to pending rehashes if not already there (O(1) check with pendingKeys) + if _, exists := h.pendingKeys[entry.cacheKey]; !exists { + // Deep copy the env map to prevent shared reference bugs + h.pendingRehashes = append(h.pendingRehashes, PendingRehash{ + TaskName: context.TaskName, + Root: context.Root, + Env: copyEnv(context.Env), + }) + h.pendingKeys[entry.cacheKey] = struct{}{} + } + } + + // Delete the stale cache entry + delete(h.entries, entry.cacheKey) + } + } + } +} + +// GetTask retrieves the stored task definition by name. +// This is used by background workers to rehash pending tasks. +func (h *HashCache) GetTask(taskName string) (*domain.Task, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + + handle := unique.Make(taskName) + task, ok := h.tasks[handle] + return task, ok +} + +// ComputeHash computes and caches the hash for a task with the given context. +// This should be called to populate the cache for a specific task/root/env combination. +func (h *HashCache) ComputeHash(task *domain.Task, root string, env map[string]string) error { + // Extract string inputs from task. + inputs := make([]string, len(task.Inputs)) + for i, input := range task.Inputs { + inputs[i] = input.String() + } + + // Resolve inputs to concrete paths. + resolved, err := h.resolver.ResolveInputs(inputs, root) + if err != nil { + return err + } + + // Compute the hash with the full task. + hash, err := h.hasher.ComputeInputHash(task, env, resolved) + if err != nil { + return err + } + + // Update the cache with the full task information. + h.updateCache(task, root, env, hash, resolved) + + return nil +} + +// updateCache updates the cache entry for a task and rebuilds the path-to-task index. +func (h *HashCache) updateCache(task *domain.Task, root string, env map[string]string, hash string, resolved []string) { + h.mu.Lock() + defer h.mu.Unlock() + + cacheKey := h.makeCacheKey(task.Name.String(), root, env) + taskHandle := task.Name.Value() + pathHandles := make([]unique.Handle[string], len(resolved)) + + // Remove old index entries for this cache key. + h.removeTaskFromIndex(cacheKey) + + // Add new entry and build new index. 
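+	// Each resolved input path is indexed back to this cache key, so a single
+	// file change can invalidate every task/root/env combination that reads it.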
+ for i, path := range resolved { + pathHandle := unique.Make(path) + pathHandles[i] = pathHandle + + // Add to path-to-task index (using cache key for invalidation). + h.pathToTasks[pathHandle] = append(h.pathToTasks[pathHandle], cacheEntry{cacheKey: cacheKey}) + } + + // Store the cache entry. + h.entries[cacheKey] = &domain.TaskHashEntry{ + Hash: hash, + ResolvedInputs: pathHandles, + ComputedAt: time.Now(), + } + + // Store the cache key context for future invalidation. + // Deep copy the env map to prevent shared reference bugs. + h.cacheKeyContext[cacheKey] = PendingRehash{ + TaskName: task.Name.String(), + Root: root, + Env: copyEnv(env), + } + + // Store the task definition (using simple task name handle). + h.tasks[taskHandle] = task + + // Remove from pending rehashes if it was pending. + if _, wasPending := h.pendingKeys[cacheKey]; wasPending { + // O(1) removal from pending keys set + delete(h.pendingKeys, cacheKey) + + // O(n) removal from pending list (needed for background worker iteration) + for i, pending := range h.pendingRehashes { + pendingKey := h.makeCacheKey(pending.TaskName, pending.Root, pending.Env) + if pendingKey == cacheKey { + h.pendingRehashes = append(h.pendingRehashes[:i], h.pendingRehashes[i+1:]...) + break + } + } + } +} + +// removeTaskFromIndex removes all index entries for the given cache key. +func (h *HashCache) removeTaskFromIndex(cacheKey unique.Handle[string]) { + for path, entries := range h.pathToTasks { + for i, entry := range entries { + if entry.cacheKey == cacheKey { + // Remove this entry from the slice. + h.pathToTasks[path] = append(entries[:i], entries[i+1:]...) + if len(h.pathToTasks[path]) == 0 { + // Delete empty entries. + delete(h.pathToTasks, path) + } + break + } + } + } +} + +// GetPendingTasks returns a list of pending rehash entries with full context. +// This is used by the background worker to know which tasks to rehash. +func (h *HashCache) GetPendingTasks() []PendingRehash { + h.mu.RLock() + defer h.mu.RUnlock() + + // Return a deep copy of the pending list to avoid exposing internal state. + // Must deep-copy Env maps to prevent shared reference bugs. + pending := make([]PendingRehash, len(h.pendingRehashes)) + for i, p := range h.pendingRehashes { + pending[i] = PendingRehash{ + TaskName: p.TaskName, + Root: p.Root, + Env: copyEnv(p.Env), + } + } + return pending +} diff --git a/cli/internal/adapters/watcher/node.go b/cli/internal/adapters/watcher/node.go new file mode 100644 index 0000000..4d61341 --- /dev/null +++ b/cli/internal/adapters/watcher/node.go @@ -0,0 +1,65 @@ +package watcher + +import ( + "context" + "time" + + "github.com/grindlemire/graft" + "go.trai.ch/same/internal/adapters/fs" + "go.trai.ch/same/internal/core/ports" +) + +const ( + // WatcherNodeID is the unique identifier for the file watcher Graft node. + WatcherNodeID graft.ID = "adapter.watcher" + // HashCacheNodeID is the unique identifier for the input hash cache Graft node. 
+	HashCacheNodeID graft.ID = "adapter.hash_cache"
+)
+
+func init() {
+	// Watcher Node
+	graft.Register(graft.Node[ports.Watcher]{
+		ID:        WatcherNodeID,
+		Cacheable: true,
+		Run: func(_ context.Context) (ports.Watcher, error) {
+			return NewWatcher()
+		},
+	})
+
+	// HashCache Node
+	graft.Register(graft.Node[*HashCache]{
+		ID:        HashCacheNodeID,
+		Cacheable: true,
+		DependsOn: []graft.ID{fs.HasherNodeID, fs.ResolverNodeID},
+		Run: func(ctx context.Context) (*HashCache, error) {
+			hasher, err := graft.Dep[ports.Hasher](ctx)
+			if err != nil {
+				return nil, err
+			}
+			resolver, err := graft.Dep[ports.InputResolver](ctx)
+			if err != nil {
+				return nil, err
+			}
+			// The actual environment and root will come from the daemon's runtime context.
+			return NewHashCache(hasher, resolver), nil
+		},
+	})
+}
+
+// NodeID returns the Graft node ID for a given port interface type.
+// This is a helper to map port types to their corresponding node IDs.
+func NodeID(portType any) graft.ID {
+	switch portType.(type) {
+	case ports.Watcher:
+		return WatcherNodeID
+	case *HashCache:
+		return HashCacheNodeID
+	default:
+		// Runtime exhaustiveness check: an unhandled port type is only caught
+		// here, as a panic. If you hit it, add the new port type to the switch.
+		panic("unknown port type")
+	}
+}
+
+// DefaultDebounceWindow is the default time window for debouncing file events.
+const DefaultDebounceWindow = 50 * time.Millisecond
diff --git a/cli/internal/adapters/watcher/watcher.go b/cli/internal/adapters/watcher/watcher.go
new file mode 100644
index 0000000..77fa636
--- /dev/null
+++ b/cli/internal/adapters/watcher/watcher.go
@@ -0,0 +1,186 @@
+package watcher
+
+import (
+	"context"
+	"fmt"
+	"io/fs"
+	"iter"
+	"os"
+	"path/filepath"
+	"unique"
+
+	"github.com/fsnotify/fsnotify"
+	"go.trai.ch/same/internal/core/ports"
+)
+
+var _ ports.Watcher = (*Watcher)(nil)
+
+// shouldSkipDirectories are directories that should not be watched.
+var shouldSkipDirectories = map[string]bool{
+	".git":         true,
+	".jj":          true,
+	"node_modules": true,
+}
+
+const eventChannelBuffer = 100
+
+// Watcher implements file system watching using fsnotify.
+type Watcher struct {
+	fsWatcher *fsnotify.Watcher
+	root      unique.Handle[string]
+	events    chan ports.WatchEvent
+}
+
+// NewWatcher creates a new file system watcher.
+func NewWatcher() (*Watcher, error) {
+	watcher, err := fsnotify.NewWatcher()
+	if err != nil {
+		return nil, err
+	}
+	return &Watcher{
+		fsWatcher: watcher,
+		events:    make(chan ports.WatchEvent, eventChannelBuffer),
+	}, nil
+}
+
+// Start begins watching the given root directory recursively.
+func (w *Watcher) Start(ctx context.Context, root string) error {
+	w.root = unique.Make(root)
+
+	// Walk the directory tree and add all directories to the watcher.
+	for dir := range w.watchRecursively(root) {
+		if err := w.fsWatcher.Add(dir); err != nil {
+			return err
+		}
+	}
+
+	// Start processing events in a goroutine.
+	go w.processEvents(ctx)
+
+	return nil
+}
+
+// Stop stops the watcher and releases all resources.
+func (w *Watcher) Stop() error {
+	return w.fsWatcher.Close()
+}
+
+// Events returns an iterator of file system events.
+func (w *Watcher) Events() iter.Seq[ports.WatchEvent] {
+	return func(yield func(ports.WatchEvent) bool) {
+		for event := range w.events {
+			if !yield(event) {
+				return
+			}
+		}
+	}
+}
+
+// watchRecursively walks the directory tree and yields all directories.
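+// The returned iterator honors early exit: when the consumer stops ranging,
+// yield returns false and the walk terminates via filepath.SkipAll.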
+func (w *Watcher) watchRecursively(root string) iter.Seq[string] { + return func(yield func(string) bool) { + _ = filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error { + if err != nil { + // Continue walking even if there's an error accessing a directory. + return nil //nolint:nilerr // This is intentional - we want to skip problematic directories + } + if d.IsDir() { + if w.shouldSkip(d.Name()) { + return fs.SkipDir + } + if !yield(path) { + return filepath.SkipAll + } + } + return nil + }) + } +} + +// shouldSkip returns true if the directory should be skipped. +func (w *Watcher) shouldSkip(name string) bool { + return shouldSkipDirectories[name] +} + +// processEvents processes raw fsnotify events and converts them to ports.WatchEvent. +// +//nolint:cyclop // This function is complex due to multiple event types and error handling +func (w *Watcher) processEvents(ctx context.Context) { + defer close(w.events) + + for { + select { + case <-ctx.Done(): + return + case event, ok := <-w.fsWatcher.Events: + if !ok { + return + } + + // Convert fsnotify event to ports.WatchEvent. + watchEvent := w.convertEvent(event) + if watchEvent == nil { + continue + } + + // Send the event to the output channel. + select { + case w.events <- *watchEvent: + case <-ctx.Done(): + return + } + + // If a new directory was created, add it to the watcher. + if event.Op&fsnotify.Create == fsnotify.Create && watchEvent.Operation == ports.OpCreate { + if info, err := os.Stat(event.Name); err == nil && info.IsDir() && !w.shouldSkip(info.Name()) { + // Recursively add the new directory and its subdirectories. + for dir := range w.watchRecursively(event.Name) { + _ = w.fsWatcher.Add(dir) + } + } + } + + case err, ok := <-w.fsWatcher.Errors: + if !ok { + return + } + // Log error to stderr and continue processing. + fmt.Fprintf(os.Stderr, "watcher: file system error: %v\n", err) + } + } +} + +// convertEvent converts an fsnotify event to a ports.WatchEvent. +func (w *Watcher) convertEvent(event fsnotify.Event) *ports.WatchEvent { + path := event.Name + + if event.Op&fsnotify.Write == fsnotify.Write { + return &ports.WatchEvent{ + Path: path, + Operation: ports.OpWrite, + } + } + + if event.Op&fsnotify.Create == fsnotify.Create { + return &ports.WatchEvent{ + Path: path, + Operation: ports.OpCreate, + } + } + + if event.Op&fsnotify.Remove == fsnotify.Remove { + return &ports.WatchEvent{ + Path: path, + Operation: ports.OpRemove, + } + } + + if event.Op&fsnotify.Rename == fsnotify.Rename { + return &ports.WatchEvent{ + Path: path, + Operation: ports.OpRename, + } + } + + return nil +} diff --git a/cli/internal/app/app_test.go b/cli/internal/app/app_test.go index bc02dc6..16b4ba0 100644 --- a/cli/internal/app/app_test.go +++ b/cli/internal/app/app_test.go @@ -64,6 +64,7 @@ func TestApp_Build(t *testing.T) { ). WithDisableTick() + mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) mockResolver.EXPECT().ResolveInputs(gomock.Any(), ".").Return([]string{}, nil) // Expectations mockLoader.EXPECT().Load(".").Return(g, nil) @@ -122,6 +123,7 @@ func TestApp_Run_NoTargets(t *testing.T) { ). WithDisableTick() + mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) // Expectations mockLoader.EXPECT().Load(".").Return(domain.NewGraph(), nil) @@ -177,6 +179,7 @@ func TestApp_Run_ConfigLoaderError(t *testing.T) { ). 
WithDisableTick() + mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) // Expectations - loader fails mockLoader.EXPECT().Load(".").Return(nil, errors.New("config load error")) @@ -242,6 +245,7 @@ func TestApp_Run_BuildExecutionFailed(t *testing.T) { ). WithDisableTick() + mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) mockResolver.EXPECT().ResolveInputs(gomock.Any(), ".").Return([]string{}, nil) // Expectations mockLoader.EXPECT().Load(".").Return(g, nil) @@ -439,6 +443,7 @@ func TestApp_Run_LinearMode(t *testing.T) { ). WithDisableTick() + mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) mockResolver.EXPECT().ResolveInputs(gomock.Any(), ".").Return([]string{}, nil) mockLoader.EXPECT().Load(".").Return(g, nil) mockHasher.EXPECT().ComputeInputHash(task, nil, []string{}).Return("hash", nil) @@ -500,6 +505,7 @@ func TestApp_Run_InspectMode(t *testing.T) { ). WithDisableTick() + mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) mockResolver.EXPECT().ResolveInputs(gomock.Any(), ".").Return([]string{}, nil) mockLoader.EXPECT().Load(".").Return(g, nil) mockHasher.EXPECT().ComputeInputHash(task, nil, []string{}).Return("hash", nil) diff --git a/cli/internal/core/domain/hash_cache.go b/cli/internal/core/domain/hash_cache.go new file mode 100644 index 0000000..7e2b288 --- /dev/null +++ b/cli/internal/core/domain/hash_cache.go @@ -0,0 +1,16 @@ +package domain + +import ( + "time" + "unique" +) + +// TaskHashEntry stores the computed hash and related metadata for a task. +type TaskHashEntry struct { + // Hash is the computed input hash for the task. + Hash string + // ResolvedInputs is the list of resolved input paths at the time of hashing. + ResolvedInputs []unique.Handle[string] + // ComputedAt is the timestamp when the hash was computed. + ComputedAt time.Time +} diff --git a/cli/internal/core/ports/daemon.go b/cli/internal/core/ports/daemon.go index 5a10737..ba5b4ff 100644 --- a/cli/internal/core/ports/daemon.go +++ b/cli/internal/core/ports/daemon.go @@ -44,6 +44,13 @@ type DaemonClient interface { tools map[string]string, ) (envVars []string, cacheHit bool, err error) + // GetInputHash retrieves the cached or pending input hash for a task. + GetInputHash( + ctx context.Context, + taskName, root string, + env map[string]string, + ) (InputHashResult, error) + // Close releases client resources. Close() error } diff --git a/cli/internal/core/ports/input_hash_cache.go b/cli/internal/core/ports/input_hash_cache.go new file mode 100644 index 0000000..993bce0 --- /dev/null +++ b/cli/internal/core/ports/input_hash_cache.go @@ -0,0 +1,35 @@ +package ports + +// InputHashState represents the state of an input hash computation. +type InputHashState uint8 + +const ( + // HashReady indicates the hash has been computed and is available. + HashReady InputHashState = iota + // HashPending indicates the hash is currently being computed. + HashPending + // HashUnknown indicates the hash state is unknown (typically means not yet cached). + HashUnknown +) + +// InputHashResult contains the result of an input hash query. +type InputHashResult struct { + // State indicates the current state of the hash computation. + State InputHashState + // Hash is the computed hash (only valid when State is HashReady). + Hash string +} + +// InputHashCache defines the interface for caching and managing input hashes. 
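+// Implementations must be safe for concurrent use: the daemon serves RPCs from
+// multiple goroutines while the watcher invalidates entries in the background.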
+// +//go:generate mockgen -destination=mocks/mock_input_hash_cache.go -package=mocks -source=input_hash_cache.go +type InputHashCache interface { + // GetInputHash returns the current hash state and value for the given task. + // root and env are provided per-request to avoid race conditions when multiple + // requests query hashes for the same task in different contexts. + // It returns HashUnknown if the task has not been cached yet. + GetInputHash(taskName, root string, env map[string]string) InputHashResult + // Invalidate marks cached hashes for tasks affected by the changed paths. + // This should be called when files are modified. + Invalidate(paths []string) +} diff --git a/cli/internal/core/ports/watcher.go b/cli/internal/core/ports/watcher.go new file mode 100644 index 0000000..6e70ab8 --- /dev/null +++ b/cli/internal/core/ports/watcher.go @@ -0,0 +1,39 @@ +package ports + +import ( + "context" + "iter" +) + +// WatchOp represents the type of file system operation. +type WatchOp uint8 + +const ( + // OpCreate indicates a file or directory was created. + OpCreate WatchOp = iota + // OpWrite indicates a file was modified. + OpWrite + // OpRemove indicates a file or directory was removed. + OpRemove + // OpRename indicates a file or directory was renamed. + OpRename +) + +// WatchEvent represents a file system event from the watcher. +type WatchEvent struct { + // Path is the absolute path of the file or directory that changed. + Path string + // Operation is the type of change that occurred. + Operation WatchOp +} + +// Watcher defines the interface for watching file system changes. +type Watcher interface { + // Start begins watching the given root directory recursively. + // It returns an error if the watcher fails to start. + Start(ctx context.Context, root string) error + // Stop stops the watcher and releases all resources. + Stop() error + // Events returns an iterator of file system events. + Events() iter.Seq[WatchEvent] +} From c662c351cde7aaf89855672fa23a352d8849d327 Mon Sep 17 00:00:00 2001 From: Luca Fondo Date: Wed, 28 Jan 2026 00:40:20 +0100 Subject: [PATCH 06/20] feat(#74): add ExecuteTask streaming RPC to daemon proto - Add ExecuteTask server-side streaming RPC to DaemonService - Define ExecuteTaskRequest with task details and PTY dimensions - Define ExecuteTaskResponse for log chunks (raw bytes) - Import google/rpc/status.proto for error details - Add buf dependency on googleapis for rpc types Exit code handling: - Success (exit=0): gRPC trailer x-exit-code: 0 - Failure (exit!=0): gRPC status UNKNOWN with exit code in trailer --- cli/api/daemon/v1/daemon.proto | 19 +++++++++++++++++++ cli/buf.yaml | 2 ++ 2 files changed, 21 insertions(+) diff --git a/cli/api/daemon/v1/daemon.proto b/cli/api/daemon/v1/daemon.proto index 0fe1727..90c9377 100644 --- a/cli/api/daemon/v1/daemon.proto +++ b/cli/api/daemon/v1/daemon.proto @@ -4,6 +4,8 @@ package daemon.v1; option go_package = "go.trai.ch/same/api/daemon/v1;daemonv1"; +import "google/rpc/status.proto"; + service DaemonService { // Ping checks daemon health and resets the inactivity timer. rpc Ping(PingRequest) returns (PingResponse); @@ -22,6 +24,9 @@ service DaemonService { // GetInputHash returns the cached or pending input hash for a task. rpc GetInputHash(GetInputHashRequest) returns (GetInputHashResponse); + + // ExecuteTask runs a task and streams logs back to the client. 
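+  // Exit codes travel out of band: a successful run sets the gRPC trailer
+  // x-exit-code to 0, while a failing run returns status UNKNOWN with the
+  // real exit code in the same trailer.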
+  rpc ExecuteTask(ExecuteTaskRequest) returns (stream ExecuteTaskResponse);
 }
 
 message PingRequest {}
@@ -103,3 +108,17 @@ message GetInputHashResponse {
   State state = 1;
   string hash = 2;
 }
+
+message ExecuteTaskRequest {
+  string task_name = 1;
+  repeated string command = 2;
+  string working_dir = 3;
+  map<string, string> task_environment = 4;
+  repeated string nix_environment = 5;
+  int32 pty_rows = 6;
+  int32 pty_cols = 7;
+}
+
+message ExecuteTaskResponse {
+  bytes data = 1;
+}
diff --git a/cli/buf.yaml b/cli/buf.yaml
index 5fc4486..732d25a 100644
--- a/cli/buf.yaml
+++ b/cli/buf.yaml
@@ -1,4 +1,6 @@
 version: v1
+deps:
+  - buf.build/googleapis/googleapis
 lint:
   use:
     - DEFAULT

From bb3d690745d1edfd2c8ed10465640c131fc6124e Mon Sep 17 00:00:00 2001
From: Luca Fondo
Date: Wed, 28 Jan 2026 00:40:33 +0100
Subject: [PATCH 07/20] feat(#74): regenerate protobuf code for ExecuteTask RPC

- Regenerate daemon.pb.go and daemon_grpc.pb.go
- Generated by buf generate with googleapis dependency
---
 cli/api/daemon/v1/daemon.pb.go      | 218 ++++++++++++++++++++++++----
 cli/api/daemon/v1/daemon_grpc.pb.go |  46 +++++-
 2 files changed, 233 insertions(+), 31 deletions(-)

diff --git a/cli/api/daemon/v1/daemon.pb.go b/cli/api/daemon/v1/daemon.pb.go
index 290e321..e7fb4e2 100644
--- a/cli/api/daemon/v1/daemon.pb.go
+++ b/cli/api/daemon/v1/daemon.pb.go
@@ -11,6 +11,7 @@ import (
 	sync "sync"
 	unsafe "unsafe"
 
+	_ "google.golang.org/genproto/googleapis/rpc/status"
 	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
 	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
 )
@@ -841,11 +842,147 @@ func (x *GetInputHashResponse) GetHash() string {
 	return ""
 }
 
+type ExecuteTaskRequest struct {
+	state           protoimpl.MessageState `protogen:"open.v1"`
+	TaskName        string                 `protobuf:"bytes,1,opt,name=task_name,json=taskName,proto3" json:"task_name,omitempty"`
+	Command         []string               `protobuf:"bytes,2,rep,name=command,proto3" json:"command,omitempty"`
+	WorkingDir      string                 `protobuf:"bytes,3,opt,name=working_dir,json=workingDir,proto3" json:"working_dir,omitempty"`
+	TaskEnvironment map[string]string      `protobuf:"bytes,4,rep,name=task_environment,json=taskEnvironment,proto3" json:"task_environment,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	NixEnvironment  []string               `protobuf:"bytes,5,rep,name=nix_environment,json=nixEnvironment,proto3" json:"nix_environment,omitempty"`
+	PtyRows         int32                  `protobuf:"varint,6,opt,name=pty_rows,json=ptyRows,proto3" json:"pty_rows,omitempty"`
+	PtyCols         int32                  `protobuf:"varint,7,opt,name=pty_cols,json=ptyCols,proto3" json:"pty_cols,omitempty"`
+	unknownFields   protoimpl.UnknownFields
+	sizeCache       protoimpl.SizeCache
+}
+
+func (x *ExecuteTaskRequest) Reset() {
+	*x = ExecuteTaskRequest{}
+	mi := &file_api_daemon_v1_daemon_proto_msgTypes[14]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ExecuteTaskRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExecuteTaskRequest) ProtoMessage() {}
+
+func (x *ExecuteTaskRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_api_daemon_v1_daemon_proto_msgTypes[14]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExecuteTaskRequest.ProtoReflect.Descriptor instead.
+func (*ExecuteTaskRequest) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{14} +} + +func (x *ExecuteTaskRequest) GetTaskName() string { + if x != nil { + return x.TaskName + } + return "" +} + +func (x *ExecuteTaskRequest) GetCommand() []string { + if x != nil { + return x.Command + } + return nil +} + +func (x *ExecuteTaskRequest) GetWorkingDir() string { + if x != nil { + return x.WorkingDir + } + return "" +} + +func (x *ExecuteTaskRequest) GetTaskEnvironment() map[string]string { + if x != nil { + return x.TaskEnvironment + } + return nil +} + +func (x *ExecuteTaskRequest) GetNixEnvironment() []string { + if x != nil { + return x.NixEnvironment + } + return nil +} + +func (x *ExecuteTaskRequest) GetPtyRows() int32 { + if x != nil { + return x.PtyRows + } + return 0 +} + +func (x *ExecuteTaskRequest) GetPtyCols() int32 { + if x != nil { + return x.PtyCols + } + return 0 +} + +type ExecuteTaskResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExecuteTaskResponse) Reset() { + *x = ExecuteTaskResponse{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExecuteTaskResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExecuteTaskResponse) ProtoMessage() {} + +func (x *ExecuteTaskResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExecuteTaskResponse.ProtoReflect.Descriptor instead. 
+func (*ExecuteTaskResponse) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{15} +} + +func (x *ExecuteTaskResponse) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + var File_api_daemon_v1_daemon_proto protoreflect.FileDescriptor const file_api_daemon_v1_daemon_proto_rawDesc = "" + "\n" + - "\x1aapi/daemon/v1/daemon.proto\x12\tdaemon.v1\"\r\n" + + "\x1aapi/daemon/v1/daemon.proto\x12\tdaemon.v1\x1a\x17google/rpc/status.proto\"\r\n" + "\vPingRequest\"D\n" + "\fPingResponse\x124\n" + "\x16idle_remaining_seconds\x18\x01 \x01(\x03R\x14idleRemainingSeconds\"\x0f\n" + @@ -911,14 +1048,29 @@ const file_api_daemon_v1_daemon_proto_rawDesc = "" + "\x05State\x12\t\n" + "\x05READY\x10\x00\x12\v\n" + "\aPENDING\x10\x01\x12\v\n" + - "\aUNKNOWN\x10\x022\xb9\x03\n" + + "\aUNKNOWN\x10\x02\"\xee\x02\n" + + "\x12ExecuteTaskRequest\x12\x1b\n" + + "\ttask_name\x18\x01 \x01(\tR\btaskName\x12\x18\n" + + "\acommand\x18\x02 \x03(\tR\acommand\x12\x1f\n" + + "\vworking_dir\x18\x03 \x01(\tR\n" + + "workingDir\x12]\n" + + "\x10task_environment\x18\x04 \x03(\v22.daemon.v1.ExecuteTaskRequest.TaskEnvironmentEntryR\x0ftaskEnvironment\x12'\n" + + "\x0fnix_environment\x18\x05 \x03(\tR\x0enixEnvironment\x12\x19\n" + + "\bpty_rows\x18\x06 \x01(\x05R\aptyRows\x12\x19\n" + + "\bpty_cols\x18\a \x01(\x05R\aptyCols\x1aB\n" + + "\x14TaskEnvironmentEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\")\n" + + "\x13ExecuteTaskResponse\x12\x12\n" + + "\x04data\x18\x01 \x01(\fR\x04data2\x89\x04\n" + "\rDaemonService\x127\n" + "\x04Ping\x12\x16.daemon.v1.PingRequest\x1a\x17.daemon.v1.PingResponse\x12=\n" + "\x06Status\x12\x18.daemon.v1.StatusRequest\x1a\x19.daemon.v1.StatusResponse\x12C\n" + "\bShutdown\x12\x1a.daemon.v1.ShutdownRequest\x1a\x1b.daemon.v1.ShutdownResponse\x12C\n" + "\bGetGraph\x12\x1a.daemon.v1.GetGraphRequest\x1a\x1b.daemon.v1.GetGraphResponse\x12U\n" + "\x0eGetEnvironment\x12 .daemon.v1.GetEnvironmentRequest\x1a!.daemon.v1.GetEnvironmentResponse\x12O\n" + - "\fGetInputHash\x12\x1e.daemon.v1.GetInputHashRequest\x1a\x1f.daemon.v1.GetInputHashResponseB(Z&go.trai.ch/same/api/daemon/v1;daemonv1b\x06proto3" + "\fGetInputHash\x12\x1e.daemon.v1.GetInputHashRequest\x1a\x1f.daemon.v1.GetInputHashResponse\x12N\n" + + "\vExecuteTask\x12\x1d.daemon.v1.ExecuteTaskRequest\x1a\x1e.daemon.v1.ExecuteTaskResponse0\x01B(Z&go.trai.ch/same/api/daemon/v1;daemonv1b\x06proto3" var ( file_api_daemon_v1_daemon_proto_rawDescOnce sync.Once @@ -933,7 +1085,7 @@ func file_api_daemon_v1_daemon_proto_rawDescGZIP() []byte { } var file_api_daemon_v1_daemon_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_api_daemon_v1_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 18) +var file_api_daemon_v1_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 21) var file_api_daemon_v1_daemon_proto_goTypes = []any{ (GetInputHashResponse_State)(0), // 0: daemon.v1.GetInputHashResponse.State (*PingRequest)(nil), // 1: daemon.v1.PingRequest @@ -950,36 +1102,42 @@ var file_api_daemon_v1_daemon_proto_goTypes = []any{ (*GetEnvironmentResponse)(nil), // 12: daemon.v1.GetEnvironmentResponse (*GetInputHashRequest)(nil), // 13: daemon.v1.GetInputHashRequest (*GetInputHashResponse)(nil), // 14: daemon.v1.GetInputHashResponse - nil, // 15: daemon.v1.TaskProto.ToolsEntry - nil, // 16: daemon.v1.TaskProto.EnvironmentEntry - nil, // 17: daemon.v1.GetEnvironmentRequest.ToolsEntry - nil, // 18: 
daemon.v1.GetInputHashRequest.EnvironmentEntry + (*ExecuteTaskRequest)(nil), // 15: daemon.v1.ExecuteTaskRequest + (*ExecuteTaskResponse)(nil), // 16: daemon.v1.ExecuteTaskResponse + nil, // 17: daemon.v1.TaskProto.ToolsEntry + nil, // 18: daemon.v1.TaskProto.EnvironmentEntry + nil, // 19: daemon.v1.GetEnvironmentRequest.ToolsEntry + nil, // 20: daemon.v1.GetInputHashRequest.EnvironmentEntry + nil, // 21: daemon.v1.ExecuteTaskRequest.TaskEnvironmentEntry } var file_api_daemon_v1_daemon_proto_depIdxs = []int32{ 7, // 0: daemon.v1.GetGraphRequest.config_mtimes:type_name -> daemon.v1.ConfigMtime - 15, // 1: daemon.v1.TaskProto.tools:type_name -> daemon.v1.TaskProto.ToolsEntry - 16, // 2: daemon.v1.TaskProto.environment:type_name -> daemon.v1.TaskProto.EnvironmentEntry + 17, // 1: daemon.v1.TaskProto.tools:type_name -> daemon.v1.TaskProto.ToolsEntry + 18, // 2: daemon.v1.TaskProto.environment:type_name -> daemon.v1.TaskProto.EnvironmentEntry 9, // 3: daemon.v1.GetGraphResponse.tasks:type_name -> daemon.v1.TaskProto - 17, // 4: daemon.v1.GetEnvironmentRequest.tools:type_name -> daemon.v1.GetEnvironmentRequest.ToolsEntry - 18, // 5: daemon.v1.GetInputHashRequest.environment:type_name -> daemon.v1.GetInputHashRequest.EnvironmentEntry + 19, // 4: daemon.v1.GetEnvironmentRequest.tools:type_name -> daemon.v1.GetEnvironmentRequest.ToolsEntry + 20, // 5: daemon.v1.GetInputHashRequest.environment:type_name -> daemon.v1.GetInputHashRequest.EnvironmentEntry 0, // 6: daemon.v1.GetInputHashResponse.state:type_name -> daemon.v1.GetInputHashResponse.State - 1, // 7: daemon.v1.DaemonService.Ping:input_type -> daemon.v1.PingRequest - 3, // 8: daemon.v1.DaemonService.Status:input_type -> daemon.v1.StatusRequest - 5, // 9: daemon.v1.DaemonService.Shutdown:input_type -> daemon.v1.ShutdownRequest - 8, // 10: daemon.v1.DaemonService.GetGraph:input_type -> daemon.v1.GetGraphRequest - 11, // 11: daemon.v1.DaemonService.GetEnvironment:input_type -> daemon.v1.GetEnvironmentRequest - 13, // 12: daemon.v1.DaemonService.GetInputHash:input_type -> daemon.v1.GetInputHashRequest - 2, // 13: daemon.v1.DaemonService.Ping:output_type -> daemon.v1.PingResponse - 4, // 14: daemon.v1.DaemonService.Status:output_type -> daemon.v1.StatusResponse - 6, // 15: daemon.v1.DaemonService.Shutdown:output_type -> daemon.v1.ShutdownResponse - 10, // 16: daemon.v1.DaemonService.GetGraph:output_type -> daemon.v1.GetGraphResponse - 12, // 17: daemon.v1.DaemonService.GetEnvironment:output_type -> daemon.v1.GetEnvironmentResponse - 14, // 18: daemon.v1.DaemonService.GetInputHash:output_type -> daemon.v1.GetInputHashResponse - 13, // [13:19] is the sub-list for method output_type - 7, // [7:13] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name + 21, // 7: daemon.v1.ExecuteTaskRequest.task_environment:type_name -> daemon.v1.ExecuteTaskRequest.TaskEnvironmentEntry + 1, // 8: daemon.v1.DaemonService.Ping:input_type -> daemon.v1.PingRequest + 3, // 9: daemon.v1.DaemonService.Status:input_type -> daemon.v1.StatusRequest + 5, // 10: daemon.v1.DaemonService.Shutdown:input_type -> daemon.v1.ShutdownRequest + 8, // 11: daemon.v1.DaemonService.GetGraph:input_type -> daemon.v1.GetGraphRequest + 11, // 12: daemon.v1.DaemonService.GetEnvironment:input_type -> daemon.v1.GetEnvironmentRequest + 13, // 13: daemon.v1.DaemonService.GetInputHash:input_type -> daemon.v1.GetInputHashRequest + 15, // 14: 
daemon.v1.DaemonService.ExecuteTask:input_type -> daemon.v1.ExecuteTaskRequest + 2, // 15: daemon.v1.DaemonService.Ping:output_type -> daemon.v1.PingResponse + 4, // 16: daemon.v1.DaemonService.Status:output_type -> daemon.v1.StatusResponse + 6, // 17: daemon.v1.DaemonService.Shutdown:output_type -> daemon.v1.ShutdownResponse + 10, // 18: daemon.v1.DaemonService.GetGraph:output_type -> daemon.v1.GetGraphResponse + 12, // 19: daemon.v1.DaemonService.GetEnvironment:output_type -> daemon.v1.GetEnvironmentResponse + 14, // 20: daemon.v1.DaemonService.GetInputHash:output_type -> daemon.v1.GetInputHashResponse + 16, // 21: daemon.v1.DaemonService.ExecuteTask:output_type -> daemon.v1.ExecuteTaskResponse + 15, // [15:22] is the sub-list for method output_type + 8, // [8:15] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name } func init() { file_api_daemon_v1_daemon_proto_init() } @@ -993,7 +1151,7 @@ func file_api_daemon_v1_daemon_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_api_daemon_v1_daemon_proto_rawDesc), len(file_api_daemon_v1_daemon_proto_rawDesc)), NumEnums: 1, - NumMessages: 18, + NumMessages: 21, NumExtensions: 0, NumServices: 1, }, diff --git a/cli/api/daemon/v1/daemon_grpc.pb.go b/cli/api/daemon/v1/daemon_grpc.pb.go index 47467fc..8d4a0cf 100644 --- a/cli/api/daemon/v1/daemon_grpc.pb.go +++ b/cli/api/daemon/v1/daemon_grpc.pb.go @@ -26,6 +26,7 @@ const ( DaemonService_GetGraph_FullMethodName = "/daemon.v1.DaemonService/GetGraph" DaemonService_GetEnvironment_FullMethodName = "/daemon.v1.DaemonService/GetEnvironment" DaemonService_GetInputHash_FullMethodName = "/daemon.v1.DaemonService/GetInputHash" + DaemonService_ExecuteTask_FullMethodName = "/daemon.v1.DaemonService/ExecuteTask" ) // DaemonServiceClient is the client API for DaemonService service. @@ -44,6 +45,8 @@ type DaemonServiceClient interface { GetEnvironment(ctx context.Context, in *GetEnvironmentRequest, opts ...grpc.CallOption) (*GetEnvironmentResponse, error) // GetInputHash returns the cached or pending input hash for a task. GetInputHash(ctx context.Context, in *GetInputHashRequest, opts ...grpc.CallOption) (*GetInputHashResponse, error) + // ExecuteTask runs a task and streams logs back to the client. + ExecuteTask(ctx context.Context, in *ExecuteTaskRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ExecuteTaskResponse], error) } type daemonServiceClient struct { @@ -114,6 +117,25 @@ func (c *daemonServiceClient) GetInputHash(ctx context.Context, in *GetInputHash return out, nil } +func (c *daemonServiceClient) ExecuteTask(ctx context.Context, in *ExecuteTaskRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ExecuteTaskResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[0], DaemonService_ExecuteTask_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[ExecuteTaskRequest, ExecuteTaskResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type DaemonService_ExecuteTaskClient = grpc.ServerStreamingClient[ExecuteTaskResponse] + // DaemonServiceServer is the server API for DaemonService service. // All implementations must embed UnimplementedDaemonServiceServer // for forward compatibility. @@ -130,6 +152,8 @@ type DaemonServiceServer interface { GetEnvironment(context.Context, *GetEnvironmentRequest) (*GetEnvironmentResponse, error) // GetInputHash returns the cached or pending input hash for a task. GetInputHash(context.Context, *GetInputHashRequest) (*GetInputHashResponse, error) + // ExecuteTask runs a task and streams logs back to the client. + ExecuteTask(*ExecuteTaskRequest, grpc.ServerStreamingServer[ExecuteTaskResponse]) error mustEmbedUnimplementedDaemonServiceServer() } @@ -158,6 +182,9 @@ func (UnimplementedDaemonServiceServer) GetEnvironment(context.Context, *GetEnvi func (UnimplementedDaemonServiceServer) GetInputHash(context.Context, *GetInputHashRequest) (*GetInputHashResponse, error) { return nil, status.Error(codes.Unimplemented, "method GetInputHash not implemented") } +func (UnimplementedDaemonServiceServer) ExecuteTask(*ExecuteTaskRequest, grpc.ServerStreamingServer[ExecuteTaskResponse]) error { + return status.Error(codes.Unimplemented, "method ExecuteTask not implemented") +} func (UnimplementedDaemonServiceServer) mustEmbedUnimplementedDaemonServiceServer() {} func (UnimplementedDaemonServiceServer) testEmbeddedByValue() {} @@ -287,6 +314,17 @@ func _DaemonService_GetInputHash_Handler(srv interface{}, ctx context.Context, d return interceptor(ctx, in, info, handler) } +func _DaemonService_ExecuteTask_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ExecuteTaskRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DaemonServiceServer).ExecuteTask(m, &grpc.GenericServerStream[ExecuteTaskRequest, ExecuteTaskResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type DaemonService_ExecuteTaskServer = grpc.ServerStreamingServer[ExecuteTaskResponse] + // DaemonService_ServiceDesc is the grpc.ServiceDesc for DaemonService service. 
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -319,6 +357,12 @@ var DaemonService_ServiceDesc = grpc.ServiceDesc{ Handler: _DaemonService_GetInputHash_Handler, }, }, - Streams: []grpc.StreamDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ExecuteTask", + Handler: _DaemonService_ExecuteTask_Handler, + ServerStreams: true, + }, + }, Metadata: "api/daemon/v1/daemon.proto", } From 9aa9aca38533c9856beef34ba5efa9a4d2633cc6 Mon Sep 17 00:00:00 2001 From: Luca Fondo Date: Wed, 28 Jan 2026 00:40:51 +0100 Subject: [PATCH 08/20] feat(#74): add ExecuteTask to DaemonClient interface - Extend DaemonClient interface with ExecuteTask method - Accept context, task, nixEnv, stdout, stderr parameters - Return error with exit_code field for non-zero exits --- cli/internal/core/ports/daemon.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/cli/internal/core/ports/daemon.go b/cli/internal/core/ports/daemon.go index ba5b4ff..fe75e55 100644 --- a/cli/internal/core/ports/daemon.go +++ b/cli/internal/core/ports/daemon.go @@ -2,6 +2,7 @@ package ports import ( "context" + "io" "time" "go.trai.ch/same/internal/core/domain" @@ -51,6 +52,14 @@ type DaemonClient interface { env map[string]string, ) (InputHashResult, error) + // ExecuteTask runs a task on the daemon and streams output. + ExecuteTask( + ctx context.Context, + task *domain.Task, + nixEnv []string, + stdout, stderr io.Writer, + ) error + // Close releases client resources. Close() error } From c7e4254cf79e761c21853d6025bd12858fe0a722 Mon Sep 17 00:00:00 2001 From: Luca Fondo Date: Wed, 28 Jan 2026 00:41:00 +0100 Subject: [PATCH 09/20] feat(#74): implement client-side task execution - Implement ExecuteTask method on daemon Client - Handle streaming RPC and forward log chunks to io.Writer - Extract exit code from gRPC trailers and status - Add handleExecuteError helper for error handling - Use constants for default PTY dimensions Streaming behavior: - Receives LogChunk messages and writes to stdout - Extracts x-exit-code trailer on completion - Handles UNKNOWN status for task failures --- cli/internal/adapters/daemon/client.go | 81 ++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) diff --git a/cli/internal/adapters/daemon/client.go b/cli/internal/adapters/daemon/client.go index 9b639d8..9adae64 100644 --- a/cli/internal/adapters/daemon/client.go +++ b/cli/internal/adapters/daemon/client.go @@ -4,6 +4,8 @@ package daemon import ( "context" + "io" + "strconv" "time" "go.trai.ch/same/api/daemon/v1" @@ -11,7 +13,9 @@ import ( "go.trai.ch/same/internal/core/ports" "go.trai.ch/zerr" "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/status" ) // Client implements ports.DaemonClient. @@ -173,6 +177,83 @@ func (c *Client) GetInputHash( }, nil } +// ExecuteTask implements ports.DaemonClient. 
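+//
+// A minimal usage sketch (hypothetical caller, assuming a dialed Client c, a
+// populated *domain.Task t, and a resolved nix environment nixEnv):
+//
+//	if err := c.ExecuteTask(ctx, t, nixEnv, os.Stdout, os.Stderr); err != nil {
+//		// A task-level failure carries the exit code as a zerr "exit_code" field.
+//		log.Printf("task failed: %v", err)
+//	}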
+func (c *Client) ExecuteTask( + ctx context.Context, + task *domain.Task, + nixEnv []string, + stdout, _ io.Writer, +) error { + // Build request + const ( + defaultPtyRows = 24 + defaultPtyCols = 80 + ) + + req := &daemonv1.ExecuteTaskRequest{ + TaskName: task.Name.String(), + Command: task.Command, + WorkingDir: task.WorkingDir.String(), + TaskEnvironment: task.Environment, + NixEnvironment: nixEnv, + PtyRows: defaultPtyRows, + PtyCols: defaultPtyCols, + } + + // Start streaming RPC + stream, err := c.client.ExecuteTask(ctx, req) + if err != nil { + return zerr.Wrap(err, "ExecuteTask RPC failed") + } + + // Receive and forward log chunks + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return c.handleExecuteError(err, stream) + } + if _, writeErr := stdout.Write(resp.Data); writeErr != nil { + return zerr.Wrap(writeErr, "failed to write log chunk") + } + } + + // Check trailer for success case + trailer := stream.Trailer() + if exitStr := trailer.Get("x-exit-code"); len(exitStr) > 0 { + exitCode, _ := strconv.Atoi(exitStr[0]) + if exitCode != 0 { + return zerr.With(domain.ErrTaskExecutionFailed, "exit_code", exitCode) + } + } + + return nil +} + +// handleExecuteError extracts the exit code from a failed ExecuteTask RPC. +func (c *Client) handleExecuteError(err error, stream grpc.ClientStream) error { + st, ok := status.FromError(err) + if !ok { + return zerr.Wrap(err, "ExecuteTask failed") + } + + // For non-zero exit codes, we get UNKNOWN status + if st.Code() == codes.Unknown { + // Try to extract exit code from trailer + trailer := stream.Trailer() + if exitStr := trailer.Get("x-exit-code"); len(exitStr) > 0 { + exitCode, _ := strconv.Atoi(exitStr[0]) + return zerr.With(domain.ErrTaskExecutionFailed, "exit_code", exitCode) + } + // If no trailer, return the status error + return zerr.Wrap(err, "ExecuteTask failed with unknown error") + } + + return zerr.Wrap(err, "ExecuteTask failed") +} + // stringsToInternedStrings converts a slice of strings to InternedString. 
func (c *Client) stringsToInternedStrings(strs []string) []domain.InternedString { result := make([]domain.InternedString, len(strs)) From 671fa42a679c4c46ddf4db718d7cf0715578986a Mon Sep 17 00:00:00 2001 From: Luca Fondo Date: Wed, 28 Jan 2026 00:41:08 +0100 Subject: [PATCH 10/20] feat(#74): implement server-side task execution - Add executor field to Server struct - Update NewServerWithDeps to accept executor parameter - Implement ExecuteTask RPC handler with streaming - Add streamWriter helper for io.Writer interface - Set exit code in gRPC trailer (x-exit-code) - Return UNKNOWN status for non-zero exit codes Implementation details: - Reconstruct domain.Task from ExecuteTaskRequest - Execute via injected executor with PTY support - Stream output back to client via LogChunk messages - Handle exit codes consistently with local execution --- cli/internal/adapters/daemon/server.go | 60 ++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/cli/internal/adapters/daemon/server.go b/cli/internal/adapters/daemon/server.go index 8dad80d..cbe7d96 100644 --- a/cli/internal/adapters/daemon/server.go +++ b/cli/internal/adapters/daemon/server.go @@ -6,6 +6,7 @@ import ( "net" "os" "path/filepath" + "strconv" "go.trai.ch/same/api/daemon/v1" "go.trai.ch/same/internal/adapters/watcher" @@ -14,6 +15,7 @@ import ( "go.trai.ch/zerr" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) @@ -24,6 +26,7 @@ type Server struct { cache *ServerCache configLoader ports.ConfigLoader envFactory ports.EnvironmentFactory + executor ports.Executor watcherSvc *WatcherService grpcServer *grpc.Server listener net.Listener @@ -51,12 +54,14 @@ func NewServerWithDeps( lifecycle *Lifecycle, configLoader ports.ConfigLoader, envFactory ports.EnvironmentFactory, + executor ports.Executor, ) *Server { s := &Server{ lifecycle: lifecycle, cache: NewServerCache(), configLoader: configLoader, envFactory: envFactory, + executor: executor, grpcServer: grpc.NewServer(), } daemonv1.RegisterDaemonServiceServer(s.grpcServer, s) @@ -314,3 +319,58 @@ func (s *Server) GetInputHash( Hash: result.Hash, }, nil } + +// streamWriter implements io.Writer for streaming task output. +type streamWriter struct { + stream daemonv1.DaemonService_ExecuteTaskServer +} + +func (w *streamWriter) Write(p []byte) (int, error) { + if err := w.stream.Send(&daemonv1.ExecuteTaskResponse{Data: p}); err != nil { + return 0, err + } + return len(p), nil +} + +// ExecuteTask implements DaemonService.ExecuteTask. 
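+//
+// The handler mirrors the client-side contract: the x-exit-code trailer is
+// always set, and a non-zero exit additionally surfaces as a codes.Unknown
+// status so the stream terminates with an error.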
+func (s *Server) ExecuteTask( + req *daemonv1.ExecuteTaskRequest, + stream daemonv1.DaemonService_ExecuteTaskServer, +) error { + // Reset inactivity timer + s.lifecycle.ResetTimer() + + // Guard: ensure server is configured for task execution + if s.executor == nil { + return status.Error(codes.FailedPrecondition, "server not configured for task execution") + } + + // Reconstruct domain.Task from request + task := &domain.Task{ + Name: domain.NewInternedString(req.TaskName), + Command: req.Command, + WorkingDir: domain.NewInternedString(req.WorkingDir), + Environment: req.TaskEnvironment, + } + + // Create streaming writer + writer := &streamWriter{stream: stream} + + // Execute with PTY (via executor) + err := s.executor.Execute(stream.Context(), task, req.NixEnvironment, writer, writer) + + // Extract exit code + exitCode := 0 + if err != nil { + exitCode = 1 + } + + // Set trailer with exit code + stream.SetTrailer(metadata.Pairs("x-exit-code", strconv.Itoa(exitCode))) + + // Return error status for non-zero exit + if exitCode != 0 { + return status.Errorf(codes.Unknown, "task failed with exit code %d", exitCode) + } + return nil +} From 44e68d463f745bb899c376f550841bc85d998265 Mon Sep 17 00:00:00 2001 From: Luca Fondo Date: Wed, 28 Jan 2026 00:41:16 +0100 Subject: [PATCH 11/20] feat(#74): integrate remote task execution into scheduler - Add noDaemon field to Scheduler struct - Add WithNoDaemon method for CLI flag propagation - Refactor executeTask to use executeWithStrategy - Implement executeWithFallback for remote/local execution - Add isConnectionError helper for fallback detection - Pass NoDaemon option through app.Run to scheduler Execution strategy: - Try remote execution via daemon when available - Fallback to local on connection errors - Respect --no-daemon flag to bypass remote execution - Maintain backward compatibility with local-only mode --- cli/internal/app/app.go | 10 ++++- cli/internal/engine/scheduler/scheduler.go | 51 +++++++++++++++++++++- 2 files changed, 57 insertions(+), 4 deletions(-) diff --git a/cli/internal/app/app.go b/cli/internal/app/app.go index 2763e9e..6592643 100644 --- a/cli/internal/app/app.go +++ b/cli/internal/app/app.go @@ -79,6 +79,7 @@ type RunOptions struct { NoCache bool Inspect bool OutputMode string + NoDaemon bool // When true, bypass remote daemon execution } // Run executes the build process for the specified targets. @@ -168,7 +169,7 @@ func (a *App) Run(ctx context.Context, targetNames []string, opts RunOptions) er a.resolver, tracer, a.envFactory, - ) + ).WithNoDaemon(opts.NoDaemon) // Pass daemon client to scheduler if available if daemonAvailable { @@ -259,7 +260,12 @@ func setupOTel(bridge *telemetry.Bridge) { // ServeDaemon starts the daemon server. 
func (a *App) ServeDaemon(ctx context.Context) error { lifecycle := daemon.NewLifecycle(domain.DaemonInactivityTimeout) - server := daemon.NewServer(lifecycle) + server := daemon.NewServerWithDeps( + lifecycle, + a.configLoader, + a.envFactory, + a.executor, + ) a.logger.Info("daemon starting") diff --git a/cli/internal/engine/scheduler/scheduler.go b/cli/internal/engine/scheduler/scheduler.go index f0078d1..8cc9574 100644 --- a/cli/internal/engine/scheduler/scheduler.go +++ b/cli/internal/engine/scheduler/scheduler.go @@ -3,6 +3,7 @@ package scheduler import ( "context" "errors" + "io" "os" "path/filepath" "runtime" @@ -40,6 +41,7 @@ type Scheduler struct { tracer ports.Tracer envFactory ports.EnvironmentFactory daemon ports.DaemonClient + noDaemon bool // When true, skip remote execution mu sync.RWMutex taskStatus map[domain.InternedString]TaskStatus @@ -74,6 +76,12 @@ func (s *Scheduler) WithDaemon(daemon ports.DaemonClient) *Scheduler { return s } +// WithNoDaemon sets whether to skip remote execution and returns itself for chaining. +func (s *Scheduler) WithNoDaemon(noDaemon bool) *Scheduler { + s.noDaemon = noDaemon + return s +} + // initTaskStatuses initializes the status of tasks in the graph to Pending. func (s *Scheduler) initTaskStatuses(tasks []domain.InternedString) { s.mu.Lock() @@ -443,6 +451,10 @@ func (state *schedulerRunState) schedule() { } func (state *schedulerRunState) executeTask(t *domain.Task) { + state.executeWithStrategy(t) +} + +func (state *schedulerRunState) executeWithStrategy(t *domain.Task) { // Execute the task logic within a function to ensure the span is ended // BEFORE we send the result to the channel. This prevents race conditions // in tests where the scheduler loop finishes before the span is recorded. @@ -494,8 +506,8 @@ func (state *schedulerRunState) executeTask(t *domain.Task) { env = cachedEnv.([]string) } - // Step 4: Execute - err = state.s.executor.Execute(ctx, t, env, span, span) + // Step 4: Execute (Remote or Local) + err = state.executeWithFallback(ctx, t, env, span, span) if err != nil { span.RecordError(err) } @@ -511,6 +523,41 @@ func (state *schedulerRunState) executeTask(t *domain.Task) { state.resultsCh <- res } +func (state *schedulerRunState) executeWithFallback( + ctx context.Context, + t *domain.Task, + env []string, + stdout, stderr io.Writer, +) error { + var execErr error + + // Try remote execution via daemon if available and not disabled + if state.s.daemon != nil && !state.s.noDaemon { + execErr = state.s.daemon.ExecuteTask(ctx, t, env, stdout, stderr) + if execErr != nil && isConnectionError(execErr) { + // Fallback to local on connection errors only + execErr = state.s.executor.Execute(ctx, t, env, stdout, stderr) + } + } else { + // Local execution + execErr = state.s.executor.Execute(ctx, t, env, stdout, stderr) + } + + return execErr +} + +// isConnectionError checks if the error is a connection-related error. 
+func isConnectionError(err error) bool { + if err == nil { + return false + } + // Check for gRPC connection errors + errStr := err.Error() + return strings.Contains(errStr, "connection") || + strings.Contains(errStr, "transport") || + strings.Contains(errStr, "unavailable") +} + func (state *schedulerRunState) computeInputHash(t *domain.Task) (skipped bool, hash string, err error) { // If task is configured to always rebuild, bypass cache if t.RebuildStrategy == domain.RebuildAlways { From eb895011c58a85510a3e0324b1485bcca6a0d1cf Mon Sep 17 00:00:00 2001 From: Luca Fondo Date: Wed, 28 Jan 2026 00:41:24 +0100 Subject: [PATCH 12/20] feat(#74): add --no-daemon flag to run command - Add --no-daemon flag to same run command - Pass flag through app.Run to scheduler.WithNoDaemon - Allow explicit bypass of remote daemon execution - Update help text to document flag behavior Use cases: - Force local execution even when daemon is available - Debugging remote execution issues - Testing local vs remote behavior differences --- cli/cmd/same/commands/run.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cli/cmd/same/commands/run.go b/cli/cmd/same/commands/run.go index 67055b9..3f6e472 100644 --- a/cli/cmd/same/commands/run.go +++ b/cli/cmd/same/commands/run.go @@ -20,6 +20,7 @@ func (c *CLI) newRunCmd() *cobra.Command { inspect, _ := cmd.Flags().GetBool("inspect") outputMode, _ := cmd.Flags().GetString("output-mode") ci, _ := cmd.Flags().GetBool("ci") + noDaemon, _ := cmd.Flags().GetBool("no-daemon") // If --ci is set, override output-mode to "linear" if ci { @@ -30,6 +31,7 @@ func (c *CLI) newRunCmd() *cobra.Command { NoCache: noCache, Inspect: inspect, OutputMode: outputMode, + NoDaemon: noDaemon, }) }, } @@ -37,5 +39,6 @@ func (c *CLI) newRunCmd() *cobra.Command { cmd.Flags().BoolP("inspect", "i", false, "Inspect the TUI after build completion (prevents auto-exit)") cmd.Flags().StringP("output-mode", "o", "auto", "Output mode: auto, tui, or linear") cmd.Flags().Bool("ci", false, "Use linear output mode (shorthand for --output-mode=linear)") + cmd.Flags().Bool("no-daemon", false, "Bypass remote daemon execution and run locally") return cmd } From dd3615958549369382996ef27197cedcb52b3eb4 Mon Sep 17 00:00:00 2001 From: Luca Fondo Date: Wed, 28 Jan 2026 00:41:36 +0100 Subject: [PATCH 13/20] chore(#74): add buf.lock for googleapis dependency - Lock buf dependency on googleapis for google/rpc/status.proto - Required for ExecuteTask RPC error handling --- cli/buf.lock | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 cli/buf.lock diff --git a/cli/buf.lock b/cli/buf.lock new file mode 100644 index 0000000..9fe6d00 --- /dev/null +++ b/cli/buf.lock @@ -0,0 +1,8 @@ +# Generated by buf. DO NOT EDIT. 
+version: v1 +deps: + - remote: buf.build + owner: googleapis + repository: googleapis + commit: 004180b77378443887d3b55cabc00384 + digest: shake256:d26c7c2fd95f0873761af33ca4a0c0d92c8577122b6feb74eb3b0a57ebe47a98ab24a209a0e91945ac4c77204e9da0c2de0020b2cedc27bdbcdea6c431eec69b From 54ab76a1bfd34cbcb2b0249e4c31bd4a778faf53 Mon Sep 17 00:00:00 2001 From: Luca Fondo Date: Wed, 28 Jan 2026 01:07:20 +0100 Subject: [PATCH 14/20] refactor(#74): improve error handling and robustness - Extract actual exit codes from executor errors instead of always returning 1 - Handle malformed exit code trailer values and preserve original errors - Replace string-based error detection with proper gRPC status codes - Remove unused google/rpc/status.proto import - Document stderr parameter handling in PTY mode - Add robust error chain traversal for wrapped gRPC errors - Include both Unavailable and DeadlineExceeded as connection errors --- cli/api/daemon/v1/daemon.proto | 2 -- cli/go.mod | 2 +- cli/internal/adapters/daemon/client.go | 14 +++++++-- cli/internal/adapters/daemon/server.go | 35 ++++++++++++++++++---- cli/internal/engine/scheduler/scheduler.go | 26 +++++++++++----- 5 files changed, 61 insertions(+), 18 deletions(-) diff --git a/cli/api/daemon/v1/daemon.proto b/cli/api/daemon/v1/daemon.proto index 90c9377..cf238fd 100644 --- a/cli/api/daemon/v1/daemon.proto +++ b/cli/api/daemon/v1/daemon.proto @@ -4,8 +4,6 @@ package daemon.v1; option go_package = "go.trai.ch/same/api/daemon/v1;daemonv1"; -import "google/rpc/status.proto"; - service DaemonService { // Ping checks daemon health and resets the inactivity timer. rpc Ping(PingRequest) returns (PingResponse); diff --git a/cli/go.mod b/cli/go.mod index 8afbf35..e087bc5 100644 --- a/cli/go.mod +++ b/cli/go.mod @@ -21,6 +21,7 @@ require ( go.uber.org/mock v0.6.0 golang.org/x/sync v0.19.0 golang.org/x/term v0.39.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda google.golang.org/grpc v1.78.0 google.golang.org/protobuf v1.36.11 gopkg.in/yaml.v3 v3.0.1 @@ -59,5 +60,4 @@ require ( golang.org/x/sys v0.40.0 // indirect golang.org/x/text v0.32.0 // indirect golang.org/x/tools v0.40.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect ) diff --git a/cli/internal/adapters/daemon/client.go b/cli/internal/adapters/daemon/client.go index 9adae64..83466aa 100644 --- a/cli/internal/adapters/daemon/client.go +++ b/cli/internal/adapters/daemon/client.go @@ -178,6 +178,9 @@ func (c *Client) GetInputHash( } // ExecuteTask implements ports.DaemonClient. +// Note: stderr is intentionally merged into stdout for PTY mode. This is because +// PTY sessions combine both output streams by design. For non-PTY scenarios, +// consider separate stderr handling in the Executor implementation. 
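+//
+// Concretely, each chunk the server emits arrives as ExecuteTaskResponse.Data
+// and is written to the single stdout writer, so the interleaving the caller
+// sees matches what the PTY produced.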
func (c *Client) ExecuteTask( ctx context.Context, task *domain.Task, @@ -223,7 +226,10 @@ func (c *Client) ExecuteTask( // Check trailer for success case trailer := stream.Trailer() if exitStr := trailer.Get("x-exit-code"); len(exitStr) > 0 { - exitCode, _ := strconv.Atoi(exitStr[0]) + exitCode, err := strconv.Atoi(exitStr[0]) + if err != nil { + return zerr.Wrap(err, "malformed exit code in trailer") + } if exitCode != 0 { return zerr.With(domain.ErrTaskExecutionFailed, "exit_code", exitCode) } @@ -244,7 +250,11 @@ func (c *Client) handleExecuteError(err error, stream grpc.ClientStream) error { // Try to extract exit code from trailer trailer := stream.Trailer() if exitStr := trailer.Get("x-exit-code"); len(exitStr) > 0 { - exitCode, _ := strconv.Atoi(exitStr[0]) + exitCode, parseErr := strconv.Atoi(exitStr[0]) + if parseErr != nil { + wrapped := zerr.Wrap(parseErr, "malformed exit code in trailer") + return zerr.With(wrapped, "original_error", err.Error()) + } return zerr.With(domain.ErrTaskExecutionFailed, "exit_code", exitCode) } // If no trailer, return the status error diff --git a/cli/internal/adapters/daemon/server.go b/cli/internal/adapters/daemon/server.go index cbe7d96..b66112f 100644 --- a/cli/internal/adapters/daemon/server.go +++ b/cli/internal/adapters/daemon/server.go @@ -2,6 +2,7 @@ package daemon import ( "context" + "errors" "fmt" "net" "os" @@ -332,6 +333,33 @@ func (w *streamWriter) Write(p []byte) (int, error) { return len(p), nil } +// getExitCode extracts the exit code from an error. +// It returns 0 for no error, or the actual exit code if the error +// contains one via zerr field, defaulting to 1 for generic errors. +func getExitCode(err error) int { + if err == nil { + return 0 + } + + // Check if this is a zerr with an exit_code field + // zerr implements an interface that allows field extraction + type fielder interface { + Field(key string) (interface{}, bool) + } + + var fieldErr fielder + if errors.As(err, &fieldErr) { + if code, found := fieldErr.Field("exit_code"); found { + if exitCode, ok := code.(int); ok { + return exitCode + } + } + } + + // Default to exit code 1 for generic errors + return 1 +} + // ExecuteTask implements DaemonService.ExecuteTask. func (s *Server) ExecuteTask( req *daemonv1.ExecuteTaskRequest, @@ -359,11 +387,8 @@ func (s *Server) ExecuteTask( // Execute with PTY (via executor) err := s.executor.Execute(stream.Context(), task, req.NixEnvironment, writer, writer) - // Extract exit code - exitCode := 0 - if err != nil { - exitCode = 1 - } + // Extract exit code from error + exitCode := getExitCode(err) // Set trailer with exit code stream.SetTrailer(metadata.Pairs("x-exit-code", strconv.Itoa(exitCode))) diff --git a/cli/internal/engine/scheduler/scheduler.go b/cli/internal/engine/scheduler/scheduler.go index 8cc9574..c867162 100644 --- a/cli/internal/engine/scheduler/scheduler.go +++ b/cli/internal/engine/scheduler/scheduler.go @@ -16,6 +16,8 @@ import ( "go.trai.ch/same/internal/core/ports" "go.trai.ch/zerr" "golang.org/x/sync/errgroup" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) // TaskStatus represents the status of a task. @@ -93,10 +95,10 @@ func (s *Scheduler) initTaskStatuses(tasks []domain.InternedString) { } // updateStatus updates the status of a task. 
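+// The parameter below is renamed to taskStatus, presumably to avoid shadowing
+// the google.golang.org/grpc/status package that this patch imports.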
-func (s *Scheduler) updateStatus(name domain.InternedString, status TaskStatus) { +func (s *Scheduler) updateStatus(name domain.InternedString, taskStatus TaskStatus) { s.mu.Lock() defer s.mu.Unlock() - s.taskStatus[name] = status + s.taskStatus[name] = taskStatus } // Run executes the tasks in the graph with the specified parallelism. @@ -546,16 +548,24 @@ func (state *schedulerRunState) executeWithFallback( return execErr } -// isConnectionError checks if the error is a connection-related error. +// isConnectionError checks if the error is a gRPC connection-related error. func isConnectionError(err error) bool { if err == nil { return false } - // Check for gRPC connection errors - errStr := err.Error() - return strings.Contains(errStr, "connection") || - strings.Contains(errStr, "transport") || - strings.Contains(errStr, "unavailable") + + // Unwrap error chain to handle wrapped gRPC errors + for unwrapped := err; unwrapped != nil; unwrapped = errors.Unwrap(unwrapped) { + st, ok := status.FromError(unwrapped) + if ok { + // Check for codes indicating connection issues + switch st.Code() { + case codes.Unavailable, codes.DeadlineExceeded: + return true + } + } + } + return false } func (state *schedulerRunState) computeInputHash(t *domain.Task) (skipped bool, hash string, err error) { From d3106d3987bae2edf616de60fbf20e9ed0405f3e Mon Sep 17 00:00:00 2001 From: Luca Fondo Date: Wed, 28 Jan 2026 11:43:38 +0100 Subject: [PATCH 15/20] fix(#74): add missing validation and absolute paths - Add graph.Validate() call in GetGraph to populate executionOrder - Use absolute socket path in client - Use absolute working directory in app for robustness - Update tests to accept any path with gomock.Any() --- cli/cmd/same/commands/commands_test.go | 6 +++--- cli/internal/adapters/daemon/client.go | 7 ++++++- cli/internal/adapters/daemon/server.go | 5 +++++ cli/internal/app/app.go | 15 ++++++++++----- cli/internal/app/app_test.go | 20 ++++++++++---------- go.work.sum | 16 ++++++++++++++++ 6 files changed, 50 insertions(+), 19 deletions(-) diff --git a/cli/cmd/same/commands/commands_test.go b/cli/cmd/same/commands/commands_test.go index 162a15b..327edc2 100644 --- a/cli/cmd/same/commands/commands_test.go +++ b/cli/cmd/same/commands/commands_test.go @@ -47,7 +47,7 @@ func TestRun_Success(t *testing.T) { mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) // 1. Loader.Load is called first - mockLoader.EXPECT().Load(".").Return(g, nil).Times(1) + mockLoader.EXPECT().Load(gomock.Any()).Return(g, nil).Times(1) mockResolver.EXPECT().ResolveInputs(gomock.Any(), gomock.Any()).Return([]string{}, nil).Times(1) // 2. 
Hasher.ComputeInputHash is called once to compute input hash @@ -384,8 +384,8 @@ func TestRun_OutputModeFlags(t *testing.T) { cli := commands.New(a) mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) - mockLoader.EXPECT().Load(".").Return(g, nil).Times(1) - mockResolver.EXPECT().ResolveInputs(gomock.Any(), ".").Return([]string{}, nil).Times(1) + mockLoader.EXPECT().Load(gomock.Any()).Return(g, nil).Times(1) + mockResolver.EXPECT().ResolveInputs(gomock.Any(), gomock.Any()).Return([]string{}, nil).Times(1) mockHasher.EXPECT().ComputeInputHash(gomock.Any(), gomock.Any(), gomock.Any()).Return("hash123", nil).Times(1) mockStore.EXPECT().Get("build").Return(nil, nil).Times(1) mockExecutor.EXPECT().Execute( diff --git a/cli/internal/adapters/daemon/client.go b/cli/internal/adapters/daemon/client.go index 83466aa..b0e98ed 100644 --- a/cli/internal/adapters/daemon/client.go +++ b/cli/internal/adapters/daemon/client.go @@ -5,6 +5,7 @@ package daemon import ( "context" "io" + "path/filepath" "strconv" "time" @@ -28,7 +29,11 @@ type Client struct { // Note: grpc.NewClient returns immediately; actual connection happens lazily on first RPC. func Dial() (*Client, error) { socketPath := domain.DefaultDaemonSocketPath() - target := "unix://" + socketPath + absSocketPath, err := filepath.Abs(socketPath) + if err != nil { + return nil, zerr.Wrap(err, "failed to resolve absolute socket path") + } + target := "unix://" + absSocketPath conn, err := grpc.NewClient(target, grpc.WithTransportCredentials(insecure.NewCredentials()), diff --git a/cli/internal/adapters/daemon/server.go b/cli/internal/adapters/daemon/server.go index b66112f..b2801ab 100644 --- a/cli/internal/adapters/daemon/server.go +++ b/cli/internal/adapters/daemon/server.go @@ -195,6 +195,11 @@ func (s *Server) GetGraph(ctx context.Context, req *daemonv1.GetGraphRequest) (* return nil, zerr.Wrap(err, "failed to load graph") } + // Validate the graph to populate executionOrder for Walk() + if err := graph.Validate(); err != nil { + return nil, zerr.Wrap(err, "failed to validate graph") + } + // Store in cache entry := &domain.GraphCacheEntry{ Graph: graph, diff --git a/cli/internal/app/app.go b/cli/internal/app/app.go index 6592643..7bf4098 100644 --- a/cli/internal/app/app.go +++ b/cli/internal/app/app.go @@ -86,11 +86,16 @@ type RunOptions struct { // //nolint:cyclop // orchestration function func (a *App) Run(ctx context.Context, targetNames []string, opts RunOptions) error { + // 0. Get absolute path of current working directory + cwd, err := os.Getwd() + if err != nil { + return zerr.Wrap(err, "failed to get current working directory") + } + // 1. 
Connect to daemon (if available) and load graph from daemon or fallback to local var graph *domain.Graph var client ports.DaemonClient var daemonAvailable bool - var err error client, clientErr := a.connector.Connect(ctx) if clientErr == nil && client != nil { @@ -101,23 +106,23 @@ func (a *App) Run(ctx context.Context, targetNames []string, opts RunOptions) er }() // Discover config paths and mtimes - mtimes, mtimeErr := a.configLoader.DiscoverConfigPaths(".") + mtimes, mtimeErr := a.configLoader.DiscoverConfigPaths(cwd) if mtimeErr != nil { return zerr.Wrap(mtimeErr, "failed to discover config paths") } // Try to get graph from daemon - graph, _, err = client.GetGraph(ctx, ".", mtimes) + graph, _, err = client.GetGraph(ctx, cwd, mtimes) if err != nil { // Fallback to local loading if daemon fails - if graph, err = a.configLoader.Load("."); err != nil { + if graph, err = a.configLoader.Load(cwd); err != nil { return zerr.Wrap(err, "failed to load configuration") } } } else { // Daemon not available, use local loading var loadErr error - graph, loadErr = a.configLoader.Load(".") + graph, loadErr = a.configLoader.Load(cwd) if loadErr != nil { return zerr.Wrap(loadErr, "failed to load configuration") } diff --git a/cli/internal/app/app_test.go b/cli/internal/app/app_test.go index 16b4ba0..2d325cb 100644 --- a/cli/internal/app/app_test.go +++ b/cli/internal/app/app_test.go @@ -65,9 +65,9 @@ func TestApp_Build(t *testing.T) { WithDisableTick() mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) - mockResolver.EXPECT().ResolveInputs(gomock.Any(), ".").Return([]string{}, nil) + mockResolver.EXPECT().ResolveInputs(gomock.Any(), gomock.Any()).Return([]string{}, nil) // Expectations - mockLoader.EXPECT().Load(".").Return(g, nil) + mockLoader.EXPECT().Load(gomock.Any()).Return(g, nil) mockHasher.EXPECT().ComputeInputHash(task, nil, []string{}).Return("hash", nil) mockStore.EXPECT().Get("task1").Return(nil, nil) mockExecutor.EXPECT().Execute(gomock.Any(), task, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) @@ -125,7 +125,7 @@ func TestApp_Run_NoTargets(t *testing.T) { mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) // Expectations - mockLoader.EXPECT().Load(".").Return(domain.NewGraph(), nil) + mockLoader.EXPECT().Load(gomock.Any()).Return(domain.NewGraph(), nil) // Execute err = a.Run(context.Background(), nil, app.RunOptions{NoCache: false}) @@ -181,7 +181,7 @@ func TestApp_Run_ConfigLoaderError(t *testing.T) { mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) // Expectations - loader fails - mockLoader.EXPECT().Load(".").Return(nil, errors.New("config load error")) + mockLoader.EXPECT().Load(gomock.Any()).Return(nil, errors.New("config load error")) // Execute err = a.Run(context.Background(), []string{"task1"}, app.RunOptions{NoCache: false}) @@ -246,9 +246,9 @@ func TestApp_Run_BuildExecutionFailed(t *testing.T) { WithDisableTick() mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) - mockResolver.EXPECT().ResolveInputs(gomock.Any(), ".").Return([]string{}, nil) + mockResolver.EXPECT().ResolveInputs(gomock.Any(), gomock.Any()).Return([]string{}, nil) // Expectations - mockLoader.EXPECT().Load(".").Return(g, nil) + mockLoader.EXPECT().Load(gomock.Any()).Return(g, nil) mockHasher.EXPECT().ComputeInputHash(task, nil, []string{}).Return("hash", nil) mockStore.EXPECT().Get("task1").Return(nil, nil) // Mock Executor failure @@ 
-444,8 +444,8 @@ func TestApp_Run_LinearMode(t *testing.T) { WithDisableTick() mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) - mockResolver.EXPECT().ResolveInputs(gomock.Any(), ".").Return([]string{}, nil) - mockLoader.EXPECT().Load(".").Return(g, nil) + mockResolver.EXPECT().ResolveInputs(gomock.Any(), gomock.Any()).Return([]string{}, nil) + mockLoader.EXPECT().Load(gomock.Any()).Return(g, nil) mockHasher.EXPECT().ComputeInputHash(task, nil, []string{}).Return("hash", nil) mockStore.EXPECT().Get("task1").Return(nil, nil) mockExecutor.EXPECT().Execute(gomock.Any(), task, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) @@ -506,8 +506,8 @@ func TestApp_Run_InspectMode(t *testing.T) { WithDisableTick() mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) - mockResolver.EXPECT().ResolveInputs(gomock.Any(), ".").Return([]string{}, nil) - mockLoader.EXPECT().Load(".").Return(g, nil) + mockResolver.EXPECT().ResolveInputs(gomock.Any(), gomock.Any()).Return([]string{}, nil) + mockLoader.EXPECT().Load(gomock.Any()).Return(g, nil) mockHasher.EXPECT().ComputeInputHash(task, nil, []string{}).Return("hash", nil) mockStore.EXPECT().Get("task1").Return(nil, nil) mockExecutor.EXPECT().Execute(gomock.Any(), task, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) diff --git a/go.work.sum b/go.work.sum index 5681213..5483c55 100644 --- a/go.work.sum +++ b/go.work.sum @@ -1,8 +1,11 @@ cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0/go.mod h1:2bIszWvQRlJVmJLiuLhukLImRjKPcYdzzsx6darK02A= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= @@ -19,6 +22,7 @@ github.com/charmbracelet/x/exp/golden v0.0.0-20240806155701-69247e0abc2a/go.mod github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ= github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U= github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0= github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4= github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 h1:q2hJAaP1k2wIvVRd/hEHD7lacgqrCPS+k8g1MndzfWY= github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= @@ -26,25 +30,33 @@ github.com/cpuguy83/go-md2man/v2 v2.0.6 
h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= +github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM= github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs= github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= +github.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo= github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= +github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/sahilm/fuzzy v0.1.1 h1:ceu5RHF8DGgoi+/dR5PsECjCDH1BE3Fnmpo7aVXOdRA= github.com/sahilm/fuzzy v0.1.1/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y= github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= +github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= @@ -52,11 +64,14 @@ github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod 
h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs= go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc h1:bH6xUXay0AIFMElXG2rQ4uiE+7ncwtiOdPfYK1NK2XA= @@ -66,4 +81,5 @@ golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda h1:+2XxjfsAu6vqFxwGBRcHiMaDCuZiqXGDUDVWVtrFAnE= google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= From 0bb1d74d4edac38b1dc39ac07f3231471f451ce6 Mon Sep 17 00:00:00 2001 From: Luca Fondo Date: Wed, 28 Jan 2026 11:52:40 +0100 Subject: [PATCH 16/20] perf(#74): configure aggressive gRPC backoff for UDS - Add backoff constants (50ms base, 200ms max, 1.5 multiplier) - Configure grpc.WithConnectParams for faster connection establishment - Optimize local Unix Domain Socket connection behavior --- cli/internal/adapters/daemon/client.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/cli/internal/adapters/daemon/client.go b/cli/internal/adapters/daemon/client.go index b0e98ed..05705e3 100644 --- a/cli/internal/adapters/daemon/client.go +++ b/cli/internal/adapters/daemon/client.go @@ -14,11 +14,20 @@ import ( "go.trai.ch/same/internal/core/ports" "go.trai.ch/zerr" "google.golang.org/grpc" + "google.golang.org/grpc/backoff" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/status" ) +const ( + // gRPC backoff configuration for fast connection establishment. + grpcBaseDelay = 50 * time.Millisecond + grpcMaxDelay = 200 * time.Millisecond + grpcMinConnectTimeout = 100 * time.Millisecond + grpcBackoffMultiplier = 1.5 +) + // Client implements ports.DaemonClient. 
type Client struct { conn *grpc.ClientConn @@ -37,6 +46,14 @@ func Dial() (*Client, error) { conn, err := grpc.NewClient(target, grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithConnectParams(grpc.ConnectParams{ + Backoff: backoff.Config{ + BaseDelay: grpcBaseDelay, + Multiplier: grpcBackoffMultiplier, + MaxDelay: grpcMaxDelay, + }, + MinConnectTimeout: grpcMinConnectTimeout, + }), ) if err != nil { return nil, zerr.Wrap(err, "daemon client creation failed") From b365f721656deb385b1c91f9af3501fdf097f666 Mon Sep 17 00:00:00 2001 From: Luca Fondo Date: Wed, 28 Jan 2026 12:14:10 +0100 Subject: [PATCH 17/20] fix(#74): prevent status from resetting timer and auto-starting daemon - Remove ResetTimer() from Ping() RPC (liveness check shouldn't count as activity) - Remove ResetTimer() from Status() RPC (read-only query shouldn't count as activity) - Add IsRunning() guard in DaemonStatus() to prevent daemon auto-spawn - Format Last Activity as HH:MM:SS (X ago) for better readability Only real work (GetGraph, GetEnvironment, GetInputHash, ExecuteTask) now resets the idle timer. --- cli/internal/adapters/daemon/server.go | 2 -- cli/internal/app/app.go | 18 ++++++++++++------ 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/cli/internal/adapters/daemon/server.go b/cli/internal/adapters/daemon/server.go index b2801ab..f3beb9a 100644 --- a/cli/internal/adapters/daemon/server.go +++ b/cli/internal/adapters/daemon/server.go @@ -127,7 +127,6 @@ func (s *Server) cleanup() { // Ping implements DaemonService.Ping. func (s *Server) Ping(_ context.Context, _ *daemonv1.PingRequest) (*daemonv1.PingResponse, error) { - s.lifecycle.ResetTimer() return &daemonv1.PingResponse{ IdleRemainingSeconds: int64(s.lifecycle.IdleRemaining().Seconds()), }, nil @@ -135,7 +134,6 @@ func (s *Server) Ping(_ context.Context, _ *daemonv1.PingRequest) (*daemonv1.Pin // Status implements DaemonService.Status. func (s *Server) Status(_ context.Context, _ *daemonv1.StatusRequest) (*daemonv1.StatusResponse, error) { - s.lifecycle.ResetTimer() pid := os.Getpid() const maxInt32 = 2147483647 if pid > maxInt32 { diff --git a/cli/internal/app/app.go b/cli/internal/app/app.go index 7bf4098..312ae11 100644 --- a/cli/internal/app/app.go +++ b/cli/internal/app/app.go @@ -7,6 +7,7 @@ import ( "fmt" "os" "runtime" + "time" tea "github.com/charmbracelet/bubbletea" "go.opentelemetry.io/otel" @@ -284,6 +285,11 @@ func (a *App) ServeDaemon(ctx context.Context) error { // DaemonStatus returns the current daemon status. 
func (a *App) DaemonStatus(ctx context.Context) error { + if !a.connector.IsRunning() { + a.logger.Info("Running: false") + return nil + } + client, err := a.connector.Connect(ctx) if err != nil { return zerr.Wrap(err, "failed to connect to daemon") @@ -297,12 +303,12 @@ func (a *App) DaemonStatus(ctx context.Context) error { return zerr.Wrap(err, "failed to get daemon status") } - a.logger.Info("Daemon Status:") - a.logger.Info(fmt.Sprintf(" Running: %v", status.Running)) - a.logger.Info(fmt.Sprintf(" PID: %d", status.PID)) - a.logger.Info(fmt.Sprintf(" Uptime: %v", status.Uptime)) - a.logger.Info(fmt.Sprintf(" Last Activity: %v", status.LastActivity)) - a.logger.Info(fmt.Sprintf(" Idle Remaining: %v", status.IdleRemaining)) + a.logger.Info(fmt.Sprintf("Running: %v", status.Running)) + a.logger.Info(fmt.Sprintf("PID: %d", status.PID)) + a.logger.Info(fmt.Sprintf("Uptime: %v", status.Uptime)) + ago := time.Since(status.LastActivity).Truncate(time.Second) + a.logger.Info(fmt.Sprintf("Last Activity: %s (%s ago)", status.LastActivity.Format("15:04:05"), ago)) + a.logger.Info(fmt.Sprintf("Idle Remaining: %v", status.IdleRemaining)) return nil } From 478d673fae61fe2504f5e8221eaa49e9862e7e8f Mon Sep 17 00:00:00 2001 From: Luca Fondo Date: Wed, 28 Jan 2026 12:17:18 +0100 Subject: [PATCH 18/20] chore(#74): update same vendor hash --- flake.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index d92a9b8..c23cd87 100644 --- a/flake.nix +++ b/flake.nix @@ -44,7 +44,7 @@ inherit version; src = ./cli; - vendorHash = "sha256-O9y+DIxt8YcqlP499Ns5ECHEWV2IENy6nAH25Leh1AI="; + vendorHash = "sha256-bPO2Kqn45RNR9H5bf+Tfsqa9h7PtHSVEPTWqxx9+pc0="; env.CGO_ENABLED = 0; From b38cdaa0fa7763a88fa4fcf624c6888ecd4eae2f Mon Sep 17 00:00:00 2001 From: Luca Fondo Date: Wed, 28 Jan 2026 12:38:04 +0100 Subject: [PATCH 19/20] fix(#74): ensure --no-daemon bypasses all daemon communication Previously, the --no-daemon flag only prevented task execution via daemon but still connected to fetch the graph, updating the daemon's last activity timestamp. This caused 'same daemon status' to show recent activity even when --no-daemon was used. Changes: - Check opts.NoDaemon before any daemon connection - Load graph locally when --no-daemon is set - Eliminate duplicated fallback code for cleaner logic - Fix variable shadowing issue with client The daemon's last activity now correctly remains unchanged when using --no-daemon flag. --- cli/internal/app/app.go | 52 +++++++++++++++++++++-------------------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/cli/internal/app/app.go b/cli/internal/app/app.go index 312ae11..c27796e 100644 --- a/cli/internal/app/app.go +++ b/cli/internal/app/app.go @@ -93,39 +93,41 @@ func (a *App) Run(ctx context.Context, targetNames []string, opts RunOptions) er return zerr.Wrap(err, "failed to get current working directory") } - // 1. Connect to daemon (if available) and load graph from daemon or fallback to local + // 1. 
Connect to daemon (if available and not disabled) and load graph from daemon or fallback to local var graph *domain.Graph var client ports.DaemonClient var daemonAvailable bool - client, clientErr := a.connector.Connect(ctx) - if clientErr == nil && client != nil { - // Daemon is available, try to get graph from daemon - daemonAvailable = true - defer func() { - _ = client.Close() - }() + if !opts.NoDaemon { + var clientErr error + client, clientErr = a.connector.Connect(ctx) + if clientErr == nil && client != nil { + // Daemon is available, try to get graph from daemon + daemonAvailable = true + defer func() { + _ = client.Close() + }() + + // Discover config paths and mtimes + mtimes, mtimeErr := a.configLoader.DiscoverConfigPaths(cwd) + if mtimeErr != nil { + return zerr.Wrap(mtimeErr, "failed to discover config paths") + } - // Discover config paths and mtimes - mtimes, mtimeErr := a.configLoader.DiscoverConfigPaths(cwd) - if mtimeErr != nil { - return zerr.Wrap(mtimeErr, "failed to discover config paths") + // Try to get graph from daemon + graph, _, err = client.GetGraph(ctx, cwd, mtimes) + if err != nil { + // On daemon error, we'll fall through to local loading + graph = nil + } } + } - // Try to get graph from daemon - graph, _, err = client.GetGraph(ctx, cwd, mtimes) + // Load graph locally if not already loaded from daemon + if graph == nil || opts.NoDaemon { + graph, err = a.configLoader.Load(cwd) if err != nil { - // Fallback to local loading if daemon fails - if graph, err = a.configLoader.Load(cwd); err != nil { - return zerr.Wrap(err, "failed to load configuration") - } - } - } else { - // Daemon not available, use local loading - var loadErr error - graph, loadErr = a.configLoader.Load(cwd) - if loadErr != nil { - return zerr.Wrap(loadErr, "failed to load configuration") + return zerr.Wrap(err, "failed to load configuration") } } From 3cee89ab276ed68859be9b0cf1d8127c00449b6c Mon Sep 17 00:00:00 2001 From: Luca Fondo Date: Wed, 28 Jan 2026 15:04:30 +0100 Subject: [PATCH 20/20] feat(#74): anchor .same directory to workspace root instead of cwd --- cli/cmd/same/commands/commands_test.go | 41 ++++++++------- cli/internal/adapters/config/loader.go | 29 +++++++++-- cli/internal/adapters/daemon/client.go | 17 ++++--- cli/internal/adapters/daemon/spawner.go | 48 +++++++++++------- cli/internal/app/app.go | 64 +++++++++++++++++++----- cli/internal/app/app_test.go | 23 ++++++--- cli/internal/core/ports/config_loader.go | 4 ++ cli/internal/core/ports/daemon.go | 11 ++-- 8 files changed, 166 insertions(+), 71 deletions(-) diff --git a/cli/cmd/same/commands/commands_test.go b/cli/cmd/same/commands/commands_test.go index 327edc2..a98d3f6 100644 --- a/cli/cmd/same/commands/commands_test.go +++ b/cli/cmd/same/commands/commands_test.go @@ -44,7 +44,8 @@ func TestRun_Success(t *testing.T) { // Setup strict expectations in the correct sequence // 0. Daemon connection fails (daemon not available, fallback to local) - mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) + mockLoader.EXPECT().DiscoverRoot(gomock.Any()).Return(".", nil) + mockConnector.EXPECT().Connect(gomock.Any(), gomock.Any()).Return(nil, errors.New("daemon not available")) // 1. Loader.Load is called first mockLoader.EXPECT().Load(gomock.Any()).Return(g, nil).Times(1) @@ -205,24 +206,25 @@ func TestVersionCmd(t *testing.T) { } // setupCleanTest creates a test CLI with mocked dependencies for clean command tests. 
-func setupCleanTest(t *testing.T) (*commands.CLI, *mocks.MockLogger) { +func setupCleanTest(t *testing.T) (*commands.CLI, *mocks.MockLogger, string) { t.Helper() cwd, err := os.Getwd() if err != nil { t.Fatalf("Failed to get current working directory: %v", err) } - defer func() { - if errChdir := os.Chdir(cwd); errChdir != nil { - t.Fatalf("Failed to restore working directory: %v", errChdir) - } - }() tmpDir := t.TempDir() if errChdir := os.Chdir(tmpDir); errChdir != nil { t.Fatalf("Failed to change into temp directory: %v", errChdir) } + t.Cleanup(func() { + if errChdir := os.Chdir(cwd); errChdir != nil { + t.Fatalf("Failed to restore working directory: %v", errChdir) + } + }) + ctrl := gomock.NewController(t) t.Cleanup(ctrl.Finish) @@ -235,10 +237,12 @@ func setupCleanTest(t *testing.T) (*commands.CLI, *mocks.MockLogger) { mockLogger := mocks.NewMockLogger(ctrl) mockConnector := mocks.NewMockDaemonConnector(ctrl) + mockLoader.EXPECT().DiscoverRoot(gomock.Any()).Return(tmpDir, nil).AnyTimes() + a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector). WithTeaOptions(tea.WithInput(nil), tea.WithOutput(io.Discard)) - return commands.New(a), mockLogger + return commands.New(a), mockLogger, tmpDir } func createDirWithMarker(t *testing.T, dirPath string) { @@ -253,10 +257,10 @@ func createDirWithMarker(t *testing.T, dirPath string) { } func TestCleanCmd_Default(t *testing.T) { - cli, mockLogger := setupCleanTest(t) + cli, mockLogger, tmpDir := setupCleanTest(t) mockLogger.EXPECT().Info(gomock.Any()).AnyTimes() - storePath := filepath.Join(domain.DefaultSamePath(), domain.StoreDirName) + storePath := filepath.Join(tmpDir, domain.DefaultSamePath(), domain.StoreDirName) if err := os.MkdirAll(storePath, domain.DirPerm); err != nil { t.Fatalf("Failed to create store directory: %v", err) } @@ -277,10 +281,10 @@ func TestCleanCmd_Default(t *testing.T) { } func TestCleanCmd_Tools(t *testing.T) { - cli, mockLogger := setupCleanTest(t) + cli, mockLogger, tmpDir := setupCleanTest(t) mockLogger.EXPECT().Info(gomock.Any()).AnyTimes() - nixHubPath := domain.DefaultNixHubCachePath() + nixHubPath := filepath.Join(tmpDir, domain.DefaultNixHubCachePath()) if err := os.MkdirAll(nixHubPath, domain.DirPerm); err != nil { t.Fatalf("Failed to create nixhub cache directory: %v", err) } @@ -289,7 +293,7 @@ func TestCleanCmd_Tools(t *testing.T) { t.Fatalf("Failed to create marker file: %v", err) } - envPath := domain.DefaultEnvCachePath() + envPath := filepath.Join(tmpDir, domain.DefaultEnvCachePath()) if err := os.MkdirAll(envPath, domain.DirPerm); err != nil { t.Fatalf("Failed to create env cache directory: %v", err) } @@ -313,16 +317,16 @@ func TestCleanCmd_Tools(t *testing.T) { } func TestCleanCmd_All(t *testing.T) { - cli, mockLogger := setupCleanTest(t) + cli, mockLogger, tmpDir := setupCleanTest(t) mockLogger.EXPECT().Info(gomock.Any()).AnyTimes() - storePath := filepath.Join(domain.DefaultSamePath(), domain.StoreDirName) + storePath := filepath.Join(tmpDir, domain.DefaultSamePath(), domain.StoreDirName) createDirWithMarker(t, storePath) - nixHubPath := domain.DefaultNixHubCachePath() + nixHubPath := filepath.Join(tmpDir, domain.DefaultNixHubCachePath()) createDirWithMarker(t, nixHubPath) - envPath := domain.DefaultEnvCachePath() + envPath := filepath.Join(tmpDir, domain.DefaultEnvCachePath()) createDirWithMarker(t, envPath) cli.SetArgs([]string{"clean", "--all"}) @@ -383,7 +387,8 @@ func TestRun_OutputModeFlags(t *testing.T) { cli := commands.New(a) 
- mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) + mockLoader.EXPECT().DiscoverRoot(gomock.Any()).Return(".", nil) + mockConnector.EXPECT().Connect(gomock.Any(), gomock.Any()).Return(nil, errors.New("daemon not available")) mockLoader.EXPECT().Load(gomock.Any()).Return(g, nil).Times(1) mockResolver.EXPECT().ResolveInputs(gomock.Any(), gomock.Any()).Return([]string{}, nil).Times(1) mockHasher.EXPECT().ComputeInputHash(gomock.Any(), gomock.Any(), gomock.Any()).Return("hash123", nil).Times(1) diff --git a/cli/internal/adapters/config/loader.go b/cli/internal/adapters/config/loader.go index bfc4e60..0805b80 100644 --- a/cli/internal/adapters/config/loader.go +++ b/cli/internal/adapters/config/loader.go @@ -55,35 +55,54 @@ func (l *Loader) Load(cwd string) (*domain.Graph, error) { } func (l *Loader) findConfiguration(cwd string) (string, Mode, error) { + root, err := l.DiscoverRoot(cwd) + if err != nil { + return "", "", err + } + + workfilePath := filepath.Join(root, domain.WorkFileName) + if _, err := os.Stat(workfilePath); err == nil { + return workfilePath, ModeWorkspace, nil + } + + samefilePath := filepath.Join(root, domain.SameFileName) + if _, err := os.Stat(samefilePath); err == nil { + return samefilePath, ModeStandalone, nil + } + + return "", "", zerr.With(domain.ErrConfigNotFound, "cwd", cwd) +} + +// DiscoverRoot walks up from cwd to find the workspace root. +func (l *Loader) DiscoverRoot(cwd string) (string, error) { currentDir := cwd var standaloneCandidate string for { workfilePath := filepath.Join(currentDir, domain.WorkFileName) if _, err := os.Stat(workfilePath); err == nil { - return workfilePath, ModeWorkspace, nil + return currentDir, nil } if standaloneCandidate == "" { samefilePath := filepath.Join(currentDir, domain.SameFileName) if _, err := os.Stat(samefilePath); err == nil { - standaloneCandidate = samefilePath + standaloneCandidate = currentDir } } parentDir := filepath.Dir(currentDir) if parentDir == currentDir { - // Reached root break } currentDir = parentDir } if standaloneCandidate != "" { - return standaloneCandidate, ModeStandalone, nil + return standaloneCandidate, nil } - return "", "", zerr.With(domain.ErrConfigNotFound, "cwd", cwd) + return "", zerr.With(domain.ErrConfigNotFound, "cwd", cwd) } func (l *Loader) loadSamefile(configPath string) (*domain.Graph, error) { diff --git a/cli/internal/adapters/daemon/client.go b/cli/internal/adapters/daemon/client.go index 05705e3..03f83a3 100644 --- a/cli/internal/adapters/daemon/client.go +++ b/cli/internal/adapters/daemon/client.go @@ -34,15 +34,20 @@ type Client struct { client daemonv1.DaemonServiceClient } -// Dial connects to the daemon over UDS. +// Dial connects to the daemon over UDS at the specified workspace root. // Note: grpc.NewClient returns immediately; actual connection happens lazily on first RPC. 
-func Dial() (*Client, error) { - socketPath := domain.DefaultDaemonSocketPath() - absSocketPath, err := filepath.Abs(socketPath) +func Dial(root string) (*Client, error) { + if root == "" { + return nil, zerr.New("root cannot be empty") + } + + absRoot, err := filepath.Abs(root) if err != nil { - return nil, zerr.Wrap(err, "failed to resolve absolute socket path") + return nil, zerr.Wrap(err, "failed to resolve absolute root path") } - target := "unix://" + absSocketPath + + socketPath := filepath.Join(absRoot, domain.DefaultDaemonSocketPath()) + target := "unix://" + socketPath conn, err := grpc.NewClient(target, grpc.WithTransportCredentials(insecure.NewCredentials()), diff --git a/cli/internal/adapters/daemon/spawner.go b/cli/internal/adapters/daemon/spawner.go index 1702dc2..7336d60 100644 --- a/cli/internal/adapters/daemon/spawner.go +++ b/cli/internal/adapters/daemon/spawner.go @@ -33,8 +33,8 @@ func NewConnector() (*Connector, error) { } // Connect returns a client, spawning the daemon if necessary. -func (c *Connector) Connect(ctx context.Context) (ports.DaemonClient, error) { - client, err := Dial() +func (c *Connector) Connect(ctx context.Context, root string) (ports.DaemonClient, error) { + client, err := Dial(root) if err == nil { if pingErr := client.Ping(ctx); pingErr == nil { return client, nil @@ -42,11 +42,11 @@ func (c *Connector) Connect(ctx context.Context) (ports.DaemonClient, error) { _ = client.Close() } - if spawnErr := c.Spawn(ctx); spawnErr != nil { + if spawnErr := c.Spawn(ctx, root); spawnErr != nil { return nil, spawnErr } - client, err = Dial() + client, err = Dial(root) if err != nil { return nil, zerr.Wrap(err, "daemon client creation failed") } @@ -60,16 +60,20 @@ func (c *Connector) Connect(ctx context.Context) (ports.DaemonClient, error) { } // IsRunning checks if the daemon is running and responsive. -func (c *Connector) IsRunning() bool { +func (c *Connector) IsRunning(root string) bool { + if root == "" { + return false + } + ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - return c.isRunningWithCtx(ctx) + return c.isRunningWithCtx(ctx, root) } // isRunningWithCtx checks if the daemon is running and responsive, respecting the provided context. -func (c *Connector) isRunningWithCtx(ctx context.Context) bool { - client, err := Dial() +func (c *Connector) isRunningWithCtx(ctx context.Context, root string) bool { + client, err := Dial(root) if err != nil { return false } @@ -83,14 +87,23 @@ func (c *Connector) isRunningWithCtx(ctx context.Context) bool { } // Spawn starts the daemon process in the background. 
-func (c *Connector) Spawn(ctx context.Context) error { - daemonDir := filepath.Dir(domain.DefaultDaemonSocketPath()) - if err := os.MkdirAll(daemonDir, domain.DirPerm); err != nil { - return zerr.Wrap(err, "failed to create daemon directory") +func (c *Connector) Spawn(ctx context.Context, root string) error { + if root == "" { + return zerr.New("root cannot be empty") + } + + absRoot, err := filepath.Abs(root) + if err != nil { + return zerr.Wrap(err, "failed to resolve absolute root path") + } + + daemonDir := filepath.Join(absRoot, filepath.Dir(domain.DefaultDaemonSocketPath())) + if mkdirErr := os.MkdirAll(daemonDir, domain.DirPerm); mkdirErr != nil { + return zerr.Wrap(mkdirErr, "failed to create daemon directory") } - logPath := domain.DefaultDaemonLogPath() - //nolint:gosec // G304: logPath is from domain.DefaultDaemonLogPath(), not user input + logPath := filepath.Join(absRoot, domain.DefaultDaemonLogPath()) + //nolint:gosec // G304: logPath is from root + domain constant, not user input logFile, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, domain.PrivateFilePerm) if err != nil { return zerr.Wrap(err, "failed to open daemon log") @@ -98,6 +111,7 @@ func (c *Connector) Spawn(ctx context.Context) error { //nolint:gosec // G204: executablePath is controlled, args are fixed literals cmd := exec.Command(c.executablePath, "daemon", "serve") + cmd.Dir = absRoot cmd.Stdout = logFile cmd.Stderr = logFile cmd.SysProcAttr = &syscall.SysProcAttr{ @@ -114,7 +128,7 @@ func (c *Connector) Spawn(ctx context.Context) error { _ = logFile.Close() }() - if err := c.waitForDaemonStartup(ctx); err != nil { + if err := c.waitForDaemonStartup(ctx, absRoot); err != nil { return err } @@ -122,7 +136,7 @@ func (c *Connector) Spawn(ctx context.Context) error { } // waitForDaemonStartup waits for the daemon to become responsive. -func (c *Connector) waitForDaemonStartup(ctx context.Context) error { +func (c *Connector) waitForDaemonStartup(ctx context.Context, root string) error { start := time.Now() for time.Since(start) < maxPollDuration { select { @@ -130,7 +144,7 @@ func (c *Connector) waitForDaemonStartup(ctx context.Context) error { return ctx.Err() default: } - if c.isRunningWithCtx(ctx) { + if c.isRunningWithCtx(ctx, root) { return nil } select { diff --git a/cli/internal/app/app.go b/cli/internal/app/app.go index c27796e..986cd04 100644 --- a/cli/internal/app/app.go +++ b/cli/internal/app/app.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "os" + "path/filepath" "runtime" "time" @@ -93,14 +94,20 @@ func (a *App) Run(ctx context.Context, targetNames []string, opts RunOptions) er return zerr.Wrap(err, "failed to get current working directory") } - // 1. Connect to daemon (if available and not disabled) and load graph from daemon or fallback to local + // 1. Discover workspace root + root, err := a.configLoader.DiscoverRoot(cwd) + if err != nil { + return zerr.Wrap(err, "failed to discover workspace root") + } + + // 2. Connect to daemon (if available and not disabled) and load graph from daemon or fallback to local var graph *domain.Graph var client ports.DaemonClient var daemonAvailable bool if !opts.NoDaemon { var clientErr error - client, clientErr = a.connector.Connect(ctx) + client, clientErr = a.connector.Connect(ctx, root) if clientErr == nil && client != nil { // Daemon is available, try to get graph from daemon daemonAvailable = true @@ -131,12 +138,12 @@ func (a *App) Run(ctx context.Context, targetNames []string, opts RunOptions) er } } - // 2. Validate targets + // 3. 
Validate targets if len(targetNames) == 0 { return domain.ErrNoTargetsSpecified } - // 3. Initialize Renderer + // 4. Initialize Renderer // Detect environment and resolve output mode autoMode := detector.DetectEnvironment() mode := detector.ResolveMode(autoMode, opts.OutputMode) @@ -153,7 +160,7 @@ func (a *App) Run(ctx context.Context, targetNames []string, opts RunOptions) er renderer = linear.NewRenderer(os.Stdout, os.Stderr) } - // 4. Initialize Telemetry + // 5. Initialize Telemetry // Create a bridge that sends OTel spans to the renderer. bridge := telemetry.NewBridge(renderer) @@ -169,7 +176,7 @@ func (a *App) Run(ctx context.Context, targetNames []string, opts RunOptions) er _ = tracer.Shutdown(ctx) }() - // 5. Initialize Scheduler + // 6. Initialize Scheduler sched := scheduler.NewScheduler( a.executor, a.store, @@ -184,7 +191,7 @@ func (a *App) Run(ctx context.Context, targetNames []string, opts RunOptions) er sched.WithDaemon(client) } - // 6. Run Renderer and Scheduler concurrently + // 7. Run Renderer and Scheduler concurrently g, ctx := errgroup.WithContext(ctx) // Renderer Routine @@ -231,6 +238,17 @@ type CleanOptions struct { // Clean removes cache and build artifacts based on the provided options. func (a *App) Clean(_ context.Context, options CleanOptions) error { + // Discover workspace root + cwd, err := os.Getwd() + if err != nil { + return zerr.Wrap(err, "failed to get current working directory") + } + + root, err := a.configLoader.DiscoverRoot(cwd) + if err != nil { + return zerr.Wrap(err, "failed to discover workspace root") + } + var errs error // Helper to remove a directory and log the action @@ -245,12 +263,12 @@ func (a *App) Clean(_ context.Context, options CleanOptions) error { } if options.Build { - remove(domain.DefaultStorePath(), "build info store") + remove(filepath.Join(root, domain.DefaultStorePath()), "build info store") } if options.Tools { - remove(domain.DefaultNixHubCachePath(), "nix tool cache") - remove(domain.DefaultEnvCachePath(), "environment cache") + remove(filepath.Join(root, domain.DefaultNixHubCachePath()), "nix tool cache") + remove(filepath.Join(root, domain.DefaultEnvCachePath()), "environment cache") } return errs @@ -287,12 +305,22 @@ func (a *App) ServeDaemon(ctx context.Context) error { // DaemonStatus returns the current daemon status. func (a *App) DaemonStatus(ctx context.Context) error { - if !a.connector.IsRunning() { + cwd, err := os.Getwd() + if err != nil { + return zerr.Wrap(err, "failed to get current working directory") + } + + root, err := a.configLoader.DiscoverRoot(cwd) + if err != nil { + return zerr.Wrap(err, "failed to discover workspace root") + } + + if !a.connector.IsRunning(root) { a.logger.Info("Running: false") return nil } - client, err := a.connector.Connect(ctx) + client, err := a.connector.Connect(ctx, root) if err != nil { return zerr.Wrap(err, "failed to connect to daemon") } @@ -317,7 +345,17 @@ func (a *App) DaemonStatus(ctx context.Context) error { // StopDaemon stops the daemon. 
func (a *App) StopDaemon(ctx context.Context) error { - client, err := a.connector.Connect(ctx) + cwd, err := os.Getwd() + if err != nil { + return zerr.Wrap(err, "failed to get current working directory") + } + + root, err := a.configLoader.DiscoverRoot(cwd) + if err != nil { + return zerr.Wrap(err, "failed to discover workspace root") + } + + client, err := a.connector.Connect(ctx, root) if err != nil { return zerr.Wrap(err, "failed to connect to daemon") } diff --git a/cli/internal/app/app_test.go b/cli/internal/app/app_test.go index 2d325cb..4196bc4 100644 --- a/cli/internal/app/app_test.go +++ b/cli/internal/app/app_test.go @@ -64,7 +64,8 @@ func TestApp_Build(t *testing.T) { ). WithDisableTick() - mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) + mockLoader.EXPECT().DiscoverRoot(gomock.Any()).Return(tmpDir, nil) + mockConnector.EXPECT().Connect(gomock.Any(), gomock.Any()).Return(nil, errors.New("daemon not available")) mockResolver.EXPECT().ResolveInputs(gomock.Any(), gomock.Any()).Return([]string{}, nil) // Expectations mockLoader.EXPECT().Load(gomock.Any()).Return(g, nil) @@ -123,7 +124,8 @@ func TestApp_Run_NoTargets(t *testing.T) { ). WithDisableTick() - mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) + mockLoader.EXPECT().DiscoverRoot(gomock.Any()).Return(tmpDir, nil) + mockConnector.EXPECT().Connect(gomock.Any(), gomock.Any()).Return(nil, errors.New("daemon not available")) // Expectations mockLoader.EXPECT().Load(gomock.Any()).Return(domain.NewGraph(), nil) @@ -179,7 +181,8 @@ func TestApp_Run_ConfigLoaderError(t *testing.T) { ). WithDisableTick() - mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) + mockLoader.EXPECT().DiscoverRoot(gomock.Any()).Return(tmpDir, nil) + mockConnector.EXPECT().Connect(gomock.Any(), gomock.Any()).Return(nil, errors.New("daemon not available")) // Expectations - loader fails mockLoader.EXPECT().Load(gomock.Any()).Return(nil, errors.New("config load error")) @@ -245,7 +248,8 @@ func TestApp_Run_BuildExecutionFailed(t *testing.T) { ). WithDisableTick() - mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) + mockLoader.EXPECT().DiscoverRoot(gomock.Any()).Return(tmpDir, nil) + mockConnector.EXPECT().Connect(gomock.Any(), gomock.Any()).Return(nil, errors.New("daemon not available")) mockResolver.EXPECT().ResolveInputs(gomock.Any(), gomock.Any()).Return([]string{}, nil) // Expectations mockLoader.EXPECT().Load(gomock.Any()).Return(g, nil) @@ -380,13 +384,16 @@ func TestApp_Clean(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() + mockLoader := mocks.NewMockConfigLoader(ctrl) + mockLoader.EXPECT().DiscoverRoot(gomock.Any()).Return(tmpDir, nil) + mockLogger := mocks.NewMockLogger(ctrl) // We expect some logs, but we can be loose or strict. // Let's just allow any Info calls. mockLogger.EXPECT().Info(gomock.Any()).AnyTimes() // Null dependencies for others - a := app.New(nil, nil, mockLogger, nil, nil, nil, nil, nil) + a := app.New(mockLoader, nil, mockLogger, nil, nil, nil, nil, nil) err = a.Clean(context.Background(), tt.options) if err != nil { @@ -443,7 +450,8 @@ func TestApp_Run_LinearMode(t *testing.T) { ). 
WithDisableTick() - mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) + mockLoader.EXPECT().DiscoverRoot(gomock.Any()).Return(tmpDir, nil) + mockConnector.EXPECT().Connect(gomock.Any(), gomock.Any()).Return(nil, errors.New("daemon not available")) mockResolver.EXPECT().ResolveInputs(gomock.Any(), gomock.Any()).Return([]string{}, nil) mockLoader.EXPECT().Load(gomock.Any()).Return(g, nil) mockHasher.EXPECT().ComputeInputHash(task, nil, []string{}).Return("hash", nil) @@ -505,7 +513,8 @@ func TestApp_Run_InspectMode(t *testing.T) { ). WithDisableTick() - mockConnector.EXPECT().Connect(gomock.Any()).Return(nil, errors.New("daemon not available")) + mockLoader.EXPECT().DiscoverRoot(gomock.Any()).Return(tmpDir, nil) + mockConnector.EXPECT().Connect(gomock.Any(), gomock.Any()).Return(nil, errors.New("daemon not available")) mockResolver.EXPECT().ResolveInputs(gomock.Any(), gomock.Any()).Return([]string{}, nil) mockLoader.EXPECT().Load(gomock.Any()).Return(g, nil) mockHasher.EXPECT().ComputeInputHash(task, nil, []string{}).Return("hash", nil) diff --git a/cli/internal/core/ports/config_loader.go b/cli/internal/core/ports/config_loader.go index 4e51006..668a524 100644 --- a/cli/internal/core/ports/config_loader.go +++ b/cli/internal/core/ports/config_loader.go @@ -12,4 +12,8 @@ type ConfigLoader interface { // DiscoverConfigPaths finds configuration file paths and their modification times. // Returns a map of config file paths to their mtime in UnixNano. DiscoverConfigPaths(cwd string) (map[string]int64, error) + + // DiscoverRoot walks up from cwd to find the workspace root. + // Returns the directory containing same.work.yaml or same.yaml. + DiscoverRoot(cwd string) (string, error) } diff --git a/cli/internal/core/ports/daemon.go b/cli/internal/core/ports/daemon.go index fe75e55..6287fd5 100644 --- a/cli/internal/core/ports/daemon.go +++ b/cli/internal/core/ports/daemon.go @@ -67,11 +67,12 @@ type DaemonClient interface { // DaemonConnector manages daemon lifecycle from the CLI perspective. type DaemonConnector interface { // Connect returns a client to the daemon, spawning it if necessary. - Connect(ctx context.Context) (DaemonClient, error) + // root is the workspace root directory where the daemon operates. + Connect(ctx context.Context, root string) (DaemonClient, error) - // IsRunning checks if the daemon process is currently running. - IsRunning() bool + // IsRunning checks if the daemon process is currently running at the given root. + IsRunning(root string) bool - // Spawn starts a new daemon process in the background. - Spawn(ctx context.Context) error + // Spawn starts a new daemon process in the background at the given root. + Spawn(ctx context.Context, root string) error }
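
For reference, the dial path that PATCH 16 and PATCH 20 converge on, as one self-contained sketch: a workspace-root-anchored Unix Domain Socket target combined with the aggressive backoff settings. It uses only public grpc-go APIs (grpc.NewClient, grpc.WithConnectParams, backoff.Config, insecure.NewCredentials); the ".same/daemon.sock" path and the dialDaemon helper are illustrative stand-ins for domain.DefaultDaemonSocketPath() and the adapter's Dial, not the repository's actual code:

package main

import (
	"fmt"
	"log"
	"path/filepath"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

// dialDaemon creates a client connection to a daemon listening on a UDS
// anchored at the workspace root. grpc.NewClient returns immediately;
// the socket is only touched on the first RPC.
func dialDaemon(root string) (*grpc.ClientConn, error) {
	// The path after "unix://" must be absolute, so resolve root first.
	absRoot, err := filepath.Abs(root)
	if err != nil {
		return nil, err
	}
	target := "unix://" + filepath.Join(absRoot, ".same", "daemon.sock") // illustrative path
	return grpc.NewClient(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{
			// A local UDS connect either succeeds in microseconds or fails
			// fast; the default exponential backoff (1s base) would only
			// add latency to every CLI invocation.
			Backoff: backoff.Config{
				BaseDelay:  50 * time.Millisecond,
				Multiplier: 1.5,
				MaxDelay:   200 * time.Millisecond,
			},
			MinConnectTimeout: 100 * time.Millisecond,
		}),
	)
}

func main() {
	conn, err := dialDaemon(".")
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = conn.Close() }()
	fmt.Println("client created; the connection is established lazily on the first RPC")
}

Because the connection is lazy, a successful dial says nothing about daemon liveness, which is why Connect() in spawner.go follows Dial with a Ping before trusting the client and only spawns the daemon when that Ping fails.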