diff --git a/cli/api/daemon/v1/daemon.pb.go b/cli/api/daemon/v1/daemon.pb.go new file mode 100644 index 0000000..e7fb4e2 --- /dev/null +++ b/cli/api/daemon/v1/daemon.pb.go @@ -0,0 +1,1166 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.11 +// protoc (unknown) +// source: api/daemon/v1/daemon.proto + +package daemonv1 + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + _ "google.golang.org/genproto/googleapis/rpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GetInputHashResponse_State int32 + +const ( + GetInputHashResponse_READY GetInputHashResponse_State = 0 + GetInputHashResponse_PENDING GetInputHashResponse_State = 1 + GetInputHashResponse_UNKNOWN GetInputHashResponse_State = 2 +) + +// Enum value maps for GetInputHashResponse_State. +var ( + GetInputHashResponse_State_name = map[int32]string{ + 0: "READY", + 1: "PENDING", + 2: "UNKNOWN", + } + GetInputHashResponse_State_value = map[string]int32{ + "READY": 0, + "PENDING": 1, + "UNKNOWN": 2, + } +) + +func (x GetInputHashResponse_State) Enum() *GetInputHashResponse_State { + p := new(GetInputHashResponse_State) + *p = x + return p +} + +func (x GetInputHashResponse_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GetInputHashResponse_State) Descriptor() protoreflect.EnumDescriptor { + return file_api_daemon_v1_daemon_proto_enumTypes[0].Descriptor() +} + +func (GetInputHashResponse_State) Type() protoreflect.EnumType { + return &file_api_daemon_v1_daemon_proto_enumTypes[0] +} + +func (x GetInputHashResponse_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GetInputHashResponse_State.Descriptor instead. +func (GetInputHashResponse_State) EnumDescriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{13, 0} +} + +type PingRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PingRequest) Reset() { + *x = PingRequest{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PingRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PingRequest) ProtoMessage() {} + +func (x *PingRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PingRequest.ProtoReflect.Descriptor instead. +func (*PingRequest) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{0} +} + +type PingResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // idle_remaining_seconds is the time remaining before auto-shutdown. 
+ IdleRemainingSeconds int64 `protobuf:"varint,1,opt,name=idle_remaining_seconds,json=idleRemainingSeconds,proto3" json:"idle_remaining_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PingResponse) Reset() { + *x = PingResponse{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PingResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PingResponse) ProtoMessage() {} + +func (x *PingResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PingResponse.ProtoReflect.Descriptor instead. +func (*PingResponse) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{1} +} + +func (x *PingResponse) GetIdleRemainingSeconds() int64 { + if x != nil { + return x.IdleRemainingSeconds + } + return 0 +} + +type StatusRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusRequest) Reset() { + *x = StatusRequest{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusRequest) ProtoMessage() {} + +func (x *StatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusRequest.ProtoReflect.Descriptor instead. 
+func (*StatusRequest) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{2} +} + +type StatusResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Running bool `protobuf:"varint,1,opt,name=running,proto3" json:"running,omitempty"` + Pid int32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + UptimeSeconds int64 `protobuf:"varint,3,opt,name=uptime_seconds,json=uptimeSeconds,proto3" json:"uptime_seconds,omitempty"` + LastActivityUnix int64 `protobuf:"varint,4,opt,name=last_activity_unix,json=lastActivityUnix,proto3" json:"last_activity_unix,omitempty"` + IdleRemainingSeconds int64 `protobuf:"varint,5,opt,name=idle_remaining_seconds,json=idleRemainingSeconds,proto3" json:"idle_remaining_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusResponse) Reset() { + *x = StatusResponse{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse) ProtoMessage() {} + +func (x *StatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead. +func (*StatusResponse) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{3} +} + +func (x *StatusResponse) GetRunning() bool { + if x != nil { + return x.Running + } + return false +} + +func (x *StatusResponse) GetPid() int32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *StatusResponse) GetUptimeSeconds() int64 { + if x != nil { + return x.UptimeSeconds + } + return 0 +} + +func (x *StatusResponse) GetLastActivityUnix() int64 { + if x != nil { + return x.LastActivityUnix + } + return 0 +} + +func (x *StatusResponse) GetIdleRemainingSeconds() int64 { + if x != nil { + return x.IdleRemainingSeconds + } + return 0 +} + +type ShutdownRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // graceful indicates whether to wait for in-flight operations. + Graceful bool `protobuf:"varint,1,opt,name=graceful,proto3" json:"graceful,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ShutdownRequest) Reset() { + *x = ShutdownRequest{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ShutdownRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShutdownRequest) ProtoMessage() {} + +func (x *ShutdownRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShutdownRequest.ProtoReflect.Descriptor instead. 
+func (*ShutdownRequest) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{4} +} + +func (x *ShutdownRequest) GetGraceful() bool { + if x != nil { + return x.Graceful + } + return false +} + +type ShutdownResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ShutdownResponse) Reset() { + *x = ShutdownResponse{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ShutdownResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShutdownResponse) ProtoMessage() {} + +func (x *ShutdownResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShutdownResponse.ProtoReflect.Descriptor instead. +func (*ShutdownResponse) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{5} +} + +func (x *ShutdownResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +type ConfigMtime struct { + state protoimpl.MessageState `protogen:"open.v1"` + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + MtimeUnixNano int64 `protobuf:"varint,2,opt,name=mtime_unix_nano,json=mtimeUnixNano,proto3" json:"mtime_unix_nano,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ConfigMtime) Reset() { + *x = ConfigMtime{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ConfigMtime) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigMtime) ProtoMessage() {} + +func (x *ConfigMtime) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigMtime.ProtoReflect.Descriptor instead. 
+func (*ConfigMtime) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{6} +} + +func (x *ConfigMtime) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *ConfigMtime) GetMtimeUnixNano() int64 { + if x != nil { + return x.MtimeUnixNano + } + return 0 +} + +type GetGraphRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Cwd string `protobuf:"bytes,1,opt,name=cwd,proto3" json:"cwd,omitempty"` + ConfigMtimes []*ConfigMtime `protobuf:"bytes,2,rep,name=config_mtimes,json=configMtimes,proto3" json:"config_mtimes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetGraphRequest) Reset() { + *x = GetGraphRequest{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetGraphRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetGraphRequest) ProtoMessage() {} + +func (x *GetGraphRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetGraphRequest.ProtoReflect.Descriptor instead. +func (*GetGraphRequest) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{7} +} + +func (x *GetGraphRequest) GetCwd() string { + if x != nil { + return x.Cwd + } + return "" +} + +func (x *GetGraphRequest) GetConfigMtimes() []*ConfigMtime { + if x != nil { + return x.ConfigMtimes + } + return nil +} + +type TaskProto struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Command []string `protobuf:"bytes,2,rep,name=command,proto3" json:"command,omitempty"` + Inputs []string `protobuf:"bytes,3,rep,name=inputs,proto3" json:"inputs,omitempty"` + Outputs []string `protobuf:"bytes,4,rep,name=outputs,proto3" json:"outputs,omitempty"` + Tools map[string]string `protobuf:"bytes,5,rep,name=tools,proto3" json:"tools,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Dependencies []string `protobuf:"bytes,6,rep,name=dependencies,proto3" json:"dependencies,omitempty"` + Environment map[string]string `protobuf:"bytes,7,rep,name=environment,proto3" json:"environment,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + WorkingDir string `protobuf:"bytes,8,opt,name=working_dir,json=workingDir,proto3" json:"working_dir,omitempty"` + RebuildStrategy string `protobuf:"bytes,9,opt,name=rebuild_strategy,json=rebuildStrategy,proto3" json:"rebuild_strategy,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TaskProto) Reset() { + *x = TaskProto{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TaskProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TaskProto) ProtoMessage() {} + +func (x *TaskProto) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} 
+ +// Deprecated: Use TaskProto.ProtoReflect.Descriptor instead. +func (*TaskProto) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{8} +} + +func (x *TaskProto) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *TaskProto) GetCommand() []string { + if x != nil { + return x.Command + } + return nil +} + +func (x *TaskProto) GetInputs() []string { + if x != nil { + return x.Inputs + } + return nil +} + +func (x *TaskProto) GetOutputs() []string { + if x != nil { + return x.Outputs + } + return nil +} + +func (x *TaskProto) GetTools() map[string]string { + if x != nil { + return x.Tools + } + return nil +} + +func (x *TaskProto) GetDependencies() []string { + if x != nil { + return x.Dependencies + } + return nil +} + +func (x *TaskProto) GetEnvironment() map[string]string { + if x != nil { + return x.Environment + } + return nil +} + +func (x *TaskProto) GetWorkingDir() string { + if x != nil { + return x.WorkingDir + } + return "" +} + +func (x *TaskProto) GetRebuildStrategy() string { + if x != nil { + return x.RebuildStrategy + } + return "" +} + +type GetGraphResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + CacheHit bool `protobuf:"varint,1,opt,name=cache_hit,json=cacheHit,proto3" json:"cache_hit,omitempty"` + Root string `protobuf:"bytes,2,opt,name=root,proto3" json:"root,omitempty"` + Tasks []*TaskProto `protobuf:"bytes,3,rep,name=tasks,proto3" json:"tasks,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetGraphResponse) Reset() { + *x = GetGraphResponse{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetGraphResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetGraphResponse) ProtoMessage() {} + +func (x *GetGraphResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetGraphResponse.ProtoReflect.Descriptor instead. 
+func (*GetGraphResponse) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{9} +} + +func (x *GetGraphResponse) GetCacheHit() bool { + if x != nil { + return x.CacheHit + } + return false +} + +func (x *GetGraphResponse) GetRoot() string { + if x != nil { + return x.Root + } + return "" +} + +func (x *GetGraphResponse) GetTasks() []*TaskProto { + if x != nil { + return x.Tasks + } + return nil +} + +type GetEnvironmentRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + EnvId string `protobuf:"bytes,1,opt,name=env_id,json=envId,proto3" json:"env_id,omitempty"` + Tools map[string]string `protobuf:"bytes,2,rep,name=tools,proto3" json:"tools,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetEnvironmentRequest) Reset() { + *x = GetEnvironmentRequest{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetEnvironmentRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEnvironmentRequest) ProtoMessage() {} + +func (x *GetEnvironmentRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEnvironmentRequest.ProtoReflect.Descriptor instead. +func (*GetEnvironmentRequest) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{10} +} + +func (x *GetEnvironmentRequest) GetEnvId() string { + if x != nil { + return x.EnvId + } + return "" +} + +func (x *GetEnvironmentRequest) GetTools() map[string]string { + if x != nil { + return x.Tools + } + return nil +} + +type GetEnvironmentResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + CacheHit bool `protobuf:"varint,1,opt,name=cache_hit,json=cacheHit,proto3" json:"cache_hit,omitempty"` + EnvVars []string `protobuf:"bytes,2,rep,name=env_vars,json=envVars,proto3" json:"env_vars,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetEnvironmentResponse) Reset() { + *x = GetEnvironmentResponse{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetEnvironmentResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEnvironmentResponse) ProtoMessage() {} + +func (x *GetEnvironmentResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEnvironmentResponse.ProtoReflect.Descriptor instead. 
+func (*GetEnvironmentResponse) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{11} +} + +func (x *GetEnvironmentResponse) GetCacheHit() bool { + if x != nil { + return x.CacheHit + } + return false +} + +func (x *GetEnvironmentResponse) GetEnvVars() []string { + if x != nil { + return x.EnvVars + } + return nil +} + +type GetInputHashRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + TaskName string `protobuf:"bytes,1,opt,name=task_name,json=taskName,proto3" json:"task_name,omitempty"` + Root string `protobuf:"bytes,2,opt,name=root,proto3" json:"root,omitempty"` + Environment map[string]string `protobuf:"bytes,3,rep,name=environment,proto3" json:"environment,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetInputHashRequest) Reset() { + *x = GetInputHashRequest{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetInputHashRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetInputHashRequest) ProtoMessage() {} + +func (x *GetInputHashRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetInputHashRequest.ProtoReflect.Descriptor instead. +func (*GetInputHashRequest) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{12} +} + +func (x *GetInputHashRequest) GetTaskName() string { + if x != nil { + return x.TaskName + } + return "" +} + +func (x *GetInputHashRequest) GetRoot() string { + if x != nil { + return x.Root + } + return "" +} + +func (x *GetInputHashRequest) GetEnvironment() map[string]string { + if x != nil { + return x.Environment + } + return nil +} + +type GetInputHashResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + State GetInputHashResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=daemon.v1.GetInputHashResponse_State" json:"state,omitempty"` + Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetInputHashResponse) Reset() { + *x = GetInputHashResponse{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetInputHashResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetInputHashResponse) ProtoMessage() {} + +func (x *GetInputHashResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetInputHashResponse.ProtoReflect.Descriptor instead. 
+func (*GetInputHashResponse) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{13} +} + +func (x *GetInputHashResponse) GetState() GetInputHashResponse_State { + if x != nil { + return x.State + } + return GetInputHashResponse_READY +} + +func (x *GetInputHashResponse) GetHash() string { + if x != nil { + return x.Hash + } + return "" +} + +type ExecuteTaskRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + TaskName string `protobuf:"bytes,1,opt,name=task_name,json=taskName,proto3" json:"task_name,omitempty"` + Command []string `protobuf:"bytes,2,rep,name=command,proto3" json:"command,omitempty"` + WorkingDir string `protobuf:"bytes,3,opt,name=working_dir,json=workingDir,proto3" json:"working_dir,omitempty"` + TaskEnvironment map[string]string `protobuf:"bytes,4,rep,name=task_environment,json=taskEnvironment,proto3" json:"task_environment,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + NixEnvironment []string `protobuf:"bytes,5,rep,name=nix_environment,json=nixEnvironment,proto3" json:"nix_environment,omitempty"` + PtyRows int32 `protobuf:"varint,6,opt,name=pty_rows,json=ptyRows,proto3" json:"pty_rows,omitempty"` + PtyCols int32 `protobuf:"varint,7,opt,name=pty_cols,json=ptyCols,proto3" json:"pty_cols,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExecuteTaskRequest) Reset() { + *x = ExecuteTaskRequest{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExecuteTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExecuteTaskRequest) ProtoMessage() {} + +func (x *ExecuteTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExecuteTaskRequest.ProtoReflect.Descriptor instead. 
+func (*ExecuteTaskRequest) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{14} +} + +func (x *ExecuteTaskRequest) GetTaskName() string { + if x != nil { + return x.TaskName + } + return "" +} + +func (x *ExecuteTaskRequest) GetCommand() []string { + if x != nil { + return x.Command + } + return nil +} + +func (x *ExecuteTaskRequest) GetWorkingDir() string { + if x != nil { + return x.WorkingDir + } + return "" +} + +func (x *ExecuteTaskRequest) GetTaskEnvironment() map[string]string { + if x != nil { + return x.TaskEnvironment + } + return nil +} + +func (x *ExecuteTaskRequest) GetNixEnvironment() []string { + if x != nil { + return x.NixEnvironment + } + return nil +} + +func (x *ExecuteTaskRequest) GetPtyRows() int32 { + if x != nil { + return x.PtyRows + } + return 0 +} + +func (x *ExecuteTaskRequest) GetPtyCols() int32 { + if x != nil { + return x.PtyCols + } + return 0 +} + +type ExecuteTaskResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExecuteTaskResponse) Reset() { + *x = ExecuteTaskResponse{} + mi := &file_api_daemon_v1_daemon_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExecuteTaskResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExecuteTaskResponse) ProtoMessage() {} + +func (x *ExecuteTaskResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_daemon_v1_daemon_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExecuteTaskResponse.ProtoReflect.Descriptor instead. 
+func (*ExecuteTaskResponse) Descriptor() ([]byte, []int) { + return file_api_daemon_v1_daemon_proto_rawDescGZIP(), []int{15} +} + +func (x *ExecuteTaskResponse) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +var File_api_daemon_v1_daemon_proto protoreflect.FileDescriptor + +const file_api_daemon_v1_daemon_proto_rawDesc = "" + + "\n" + + "\x1aapi/daemon/v1/daemon.proto\x12\tdaemon.v1\x1a\x17google/rpc/status.proto\"\r\n" + + "\vPingRequest\"D\n" + + "\fPingResponse\x124\n" + + "\x16idle_remaining_seconds\x18\x01 \x01(\x03R\x14idleRemainingSeconds\"\x0f\n" + + "\rStatusRequest\"\xc7\x01\n" + + "\x0eStatusResponse\x12\x18\n" + + "\arunning\x18\x01 \x01(\bR\arunning\x12\x10\n" + + "\x03pid\x18\x02 \x01(\x05R\x03pid\x12%\n" + + "\x0euptime_seconds\x18\x03 \x01(\x03R\ruptimeSeconds\x12,\n" + + "\x12last_activity_unix\x18\x04 \x01(\x03R\x10lastActivityUnix\x124\n" + + "\x16idle_remaining_seconds\x18\x05 \x01(\x03R\x14idleRemainingSeconds\"-\n" + + "\x0fShutdownRequest\x12\x1a\n" + + "\bgraceful\x18\x01 \x01(\bR\bgraceful\",\n" + + "\x10ShutdownResponse\x12\x18\n" + + "\asuccess\x18\x01 \x01(\bR\asuccess\"I\n" + + "\vConfigMtime\x12\x12\n" + + "\x04path\x18\x01 \x01(\tR\x04path\x12&\n" + + "\x0fmtime_unix_nano\x18\x02 \x01(\x03R\rmtimeUnixNano\"`\n" + + "\x0fGetGraphRequest\x12\x10\n" + + "\x03cwd\x18\x01 \x01(\tR\x03cwd\x12;\n" + + "\rconfig_mtimes\x18\x02 \x03(\v2\x16.daemon.v1.ConfigMtimeR\fconfigMtimes\"\xd5\x03\n" + + "\tTaskProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" + + "\acommand\x18\x02 \x03(\tR\acommand\x12\x16\n" + + "\x06inputs\x18\x03 \x03(\tR\x06inputs\x12\x18\n" + + "\aoutputs\x18\x04 \x03(\tR\aoutputs\x125\n" + + "\x05tools\x18\x05 \x03(\v2\x1f.daemon.v1.TaskProto.ToolsEntryR\x05tools\x12\"\n" + + "\fdependencies\x18\x06 \x03(\tR\fdependencies\x12G\n" + + "\venvironment\x18\a \x03(\v2%.daemon.v1.TaskProto.EnvironmentEntryR\venvironment\x12\x1f\n" + + "\vworking_dir\x18\b \x01(\tR\n" + + "workingDir\x12)\n" + + "\x10rebuild_strategy\x18\t \x01(\tR\x0frebuildStrategy\x1a8\n" + + "\n" + + "ToolsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\x1a>\n" + + "\x10EnvironmentEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"o\n" + + "\x10GetGraphResponse\x12\x1b\n" + + "\tcache_hit\x18\x01 \x01(\bR\bcacheHit\x12\x12\n" + + "\x04root\x18\x02 \x01(\tR\x04root\x12*\n" + + "\x05tasks\x18\x03 \x03(\v2\x14.daemon.v1.TaskProtoR\x05tasks\"\xab\x01\n" + + "\x15GetEnvironmentRequest\x12\x15\n" + + "\x06env_id\x18\x01 \x01(\tR\x05envId\x12A\n" + + "\x05tools\x18\x02 \x03(\v2+.daemon.v1.GetEnvironmentRequest.ToolsEntryR\x05tools\x1a8\n" + + "\n" + + "ToolsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"P\n" + + "\x16GetEnvironmentResponse\x12\x1b\n" + + "\tcache_hit\x18\x01 \x01(\bR\bcacheHit\x12\x19\n" + + "\benv_vars\x18\x02 \x03(\tR\aenvVars\"\xd9\x01\n" + + "\x13GetInputHashRequest\x12\x1b\n" + + "\ttask_name\x18\x01 \x01(\tR\btaskName\x12\x12\n" + + "\x04root\x18\x02 \x01(\tR\x04root\x12Q\n" + + "\venvironment\x18\x03 \x03(\v2/.daemon.v1.GetInputHashRequest.EnvironmentEntryR\venvironment\x1a>\n" + + "\x10EnvironmentEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\x95\x01\n" + + "\x14GetInputHashResponse\x12;\n" + + "\x05state\x18\x01 
\x01(\x0e2%.daemon.v1.GetInputHashResponse.StateR\x05state\x12\x12\n" + + "\x04hash\x18\x02 \x01(\tR\x04hash\",\n" + + "\x05State\x12\t\n" + + "\x05READY\x10\x00\x12\v\n" + + "\aPENDING\x10\x01\x12\v\n" + + "\aUNKNOWN\x10\x02\"\xee\x02\n" + + "\x12ExecuteTaskRequest\x12\x1b\n" + + "\ttask_name\x18\x01 \x01(\tR\btaskName\x12\x18\n" + + "\acommand\x18\x02 \x03(\tR\acommand\x12\x1f\n" + + "\vworking_dir\x18\x03 \x01(\tR\n" + + "workingDir\x12]\n" + + "\x10task_environment\x18\x04 \x03(\v22.daemon.v1.ExecuteTaskRequest.TaskEnvironmentEntryR\x0ftaskEnvironment\x12'\n" + + "\x0fnix_environment\x18\x05 \x03(\tR\x0enixEnvironment\x12\x19\n" + + "\bpty_rows\x18\x06 \x01(\x05R\aptyRows\x12\x19\n" + + "\bpty_cols\x18\a \x01(\x05R\aptyCols\x1aB\n" + + "\x14TaskEnvironmentEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\")\n" + + "\x13ExecuteTaskResponse\x12\x12\n" + + "\x04data\x18\x01 \x01(\fR\x04data2\x89\x04\n" + + "\rDaemonService\x127\n" + + "\x04Ping\x12\x16.daemon.v1.PingRequest\x1a\x17.daemon.v1.PingResponse\x12=\n" + + "\x06Status\x12\x18.daemon.v1.StatusRequest\x1a\x19.daemon.v1.StatusResponse\x12C\n" + + "\bShutdown\x12\x1a.daemon.v1.ShutdownRequest\x1a\x1b.daemon.v1.ShutdownResponse\x12C\n" + + "\bGetGraph\x12\x1a.daemon.v1.GetGraphRequest\x1a\x1b.daemon.v1.GetGraphResponse\x12U\n" + + "\x0eGetEnvironment\x12 .daemon.v1.GetEnvironmentRequest\x1a!.daemon.v1.GetEnvironmentResponse\x12O\n" + + "\fGetInputHash\x12\x1e.daemon.v1.GetInputHashRequest\x1a\x1f.daemon.v1.GetInputHashResponse\x12N\n" + + "\vExecuteTask\x12\x1d.daemon.v1.ExecuteTaskRequest\x1a\x1e.daemon.v1.ExecuteTaskResponse0\x01B(Z&go.trai.ch/same/api/daemon/v1;daemonv1b\x06proto3" + +var ( + file_api_daemon_v1_daemon_proto_rawDescOnce sync.Once + file_api_daemon_v1_daemon_proto_rawDescData []byte +) + +func file_api_daemon_v1_daemon_proto_rawDescGZIP() []byte { + file_api_daemon_v1_daemon_proto_rawDescOnce.Do(func() { + file_api_daemon_v1_daemon_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_api_daemon_v1_daemon_proto_rawDesc), len(file_api_daemon_v1_daemon_proto_rawDesc))) + }) + return file_api_daemon_v1_daemon_proto_rawDescData +} + +var file_api_daemon_v1_daemon_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_api_daemon_v1_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 21) +var file_api_daemon_v1_daemon_proto_goTypes = []any{ + (GetInputHashResponse_State)(0), // 0: daemon.v1.GetInputHashResponse.State + (*PingRequest)(nil), // 1: daemon.v1.PingRequest + (*PingResponse)(nil), // 2: daemon.v1.PingResponse + (*StatusRequest)(nil), // 3: daemon.v1.StatusRequest + (*StatusResponse)(nil), // 4: daemon.v1.StatusResponse + (*ShutdownRequest)(nil), // 5: daemon.v1.ShutdownRequest + (*ShutdownResponse)(nil), // 6: daemon.v1.ShutdownResponse + (*ConfigMtime)(nil), // 7: daemon.v1.ConfigMtime + (*GetGraphRequest)(nil), // 8: daemon.v1.GetGraphRequest + (*TaskProto)(nil), // 9: daemon.v1.TaskProto + (*GetGraphResponse)(nil), // 10: daemon.v1.GetGraphResponse + (*GetEnvironmentRequest)(nil), // 11: daemon.v1.GetEnvironmentRequest + (*GetEnvironmentResponse)(nil), // 12: daemon.v1.GetEnvironmentResponse + (*GetInputHashRequest)(nil), // 13: daemon.v1.GetInputHashRequest + (*GetInputHashResponse)(nil), // 14: daemon.v1.GetInputHashResponse + (*ExecuteTaskRequest)(nil), // 15: daemon.v1.ExecuteTaskRequest + (*ExecuteTaskResponse)(nil), // 16: daemon.v1.ExecuteTaskResponse + nil, // 17: daemon.v1.TaskProto.ToolsEntry + nil, // 
18: daemon.v1.TaskProto.EnvironmentEntry + nil, // 19: daemon.v1.GetEnvironmentRequest.ToolsEntry + nil, // 20: daemon.v1.GetInputHashRequest.EnvironmentEntry + nil, // 21: daemon.v1.ExecuteTaskRequest.TaskEnvironmentEntry +} +var file_api_daemon_v1_daemon_proto_depIdxs = []int32{ + 7, // 0: daemon.v1.GetGraphRequest.config_mtimes:type_name -> daemon.v1.ConfigMtime + 17, // 1: daemon.v1.TaskProto.tools:type_name -> daemon.v1.TaskProto.ToolsEntry + 18, // 2: daemon.v1.TaskProto.environment:type_name -> daemon.v1.TaskProto.EnvironmentEntry + 9, // 3: daemon.v1.GetGraphResponse.tasks:type_name -> daemon.v1.TaskProto + 19, // 4: daemon.v1.GetEnvironmentRequest.tools:type_name -> daemon.v1.GetEnvironmentRequest.ToolsEntry + 20, // 5: daemon.v1.GetInputHashRequest.environment:type_name -> daemon.v1.GetInputHashRequest.EnvironmentEntry + 0, // 6: daemon.v1.GetInputHashResponse.state:type_name -> daemon.v1.GetInputHashResponse.State + 21, // 7: daemon.v1.ExecuteTaskRequest.task_environment:type_name -> daemon.v1.ExecuteTaskRequest.TaskEnvironmentEntry + 1, // 8: daemon.v1.DaemonService.Ping:input_type -> daemon.v1.PingRequest + 3, // 9: daemon.v1.DaemonService.Status:input_type -> daemon.v1.StatusRequest + 5, // 10: daemon.v1.DaemonService.Shutdown:input_type -> daemon.v1.ShutdownRequest + 8, // 11: daemon.v1.DaemonService.GetGraph:input_type -> daemon.v1.GetGraphRequest + 11, // 12: daemon.v1.DaemonService.GetEnvironment:input_type -> daemon.v1.GetEnvironmentRequest + 13, // 13: daemon.v1.DaemonService.GetInputHash:input_type -> daemon.v1.GetInputHashRequest + 15, // 14: daemon.v1.DaemonService.ExecuteTask:input_type -> daemon.v1.ExecuteTaskRequest + 2, // 15: daemon.v1.DaemonService.Ping:output_type -> daemon.v1.PingResponse + 4, // 16: daemon.v1.DaemonService.Status:output_type -> daemon.v1.StatusResponse + 6, // 17: daemon.v1.DaemonService.Shutdown:output_type -> daemon.v1.ShutdownResponse + 10, // 18: daemon.v1.DaemonService.GetGraph:output_type -> daemon.v1.GetGraphResponse + 12, // 19: daemon.v1.DaemonService.GetEnvironment:output_type -> daemon.v1.GetEnvironmentResponse + 14, // 20: daemon.v1.DaemonService.GetInputHash:output_type -> daemon.v1.GetInputHashResponse + 16, // 21: daemon.v1.DaemonService.ExecuteTask:output_type -> daemon.v1.ExecuteTaskResponse + 15, // [15:22] is the sub-list for method output_type + 8, // [8:15] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_api_daemon_v1_daemon_proto_init() } +func file_api_daemon_v1_daemon_proto_init() { + if File_api_daemon_v1_daemon_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_api_daemon_v1_daemon_proto_rawDesc), len(file_api_daemon_v1_daemon_proto_rawDesc)), + NumEnums: 1, + NumMessages: 21, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_api_daemon_v1_daemon_proto_goTypes, + DependencyIndexes: file_api_daemon_v1_daemon_proto_depIdxs, + EnumInfos: file_api_daemon_v1_daemon_proto_enumTypes, + MessageInfos: file_api_daemon_v1_daemon_proto_msgTypes, + }.Build() + File_api_daemon_v1_daemon_proto = out.File + file_api_daemon_v1_daemon_proto_goTypes = nil + file_api_daemon_v1_daemon_proto_depIdxs = nil +} diff --git a/cli/api/daemon/v1/daemon.proto b/cli/api/daemon/v1/daemon.proto new file mode 100644 
index 0000000..cf238fd --- /dev/null +++ b/cli/api/daemon/v1/daemon.proto @@ -0,0 +1,122 @@ +syntax = "proto3"; + +package daemon.v1; + +option go_package = "go.trai.ch/same/api/daemon/v1;daemonv1"; + +service DaemonService { + // Ping checks daemon health and resets the inactivity timer. + rpc Ping(PingRequest) returns (PingResponse); + + // Status returns current daemon status information. + rpc Status(StatusRequest) returns (StatusResponse); + + // Shutdown initiates graceful daemon termination. + rpc Shutdown(ShutdownRequest) returns (ShutdownResponse); + + // GetGraph returns the parsed task graph, using mtime for cache invalidation. + rpc GetGraph(GetGraphRequest) returns (GetGraphResponse); + + // GetEnvironment returns resolved Nix environment variables for a toolset. + rpc GetEnvironment(GetEnvironmentRequest) returns (GetEnvironmentResponse); + + // GetInputHash returns the cached or pending input hash for a task. + rpc GetInputHash(GetInputHashRequest) returns (GetInputHashResponse); + + // ExecuteTask runs a task and streams logs back to the client. + rpc ExecuteTask(ExecuteTaskRequest) returns (stream ExecuteTaskResponse); +} + +message PingRequest {} + +message PingResponse { + // idle_remaining_seconds is the time remaining before auto-shutdown. + int64 idle_remaining_seconds = 1; +} + +message StatusRequest {} + +message StatusResponse { + bool running = 1; + int32 pid = 2; + int64 uptime_seconds = 3; + int64 last_activity_unix = 4; + int64 idle_remaining_seconds = 5; +} + +message ShutdownRequest { + // graceful indicates whether to wait for in-flight operations. + bool graceful = 1; +} + +message ShutdownResponse { + bool success = 1; +} + +message ConfigMtime { + string path = 1; + int64 mtime_unix_nano = 2; +} + +message GetGraphRequest { + string cwd = 1; + repeated ConfigMtime config_mtimes = 2; +} + +message TaskProto { + string name = 1; + repeated string command = 2; + repeated string inputs = 3; + repeated string outputs = 4; + map<string, string> tools = 5; + repeated string dependencies = 6; + map<string, string> environment = 7; + string working_dir = 8; + string rebuild_strategy = 9; +} + +message GetGraphResponse { + bool cache_hit = 1; + string root = 2; + repeated TaskProto tasks = 3; +} + +message GetEnvironmentRequest { + string env_id = 1; + map<string, string> tools = 2; +} + +message GetEnvironmentResponse { + bool cache_hit = 1; + repeated string env_vars = 2; +} + +message GetInputHashRequest { + string task_name = 1; + string root = 2; + map<string, string> environment = 3; +} + +message GetInputHashResponse { + enum State { + READY = 0; + PENDING = 1; + UNKNOWN = 2; + } + State state = 1; + string hash = 2; +} + +message ExecuteTaskRequest { + string task_name = 1; + repeated string command = 2; + string working_dir = 3; + map<string, string> task_environment = 4; + repeated string nix_environment = 5; + int32 pty_rows = 6; + int32 pty_cols = 7; +} + +message ExecuteTaskResponse { + bytes data = 1; +} diff --git a/cli/api/daemon/v1/daemon_grpc.pb.go b/cli/api/daemon/v1/daemon_grpc.pb.go new file mode 100644 index 0000000..8d4a0cf --- /dev/null +++ b/cli/api/daemon/v1/daemon_grpc.pb.go @@ -0,0 +1,368 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions: +// - protoc-gen-go-grpc v1.6.0 +// - protoc (unknown) +// source: api/daemon/v1/daemon.proto + +package daemonv1 + +import ( + context "context" + + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + DaemonService_Ping_FullMethodName = "/daemon.v1.DaemonService/Ping" + DaemonService_Status_FullMethodName = "/daemon.v1.DaemonService/Status" + DaemonService_Shutdown_FullMethodName = "/daemon.v1.DaemonService/Shutdown" + DaemonService_GetGraph_FullMethodName = "/daemon.v1.DaemonService/GetGraph" + DaemonService_GetEnvironment_FullMethodName = "/daemon.v1.DaemonService/GetEnvironment" + DaemonService_GetInputHash_FullMethodName = "/daemon.v1.DaemonService/GetInputHash" + DaemonService_ExecuteTask_FullMethodName = "/daemon.v1.DaemonService/ExecuteTask" +) + +// DaemonServiceClient is the client API for DaemonService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type DaemonServiceClient interface { + // Ping checks daemon health and resets the inactivity timer. + Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) + // Status returns current daemon status information. + Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) + // Shutdown initiates graceful daemon termination. + Shutdown(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error) + // GetGraph returns the parsed task graph, using mtime for cache invalidation. + GetGraph(ctx context.Context, in *GetGraphRequest, opts ...grpc.CallOption) (*GetGraphResponse, error) + // GetEnvironment returns resolved Nix environment variables for a toolset. + GetEnvironment(ctx context.Context, in *GetEnvironmentRequest, opts ...grpc.CallOption) (*GetEnvironmentResponse, error) + // GetInputHash returns the cached or pending input hash for a task. + GetInputHash(ctx context.Context, in *GetInputHashRequest, opts ...grpc.CallOption) (*GetInputHashResponse, error) + // ExecuteTask runs a task and streams logs back to the client. + ExecuteTask(ctx context.Context, in *ExecuteTaskRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ExecuteTaskResponse], error) +} + +type daemonServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewDaemonServiceClient(cc grpc.ClientConnInterface) DaemonServiceClient { + return &daemonServiceClient{cc} +} + +func (c *daemonServiceClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(PingResponse) + err := c.cc.Invoke(ctx, DaemonService_Ping_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(StatusResponse) + err := c.cc.Invoke(ctx, DaemonService_Status_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) Shutdown(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ShutdownResponse) + err := c.cc.Invoke(ctx, DaemonService_Shutdown_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) GetGraph(ctx context.Context, in *GetGraphRequest, opts ...grpc.CallOption) (*GetGraphResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetGraphResponse) + err := c.cc.Invoke(ctx, DaemonService_GetGraph_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) GetEnvironment(ctx context.Context, in *GetEnvironmentRequest, opts ...grpc.CallOption) (*GetEnvironmentResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetEnvironmentResponse) + err := c.cc.Invoke(ctx, DaemonService_GetEnvironment_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) GetInputHash(ctx context.Context, in *GetInputHashRequest, opts ...grpc.CallOption) (*GetInputHashResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetInputHashResponse) + err := c.cc.Invoke(ctx, DaemonService_GetInputHash_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) ExecuteTask(ctx context.Context, in *ExecuteTaskRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ExecuteTaskResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[0], DaemonService_ExecuteTask_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[ExecuteTaskRequest, ExecuteTaskResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type DaemonService_ExecuteTaskClient = grpc.ServerStreamingClient[ExecuteTaskResponse] + +// DaemonServiceServer is the server API for DaemonService service. +// All implementations must embed UnimplementedDaemonServiceServer +// for forward compatibility. +type DaemonServiceServer interface { + // Ping checks daemon health and resets the inactivity timer. + Ping(context.Context, *PingRequest) (*PingResponse, error) + // Status returns current daemon status information. + Status(context.Context, *StatusRequest) (*StatusResponse, error) + // Shutdown initiates graceful daemon termination. + Shutdown(context.Context, *ShutdownRequest) (*ShutdownResponse, error) + // GetGraph returns the parsed task graph, using mtime for cache invalidation. + GetGraph(context.Context, *GetGraphRequest) (*GetGraphResponse, error) + // GetEnvironment returns resolved Nix environment variables for a toolset. + GetEnvironment(context.Context, *GetEnvironmentRequest) (*GetEnvironmentResponse, error) + // GetInputHash returns the cached or pending input hash for a task. 
+ GetInputHash(context.Context, *GetInputHashRequest) (*GetInputHashResponse, error) + // ExecuteTask runs a task and streams logs back to the client. + ExecuteTask(*ExecuteTaskRequest, grpc.ServerStreamingServer[ExecuteTaskResponse]) error + mustEmbedUnimplementedDaemonServiceServer() +} + +// UnimplementedDaemonServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedDaemonServiceServer struct{} + +func (UnimplementedDaemonServiceServer) Ping(context.Context, *PingRequest) (*PingResponse, error) { + return nil, status.Error(codes.Unimplemented, "method Ping not implemented") +} +func (UnimplementedDaemonServiceServer) Status(context.Context, *StatusRequest) (*StatusResponse, error) { + return nil, status.Error(codes.Unimplemented, "method Status not implemented") +} +func (UnimplementedDaemonServiceServer) Shutdown(context.Context, *ShutdownRequest) (*ShutdownResponse, error) { + return nil, status.Error(codes.Unimplemented, "method Shutdown not implemented") +} +func (UnimplementedDaemonServiceServer) GetGraph(context.Context, *GetGraphRequest) (*GetGraphResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetGraph not implemented") +} +func (UnimplementedDaemonServiceServer) GetEnvironment(context.Context, *GetEnvironmentRequest) (*GetEnvironmentResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetEnvironment not implemented") +} +func (UnimplementedDaemonServiceServer) GetInputHash(context.Context, *GetInputHashRequest) (*GetInputHashResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetInputHash not implemented") +} +func (UnimplementedDaemonServiceServer) ExecuteTask(*ExecuteTaskRequest, grpc.ServerStreamingServer[ExecuteTaskResponse]) error { + return status.Error(codes.Unimplemented, "method ExecuteTask not implemented") +} +func (UnimplementedDaemonServiceServer) mustEmbedUnimplementedDaemonServiceServer() {} +func (UnimplementedDaemonServiceServer) testEmbeddedByValue() {} + +// UnsafeDaemonServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to DaemonServiceServer will +// result in compilation errors. +type UnsafeDaemonServiceServer interface { + mustEmbedUnimplementedDaemonServiceServer() +} + +func RegisterDaemonServiceServer(s grpc.ServiceRegistrar, srv DaemonServiceServer) { + // If the following call panics, it indicates UnimplementedDaemonServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&DaemonService_ServiceDesc, srv) +} + +func _DaemonService_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).Ping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_Ping_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).Ping(ctx, req.(*PingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_Status_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).Status(ctx, req.(*StatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ShutdownRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).Shutdown(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_Shutdown_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).Shutdown(ctx, req.(*ShutdownRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_GetGraph_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetGraphRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).GetGraph(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_GetGraph_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).GetGraph(ctx, req.(*GetGraphRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_GetEnvironment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetEnvironmentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).GetEnvironment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_GetEnvironment_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).GetEnvironment(ctx, req.(*GetEnvironmentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_GetInputHash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, 
error) { + in := new(GetInputHashRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).GetInputHash(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_GetInputHash_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).GetInputHash(ctx, req.(*GetInputHashRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_ExecuteTask_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ExecuteTaskRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DaemonServiceServer).ExecuteTask(m, &grpc.GenericServerStream[ExecuteTaskRequest, ExecuteTaskResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type DaemonService_ExecuteTaskServer = grpc.ServerStreamingServer[ExecuteTaskResponse] + +// DaemonService_ServiceDesc is the grpc.ServiceDesc for DaemonService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var DaemonService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "daemon.v1.DaemonService", + HandlerType: (*DaemonServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Ping", + Handler: _DaemonService_Ping_Handler, + }, + { + MethodName: "Status", + Handler: _DaemonService_Status_Handler, + }, + { + MethodName: "Shutdown", + Handler: _DaemonService_Shutdown_Handler, + }, + { + MethodName: "GetGraph", + Handler: _DaemonService_GetGraph_Handler, + }, + { + MethodName: "GetEnvironment", + Handler: _DaemonService_GetEnvironment_Handler, + }, + { + MethodName: "GetInputHash", + Handler: _DaemonService_GetInputHash_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ExecuteTask", + Handler: _DaemonService_ExecuteTask_Handler, + ServerStreams: true, + }, + }, + Metadata: "api/daemon/v1/daemon.proto", +} diff --git a/cli/buf.gen.yaml b/cli/buf.gen.yaml new file mode 100644 index 0000000..7757009 --- /dev/null +++ b/cli/buf.gen.yaml @@ -0,0 +1,8 @@ +version: v1 +plugins: + - plugin: buf.build/protocolbuffers/go + out: . + opt: paths=source_relative + - plugin: buf.build/grpc/go + out: . + opt: paths=source_relative diff --git a/cli/buf.lock b/cli/buf.lock new file mode 100644 index 0000000..9fe6d00 --- /dev/null +++ b/cli/buf.lock @@ -0,0 +1,8 @@ +# Generated by buf. DO NOT EDIT. 
+version: v1 +deps: + - remote: buf.build + owner: googleapis + repository: googleapis + commit: 004180b77378443887d3b55cabc00384 + digest: shake256:d26c7c2fd95f0873761af33ca4a0c0d92c8577122b6feb74eb3b0a57ebe47a98ab24a209a0e91945ac4c77204e9da0c2de0020b2cedc27bdbcdea6c431eec69b diff --git a/cli/buf.yaml b/cli/buf.yaml new file mode 100644 index 0000000..732d25a --- /dev/null +++ b/cli/buf.yaml @@ -0,0 +1,9 @@ +version: v1 +deps: + - buf.build/googleapis/googleapis +lint: + use: + - DEFAULT +breaking: + use: + - FILE diff --git a/cli/cmd/same/commands/commands_test.go b/cli/cmd/same/commands/commands_test.go index 000d092..4d26083 100644 --- a/cli/cmd/same/commands/commands_test.go +++ b/cli/cmd/same/commands/commands_test.go @@ -2,6 +2,7 @@ package commands_test import ( "context" + "errors" "io" "os" "path/filepath" @@ -26,30 +27,36 @@ func TestRun_Success(t *testing.T) { mockHasher := mocks.NewMockHasher(ctrl) mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) + mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) // Create a graph with one task named "build" g := domain.NewGraph() + g.SetRoot(".") buildTask := &domain.Task{Name: domain.NewInternedString("build"), WorkingDir: domain.NewInternedString("Root")} _ = g.AddTask(buildTask) // Setup app - mockLogger := mocks.NewMockLogger(ctrl) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector). WithTeaOptions(tea.WithInput(nil), tea.WithOutput(io.Discard)) // Initialize CLI cli := commands.New(a) // Setup strict expectations in the correct sequence + // 0. Daemon connection fails (daemon not available, fallback to local) + mockLoader.EXPECT().DiscoverRoot(gomock.Any()).Return(".", nil) + mockConnector.EXPECT().Connect(gomock.Any(), gomock.Any()).Return(nil, errors.New("daemon not available")) + // 1. Loader.Load is called first - mockLoader.EXPECT().Load(".").Return(g, nil).Times(1) + mockLoader.EXPECT().Load(gomock.Any()).Return(g, nil).Times(1) mockResolver.EXPECT().ResolveInputs(gomock.Any(), gomock.Any()).Return([]string{}, nil).Times(1) // 2. Hasher.ComputeInputHash is called once to compute input hash mockHasher.EXPECT().ComputeInputHash(gomock.Any(), gomock.Any(), gomock.Any()).Return("hash123", nil).Times(1) // 3. Store.Get is called once to check for cached build info (simulate cache miss by returning nil) - mockStore.EXPECT().Get("build").Return(nil, nil).Times(1) + mockStore.EXPECT().Get(".", "build").Return(nil, nil).Times(1) // 4. Executor.Execute is called once to run the task (since it's a cache miss) mockExecutor.EXPECT().Execute( @@ -57,10 +64,10 @@ func TestRun_Success(t *testing.T) { ).Return(nil).Times(1) // 5. 
Store.Put is called once to save the new build result - mockStore.EXPECT().Put(gomock.Any()).Return(nil).Times(1) + mockStore.EXPECT().Put(".", gomock.Any()).Return(nil).Times(1) // Set command args - cli.SetArgs([]string{"run", "build"}) + cli.SetArgs([]string{"run", "build", "--inspect-on-error=false"}) // Execute err := cli.Execute(context.Background()) @@ -81,10 +88,11 @@ func TestRun_NoTargets(t *testing.T) { mockHasher := mocks.NewMockHasher(ctrl) mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) + mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) // Setup app - mockLogger := mocks.NewMockLogger(ctrl) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector). WithTeaOptions(tea.WithInput(nil), tea.WithOutput(io.Discard)) // Initialize CLI @@ -113,10 +121,11 @@ func TestRoot_Help(t *testing.T) { mockHasher := mocks.NewMockHasher(ctrl) mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) + mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) // Setup app - mockLogger := mocks.NewMockLogger(ctrl) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector). WithTeaOptions(tea.WithInput(nil), tea.WithOutput(io.Discard)) // Initialize CLI @@ -144,10 +153,11 @@ func TestRoot_Version(t *testing.T) { mockHasher := mocks.NewMockHasher(ctrl) mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) + mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) // Setup app - mockLogger := mocks.NewMockLogger(ctrl) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector). WithTeaOptions(tea.WithInput(nil), tea.WithOutput(io.Discard)) // Initialize CLI @@ -175,10 +185,11 @@ func TestVersionCmd(t *testing.T) { mockHasher := mocks.NewMockHasher(ctrl) mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) + mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) // Setup app - mockLogger := mocks.NewMockLogger(ctrl) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector). WithTeaOptions(tea.WithInput(nil), tea.WithOutput(io.Discard)) // Initialize CLI @@ -196,24 +207,25 @@ func TestVersionCmd(t *testing.T) { } // setupCleanTest creates a test CLI with mocked dependencies for clean command tests. 
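+// It changes into a fresh t.TempDir() and returns that directory so tests can
+// build root-relative store and cache paths; the original working directory is
+// restored via t.Cleanup once the test finishes.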
-func setupCleanTest(t *testing.T) (*commands.CLI, *mocks.MockLogger) { +func setupCleanTest(t *testing.T) (*commands.CLI, *mocks.MockLogger, string) { t.Helper() cwd, err := os.Getwd() if err != nil { t.Fatalf("Failed to get current working directory: %v", err) } - defer func() { - if errChdir := os.Chdir(cwd); errChdir != nil { - t.Fatalf("Failed to restore working directory: %v", errChdir) - } - }() tmpDir := t.TempDir() if errChdir := os.Chdir(tmpDir); errChdir != nil { t.Fatalf("Failed to change into temp directory: %v", errChdir) } + t.Cleanup(func() { + if errChdir := os.Chdir(cwd); errChdir != nil { + t.Fatalf("Failed to restore working directory: %v", errChdir) + } + }) + ctrl := gomock.NewController(t) t.Cleanup(ctrl.Finish) @@ -224,11 +236,14 @@ func setupCleanTest(t *testing.T) (*commands.CLI, *mocks.MockLogger) { mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) + + mockLoader.EXPECT().DiscoverRoot(gomock.Any()).Return(tmpDir, nil).AnyTimes() - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector). WithTeaOptions(tea.WithInput(nil), tea.WithOutput(io.Discard)) - return commands.New(a), mockLogger + return commands.New(a), mockLogger, tmpDir } func createDirWithMarker(t *testing.T, dirPath string) { @@ -243,10 +258,10 @@ func createDirWithMarker(t *testing.T, dirPath string) { } func TestCleanCmd_Default(t *testing.T) { - cli, mockLogger := setupCleanTest(t) + cli, mockLogger, tmpDir := setupCleanTest(t) mockLogger.EXPECT().Info(gomock.Any()).AnyTimes() - storePath := filepath.Join(domain.DefaultSamePath(), domain.StoreDirName) + storePath := filepath.Join(tmpDir, domain.DefaultSamePath(), domain.StoreDirName) if err := os.MkdirAll(storePath, domain.DirPerm); err != nil { t.Fatalf("Failed to create store directory: %v", err) } @@ -267,10 +282,10 @@ func TestCleanCmd_Default(t *testing.T) { } func TestCleanCmd_Tools(t *testing.T) { - cli, mockLogger := setupCleanTest(t) + cli, mockLogger, tmpDir := setupCleanTest(t) mockLogger.EXPECT().Info(gomock.Any()).AnyTimes() - nixHubPath := domain.DefaultNixHubCachePath() + nixHubPath := filepath.Join(tmpDir, domain.DefaultNixHubCachePath()) if err := os.MkdirAll(nixHubPath, domain.DirPerm); err != nil { t.Fatalf("Failed to create nixhub cache directory: %v", err) } @@ -279,7 +294,7 @@ func TestCleanCmd_Tools(t *testing.T) { t.Fatalf("Failed to create marker file: %v", err) } - envPath := domain.DefaultEnvCachePath() + envPath := filepath.Join(tmpDir, domain.DefaultEnvCachePath()) if err := os.MkdirAll(envPath, domain.DirPerm); err != nil { t.Fatalf("Failed to create env cache directory: %v", err) } @@ -303,16 +318,16 @@ func TestCleanCmd_Tools(t *testing.T) { } func TestCleanCmd_All(t *testing.T) { - cli, mockLogger := setupCleanTest(t) + cli, mockLogger, tmpDir := setupCleanTest(t) mockLogger.EXPECT().Info(gomock.Any()).AnyTimes() - storePath := filepath.Join(domain.DefaultSamePath(), domain.StoreDirName) + storePath := filepath.Join(tmpDir, domain.DefaultSamePath(), domain.StoreDirName) createDirWithMarker(t, storePath) - nixHubPath := domain.DefaultNixHubCachePath() + nixHubPath := filepath.Join(tmpDir, domain.DefaultNixHubCachePath()) createDirWithMarker(t, nixHubPath) - envPath := domain.DefaultEnvCachePath() + 
envPath := filepath.Join(tmpDir, domain.DefaultEnvCachePath()) createDirWithMarker(t, envPath) cli.SetArgs([]string{"clean", "--all"}) @@ -358,26 +373,31 @@ func TestRun_OutputModeFlags(t *testing.T) { mockHasher := mocks.NewMockHasher(ctrl) mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) + mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) g := domain.NewGraph() g.SetRoot(".") buildTask := &domain.Task{Name: domain.NewInternedString("build"), WorkingDir: domain.NewInternedString("Root")} _ = g.AddTask(buildTask) - mockLogger := mocks.NewMockLogger(ctrl) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New( + mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector, + ). WithTeaOptions(tea.WithInput(nil), tea.WithOutput(io.Discard)) cli := commands.New(a) - mockLoader.EXPECT().Load(".").Return(g, nil).Times(1) - mockResolver.EXPECT().ResolveInputs(gomock.Any(), ".").Return([]string{}, nil).Times(1) + mockLoader.EXPECT().DiscoverRoot(gomock.Any()).Return(".", nil) + mockConnector.EXPECT().Connect(gomock.Any(), gomock.Any()).Return(nil, errors.New("daemon not available")) + mockLoader.EXPECT().Load(gomock.Any()).Return(g, nil).Times(1) + mockResolver.EXPECT().ResolveInputs(gomock.Any(), gomock.Any()).Return([]string{}, nil).Times(1) mockHasher.EXPECT().ComputeInputHash(gomock.Any(), gomock.Any(), gomock.Any()).Return("hash123", nil).Times(1) - mockStore.EXPECT().Get("build").Return(nil, nil).Times(1) + mockStore.EXPECT().Get(".", "build").Return(nil, nil).Times(1) mockExecutor.EXPECT().Execute( gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), ).Return(nil).Times(1) - mockStore.EXPECT().Put(gomock.Any()).Return(nil).Times(1) + mockStore.EXPECT().Put(".", gomock.Any()).Return(nil).Times(1) cli.SetArgs(tt.args) diff --git a/cli/cmd/same/commands/daemon.go b/cli/cmd/same/commands/daemon.go new file mode 100644 index 0000000..7cc71c6 --- /dev/null +++ b/cli/cmd/same/commands/daemon.go @@ -0,0 +1,49 @@ +package commands + +import ( + "github.com/spf13/cobra" +) + +func (c *CLI) newDaemonCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "daemon", + Short: "Manage the background daemon", + } + + cmd.AddCommand(c.newDaemonServeCmd()) + cmd.AddCommand(c.newDaemonStatusCmd()) + cmd.AddCommand(c.newDaemonStopCmd()) + + return cmd +} + +func (c *CLI) newDaemonServeCmd() *cobra.Command { + return &cobra.Command{ + Use: "serve", + Short: "Start the daemon server (internal use)", + Hidden: true, + RunE: func(cmd *cobra.Command, _ []string) error { + return c.app.ServeDaemon(cmd.Context()) + }, + } +} + +func (c *CLI) newDaemonStatusCmd() *cobra.Command { + return &cobra.Command{ + Use: "status", + Short: "Show daemon status", + RunE: func(cmd *cobra.Command, _ []string) error { + return c.app.DaemonStatus(cmd.Context()) + }, + } +} + +func (c *CLI) newDaemonStopCmd() *cobra.Command { + return &cobra.Command{ + Use: "stop", + Short: "Stop the daemon", + RunE: func(cmd *cobra.Command, _ []string) error { + return c.app.StopDaemon(cmd.Context()) + }, + } +} diff --git a/cli/cmd/same/commands/root.go b/cli/cmd/same/commands/root.go index cb946b4..d76b306 100644 --- a/cli/cmd/same/commands/root.go +++ b/cli/cmd/same/commands/root.go @@ -45,6 +45,7 @@ func New(a *app.App) *CLI { rootCmd.AddCommand(c.newRunCmd()) rootCmd.AddCommand(c.newCleanCmd()) 
rootCmd.AddCommand(c.newVersionCmd()) + rootCmd.AddCommand(c.newDaemonCmd()) return c } diff --git a/cli/cmd/same/commands/run.go b/cli/cmd/same/commands/run.go index 67055b9..dea3c0f 100644 --- a/cli/cmd/same/commands/run.go +++ b/cli/cmd/same/commands/run.go @@ -18,8 +18,10 @@ func (c *CLI) newRunCmd() *cobra.Command { } noCache, _ := cmd.Flags().GetBool("no-cache") inspect, _ := cmd.Flags().GetBool("inspect") + inspectOnError, _ := cmd.Flags().GetBool("inspect-on-error") outputMode, _ := cmd.Flags().GetString("output-mode") ci, _ := cmd.Flags().GetBool("ci") + noDaemon, _ := cmd.Flags().GetBool("no-daemon") // If --ci is set, override output-mode to "linear" if ci { @@ -27,15 +29,19 @@ func (c *CLI) newRunCmd() *cobra.Command { } return c.app.Run(cmd.Context(), args, app.RunOptions{ - NoCache: noCache, - Inspect: inspect, - OutputMode: outputMode, + NoCache: noCache, + Inspect: inspect, + InspectOnError: inspectOnError, + OutputMode: outputMode, + NoDaemon: noDaemon, }) }, } cmd.Flags().BoolP("no-cache", "n", false, "Bypass the build cache and force execution") cmd.Flags().BoolP("inspect", "i", false, "Inspect the TUI after build completion (prevents auto-exit)") + cmd.Flags().Bool("inspect-on-error", true, "Keep TUI open if build fails") cmd.Flags().StringP("output-mode", "o", "auto", "Output mode: auto, tui, or linear") cmd.Flags().Bool("ci", false, "Use linear output mode (shorthand for --output-mode=linear)") + cmd.Flags().Bool("no-daemon", false, "Bypass remote daemon execution and run locally") return cmd } diff --git a/cli/cmd/same/main_test.go b/cli/cmd/same/main_test.go index e90610e..958e067 100644 --- a/cli/cmd/same/main_test.go +++ b/cli/cmd/same/main_test.go @@ -40,7 +40,7 @@ tasks: } return configPath }, - args: []string{"same", "run", "test"}, + args: []string{"same", "run", "test", "--no-daemon", "--inspect-on-error=false"}, expectedExit: 0, }, } @@ -116,7 +116,7 @@ tasks: }() // Set args - os.Args = []string{"same", "run", "test"} + os.Args = []string{"same", "run", "test", "--no-daemon", "--inspect-on-error=false"} // Run and expect error exit code exitCode := run() diff --git a/cli/codecov.yml b/cli/codecov.yml index d2988ea..038ce5c 100644 --- a/cli/codecov.yml +++ b/cli/codecov.yml @@ -23,6 +23,8 @@ ignore: - "**/mock_*.go" - "**/*_mock.go" - "**/node.go" + - "**/*.pb.go" + - "**/main.go" comment: layout: "reach, diff, flags, files" diff --git a/cli/e2e/testdata/workspace.txtar b/cli/e2e/testdata/workspace.txtar index adee4db..5cadd83 100644 --- a/cli/e2e/testdata/workspace.txtar +++ b/cli/e2e/testdata/workspace.txtar @@ -1,12 +1,12 @@ # Run task from sub-project cd proj1 -exec same run --ci build -stderr '\[build\] ✓ Completed' +exec same run --ci proj1:build +stderr '\[proj1:build\] ✓ Completed' # Run task from another project cd ../proj2 -exec same run --ci test -stderr '\[test\] ✓ Completed' +exec same run --ci proj2:test +stderr '\[proj2:test\] ✓ Completed' -- same.work.yaml -- version: "1" diff --git a/cli/go.mod b/cli/go.mod index 32d2feb..e087bc5 100644 --- a/cli/go.mod +++ b/cli/go.mod @@ -7,6 +7,7 @@ require ( github.com/charmbracelet/bubbletea v1.3.10 github.com/charmbracelet/lipgloss v1.1.0 github.com/creack/pty v1.1.24 + github.com/fsnotify/fsnotify v1.9.0 github.com/grindlemire/graft v0.2.3 github.com/muesli/termenv v0.16.0 github.com/rogpeppe/go-internal v1.14.1 @@ -20,6 +21,9 @@ require ( go.uber.org/mock v0.6.0 golang.org/x/sync v0.19.0 golang.org/x/term v0.39.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda + 
google.golang.org/grpc v1.78.0 + google.golang.org/protobuf v1.36.11 gopkg.in/yaml.v3 v3.0.1 ) @@ -52,7 +56,8 @@ require ( go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel/metric v1.39.0 // indirect golang.org/x/mod v0.31.0 // indirect + golang.org/x/net v0.48.0 // indirect golang.org/x/sys v0.40.0 // indirect - golang.org/x/text v0.3.8 // indirect + golang.org/x/text v0.32.0 // indirect golang.org/x/tools v0.40.0 // indirect ) diff --git a/cli/go.sum b/cli/go.sum index 0f6002b..6631da7 100644 --- a/cli/go.sum +++ b/cli/go.sum @@ -29,11 +29,15 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4= github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -105,6 +109,8 @@ golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561 h1:MDc5xs78ZrZr3HMQugiXOAkSZ golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -113,10 +119,18 @@ golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= -golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= golang.org/x/tools v0.40.0/go.mod 
h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda h1:i/Q+bfisr7gq6feoJnS/DlpdwEL4ihp41fvRiM3Ork0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= +google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/cli/internal/adapters/cas/node.go b/cli/internal/adapters/cas/node.go index 6e9bc43..2880546 100644 --- a/cli/internal/adapters/cas/node.go +++ b/cli/internal/adapters/cas/node.go @@ -15,11 +15,7 @@ func init() { ID: NodeID, Cacheable: true, Run: func(_ context.Context) (ports.BuildInfoStore, error) { - store, err := NewStore() - if err != nil { - return nil, err - } - return store, nil + return NewStore() }, }) } diff --git a/cli/internal/adapters/cas/store.go b/cli/internal/adapters/cas/store.go index f14f4c6..7af0328 100644 --- a/cli/internal/adapters/cas/store.go +++ b/cli/internal/adapters/cas/store.go @@ -15,30 +15,22 @@ import ( ) // Store implements ports.BuildInfoStore using a file-per-task strategy. -type Store struct { - dir string -} +type Store struct{} // NewStore creates a new BuildInfoStore backed by the directory at the given path. func NewStore() (*Store, error) { - return newStoreWithPath(domain.DefaultStorePath()) + return &Store{}, nil } -// newStoreWithPath creates a Store with a custom path (used for testing). -func newStoreWithPath(path string) (*Store, error) { - cleanPath := filepath.Clean(path) - if err := os.MkdirAll(cleanPath, domain.DirPerm); err != nil { - return nil, zerr.Wrap(err, domain.ErrStoreCreateFailed.Error()) - } - - return &Store{ - dir: cleanPath, - }, nil +// newStoreWithPath is retained for test compatibility but no longer uses the path parameter. +// All operations now require an explicit root parameter. Tests should pass tmpDir as root to Get/Put. +func newStoreWithPath(_ string) (*Store, error) { + return &Store{}, nil } // Get retrieves the build info for a given task name. -func (s *Store) Get(taskName string) (*domain.BuildInfo, error) { - filename := s.getFilename(taskName) +func (s *Store) Get(root, taskName string) (*domain.BuildInfo, error) { + filename := s.getFilename(root, taskName) //nolint:gosec // Path is constructed from trusted directory and hashed filename data, err := os.ReadFile(filename) if err != nil { @@ -57,13 +49,18 @@ func (s *Store) Get(taskName string) (*domain.BuildInfo, error) { } // Put stores the build info. 
-func (s *Store) Put(info domain.BuildInfo) error { +func (s *Store) Put(root string, info domain.BuildInfo) error { data, err := json.MarshalIndent(info, "", " ") if err != nil { return zerr.Wrap(err, domain.ErrStoreMarshalFailed.Error()) } - filename := s.getFilename(info.TaskName) + filename := s.getFilename(root, info.TaskName) + dir := filepath.Dir(filename) + if err := os.MkdirAll(dir, domain.DirPerm); err != nil { + return zerr.Wrap(err, domain.ErrStoreCreateFailed.Error()) + } + //nolint:gosec // Path is constructed from trusted directory and hashed filename if err := os.WriteFile(filename, data, domain.FilePerm); err != nil { return zerr.Wrap(err, domain.ErrStoreWriteFailed.Error()) @@ -72,8 +69,9 @@ func (s *Store) Put(info domain.BuildInfo) error { return nil } -func (s *Store) getFilename(taskName string) string { +func (s *Store) getFilename(root, taskName string) string { hash := sha256.Sum256([]byte(taskName)) hexHash := hex.EncodeToString(hash[:]) - return filepath.Join(s.dir, hexHash+".json") + storeDir := filepath.Join(root, domain.DefaultStorePath()) + return filepath.Join(storeDir, hexHash+".json") } diff --git a/cli/internal/adapters/cas/store_test.go b/cli/internal/adapters/cas/store_test.go index d4d3fe8..390e41e 100644 --- a/cli/internal/adapters/cas/store_test.go +++ b/cli/internal/adapters/cas/store_test.go @@ -14,23 +14,6 @@ import ( ) func TestNewStore(t *testing.T) { - // NewStore uses a hardcoded path ".same/store" - // We need to test in a temp directory context - originalWd, err := os.Getwd() - if err != nil { - t.Fatalf("Getwd failed: %v", err) - } - - tmpDir := t.TempDir() - if cdErr := os.Chdir(tmpDir); cdErr != nil { - t.Fatalf("Chdir failed: %v", cdErr) - } - defer func() { - if chErr := os.Chdir(originalWd); chErr != nil { - t.Errorf("Failed to restore working directory: %v", chErr) - } - }() - store, err := cas.NewStore() if err != nil { t.Fatalf("NewStore failed: %v", err) @@ -39,19 +22,18 @@ func TestNewStore(t *testing.T) { t.Fatal("NewStore returned nil store") } - // Verify the directory was created - // .same/store is the default path + // Verify that no directory is created eagerly + tmpDir := t.TempDir() expectedPath := filepath.Join(tmpDir, domain.DefaultStorePath()) - if _, statErr := os.Stat(expectedPath); os.IsNotExist(statErr) { - t.Errorf("NewStore did not create directory at %s", expectedPath) + if _, statErr := os.Stat(expectedPath); statErr == nil { + t.Errorf("NewStore should not create directory eagerly at %s", expectedPath) } } func TestStore_PutAndGet(t *testing.T) { tmpDir := t.TempDir() - storePath := filepath.Join(tmpDir, "same_state") - store, err := cas.NewStoreWithPath(storePath) + store, err := cas.NewStoreWithPath("") if err != nil { t.Fatalf("NewStore failed: %v", err) } @@ -63,12 +45,12 @@ func TestStore_PutAndGet(t *testing.T) { Timestamp: time.Now(), } - err2 := store.Put(info) + err2 := store.Put(tmpDir, info) if err2 != nil { t.Fatalf("Put failed: %v", err2) } - got, err := store.Get("task1") + got, err := store.Get(tmpDir, "task1") if err != nil { t.Fatalf("Get failed: %v", err) } @@ -84,10 +66,9 @@ func TestStore_PutAndGet(t *testing.T) { func TestStore_Persistence(t *testing.T) { tmpDir := t.TempDir() - storePath := filepath.Join(tmpDir, "same_state") // 1. 
Create store and save data - store1, err := cas.NewStoreWithPath(storePath) + store1, err := cas.NewStoreWithPath("") if err != nil { t.Fatalf("NewStore 1 failed: %v", err) } @@ -96,17 +77,17 @@ func TestStore_Persistence(t *testing.T) { TaskName: "task2", InputHash: "xyz", } - if err := store1.Put(info); err != nil { + if err := store1.Put(tmpDir, info); err != nil { t.Fatalf("Put failed: %v", err) } // 2. Create new store instance pointing to same directory - store2, err2 := cas.NewStoreWithPath(storePath) + store2, err2 := cas.NewStoreWithPath("") if err2 != nil { t.Fatalf("NewStore 2 failed: %v", err2) } - got, err3 := store2.Get("task2") + got, err3 := store2.Get(tmpDir, "task2") if err3 != nil { t.Fatalf("Get failed: %v", err3) } @@ -121,9 +102,8 @@ func TestStore_Persistence(t *testing.T) { func TestStore_OmitZero(t *testing.T) { tmpDir := t.TempDir() - storePath := filepath.Join(tmpDir, "same_state") - store, err := cas.NewStoreWithPath(storePath) + store, err := cas.NewStoreWithPath("") if err != nil { t.Fatalf("NewStore failed: %v", err) } @@ -133,7 +113,7 @@ func TestStore_OmitZero(t *testing.T) { TaskName: "task_zero", } - err2 := store.Put(info) + err2 := store.Put(tmpDir, info) if err2 != nil { t.Fatalf("Put failed: %v", err2) } @@ -141,6 +121,7 @@ func TestStore_OmitZero(t *testing.T) { // Read the file content directly hash := sha256.Sum256([]byte("task_zero")) hexHash := hex.EncodeToString(hash[:]) + storePath := filepath.Join(tmpDir, domain.DefaultStorePath()) taskFile := filepath.Join(storePath, hexHash+".json") //nolint:gosec // Test file with controlled path @@ -171,21 +152,33 @@ func TestStore_OmitZero(t *testing.T) { func TestNewStore_Error(t *testing.T) { tmpDir := t.TempDir() // Create a file where the directory should be - filePath := filepath.Join(tmpDir, "file_blocking_dir") + filePath := filepath.Join(tmpDir, domain.DefaultStorePath()) + // Create parent directory first + if err := os.MkdirAll(filepath.Dir(filePath), domain.DirPerm); err != nil { + t.Fatalf("MkdirAll failed: %v", err) + } if err := os.WriteFile(filePath, []byte("block"), domain.PrivateFilePerm); err != nil { t.Fatalf("WriteFile failed: %v", err) } - _, err := cas.NewStoreWithPath(filePath) + store, err := cas.NewStoreWithPath("") + if err != nil { + t.Fatal("NewStore should not fail (directory creation is lazy)") + } + + // But Put should fail + info := domain.BuildInfo{ + TaskName: "test", + } + err = store.Put(tmpDir, info) if err == nil { - t.Fatal("NewStore should have failed when path is a file") + t.Fatal("Put should have failed when path is a file") } } func TestGet_ReadError(t *testing.T) { tmpDir := t.TempDir() - storePath := filepath.Join(tmpDir, "same_state") - store, err := cas.NewStoreWithPath(storePath) + store, err := cas.NewStoreWithPath("") if err != nil { t.Fatalf("NewStore failed: %v", err) } @@ -194,6 +187,10 @@ func TestGet_ReadError(t *testing.T) { taskName := "task_read_error" hash := sha256.Sum256([]byte(taskName)) hexHash := hex.EncodeToString(hash[:]) + storePath := filepath.Join(tmpDir, domain.DefaultStorePath()) + if err = os.MkdirAll(storePath, domain.DirPerm); err != nil { + t.Fatalf("MkdirAll failed: %v", err) + } taskFile := filepath.Join(storePath, hexHash+".json") // Write only @@ -202,7 +199,7 @@ func TestGet_ReadError(t *testing.T) { t.Fatalf("WriteFile failed: %v", err) } - _, err = store.Get(taskName) + _, err = store.Get(tmpDir, taskName) if err == nil { t.Fatal("Get should have failed due to read permissions") } @@ -210,8 +207,7 @@ func 
TestGet_ReadError(t *testing.T) { func TestGet_UnmarshalError(t *testing.T) { tmpDir := t.TempDir() - storePath := filepath.Join(tmpDir, "same_state") - store, err := cas.NewStoreWithPath(storePath) + store, err := cas.NewStoreWithPath("") if err != nil { t.Fatalf("NewStore failed: %v", err) } @@ -220,13 +216,17 @@ func TestGet_UnmarshalError(t *testing.T) { taskName := "task_invalid_json" hash := sha256.Sum256([]byte(taskName)) hexHash := hex.EncodeToString(hash[:]) + storePath := filepath.Join(tmpDir, domain.DefaultStorePath()) + if err = os.MkdirAll(storePath, domain.DirPerm); err != nil { + t.Fatalf("MkdirAll failed: %v", err) + } taskFile := filepath.Join(storePath, hexHash+".json") if err = os.WriteFile(taskFile, []byte("{ invalid json"), domain.PrivateFilePerm); err != nil { t.Fatalf("WriteFile failed: %v", err) } - _, err = store.Get(taskName) + _, err = store.Get(tmpDir, taskName) if err == nil { t.Fatal("Get should have failed due to invalid JSON") } @@ -234,12 +234,17 @@ func TestGet_UnmarshalError(t *testing.T) { func TestPut_WriteError(t *testing.T) { tmpDir := t.TempDir() - storePath := filepath.Join(tmpDir, "same_state") - store, err := cas.NewStoreWithPath(storePath) + store, err := cas.NewStoreWithPath("") if err != nil { t.Fatalf("NewStore failed: %v", err) } + // Create the store directory first + storePath := filepath.Join(tmpDir, domain.DefaultStorePath()) + if err = os.MkdirAll(storePath, domain.DirPerm); err != nil { + t.Fatalf("MkdirAll failed: %v", err) + } + // Remove write permissions from the directory //nolint:gosec // Intentionally restricting permissions for testing if err = os.Chmod(storePath, 0o500); err != nil { // Read/Execute only @@ -256,7 +261,7 @@ func TestPut_WriteError(t *testing.T) { TaskName: "task_write_error", } - err = store.Put(info) + err = store.Put(tmpDir, info) if err == nil { t.Fatal("Put should have failed due to directory permissions") } diff --git a/cli/internal/adapters/config/loader.go b/cli/internal/adapters/config/loader.go index 402311d..0805b80 100644 --- a/cli/internal/adapters/config/loader.go +++ b/cli/internal/adapters/config/loader.go @@ -55,35 +55,54 @@ func (l *Loader) Load(cwd string) (*domain.Graph, error) { } func (l *Loader) findConfiguration(cwd string) (string, Mode, error) { + root, err := l.DiscoverRoot(cwd) + if err != nil { + return "", "", err + } + + workfilePath := filepath.Join(root, domain.WorkFileName) + if _, err := os.Stat(workfilePath); err == nil { + return workfilePath, ModeWorkspace, nil + } + + samefilePath := filepath.Join(root, domain.SameFileName) + if _, err := os.Stat(samefilePath); err == nil { + return samefilePath, ModeStandalone, nil + } + + return "", "", zerr.With(domain.ErrConfigNotFound, "cwd", cwd) +} + +// DiscoverRoot walks up from cwd to find the workspace root. 
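+// Illustrative behavior, assuming these files exist: with /repo/same.work.yaml
+// present, DiscoverRoot("/repo/proj1/src") returns "/repo"; if only
+// /repo/proj1/same.yaml is on the path instead, it returns "/repo/proj1".
+// A same.work.yaml found anywhere above always wins over a nearer same.yaml.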
+func (l *Loader) DiscoverRoot(cwd string) (string, error) { currentDir := cwd var standaloneCandidate string for { workfilePath := filepath.Join(currentDir, domain.WorkFileName) if _, err := os.Stat(workfilePath); err == nil { - return workfilePath, ModeWorkspace, nil + return currentDir, nil } if standaloneCandidate == "" { samefilePath := filepath.Join(currentDir, domain.SameFileName) if _, err := os.Stat(samefilePath); err == nil { - standaloneCandidate = samefilePath + standaloneCandidate = currentDir } } parentDir := filepath.Dir(currentDir) if parentDir == currentDir { - // Reached root break } currentDir = parentDir } if standaloneCandidate != "" { - return standaloneCandidate, ModeStandalone, nil + return standaloneCandidate, nil } - return "", "", zerr.With(domain.ErrConfigNotFound, "cwd", cwd) + return "", zerr.With(domain.ErrConfigNotFound, "cwd", cwd) } func (l *Loader) loadSamefile(configPath string) (*domain.Graph, error) { @@ -519,3 +538,81 @@ func validateRebuildStrategy(value string) (domain.RebuildStrategy, error) { return "", domain.ErrInvalidRebuildStrategy } } + +// DiscoverConfigPaths finds same.yaml and same.work.yaml paths from cwd. +// Returns paths and their mtimes for cache validation. +// It walks up the directory tree and finds all config files that would be loaded +// for a workspace (including workspace file and all project files). +func (l *Loader) DiscoverConfigPaths(cwd string) (map[string]int64, error) { + paths := make(map[string]int64) + + // First, find the workspace or standalone config + currentDir := cwd + var standaloneCandidate string + + for { + workfilePath := filepath.Join(currentDir, domain.WorkFileName) + if info, err := os.Stat(workfilePath); err == nil { + // Found workspace file, add it + paths[workfilePath] = info.ModTime().UnixNano() + + // For workspace mode, also find all project same.yaml files + if err := l.discoverWorkspaceProjectPaths(currentDir, paths); err != nil { + return nil, zerr.Wrap(err, "failed to discover project paths") + } + + return paths, nil + } + + if standaloneCandidate == "" { + samefilePath := filepath.Join(currentDir, domain.SameFileName) + if info, err := os.Stat(samefilePath); err == nil { + standaloneCandidate = samefilePath + paths[samefilePath] = info.ModTime().UnixNano() + } + } + + parentDir := filepath.Dir(currentDir) + if parentDir == currentDir { + // Reached root + break + } + currentDir = parentDir + } + + if standaloneCandidate != "" { + // Standalone mode, only one config file + return paths, nil + } + + return nil, zerr.With(domain.ErrConfigNotFound, "cwd", cwd) +} + +// discoverWorkspaceProjectPaths finds all same.yaml files in workspace projects. 
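+// Each entry is keyed by config path with a ModTime().UnixNano() value;
+// together with DiscoverConfigPaths this forms the client half of the
+// daemon's mtime-based graph-cache validation (see ServerCache.GetGraph).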
+func (l *Loader) discoverWorkspaceProjectPaths(workspaceRoot string, paths map[string]int64) error { + workfilePath := filepath.Join(workspaceRoot, domain.WorkFileName) + //nolint:gosec // G304: Path is constructed from validated workspace root, safe for use + workfileData, readErr := os.ReadFile(workfilePath) + if readErr != nil { + return zerr.Wrap(readErr, "failed to read workfile") + } + + var workfile Workfile + if unmarshalErr := yaml.Unmarshal(workfileData, &workfile); unmarshalErr != nil { + return zerr.Wrap(unmarshalErr, "failed to parse workfile") + } + + projectPaths, resolveErr := l.resolveProjectPaths(workspaceRoot, workfile.Projects) + if resolveErr != nil { + return resolveErr + } + + for _, projectPath := range projectPaths { + sameYamlPath := filepath.Join(projectPath, domain.SameFileName) + if info, statErr := os.Stat(sameYamlPath); statErr == nil { + paths[sameYamlPath] = info.ModTime().UnixNano() + } + } + + return nil +} diff --git a/cli/internal/adapters/daemon/cache.go b/cli/internal/adapters/daemon/cache.go new file mode 100644 index 0000000..4f1dc28 --- /dev/null +++ b/cli/internal/adapters/daemon/cache.go @@ -0,0 +1,81 @@ +// Package daemon provides the daemon server and client implementations. +package daemon + +import ( + "sync" + + "go.trai.ch/same/internal/core/domain" +) + +// ServerCache holds thread-safe in-memory caches for the daemon server. +// +// Cache Validation Assumption: +// The cache validation logic trusts client-provided mtime values and compares them +// against stored mtimes without verifying actual file mtimes on the server. +// This design assumes the daemon and client share the same filesystem view, which +// is valid for local Unix-socket daemons but would need revision for remote scenarios. +type ServerCache struct { + mu sync.RWMutex + graphCache map[string]*domain.GraphCacheEntry // cwd -> entry + envCache map[string][]string // envID -> env vars +} + +// NewServerCache creates a new ServerCache instance. +func NewServerCache() *ServerCache { + return &ServerCache{ + graphCache: make(map[string]*domain.GraphCacheEntry), + envCache: make(map[string][]string), + } +} + +// GetGraph retrieves a cached graph for the given cwd and validates mtimes. +// Returns the graph and true if cache hit and valid, nil and false otherwise. +func (c *ServerCache) GetGraph(cwd string, clientMtimes map[string]int64) (*domain.Graph, bool) { + c.mu.RLock() + defer c.mu.RUnlock() + + entry, exists := c.graphCache[cwd] + if !exists { + return nil, false + } + + // Validate mtimes match + if len(clientMtimes) != len(entry.Mtimes) { + return nil, false + } + + for path, clientMtime := range clientMtimes { + storedMtime, ok := entry.Mtimes[path] + if !ok || clientMtime != storedMtime { + return nil, false + } + } + + return entry.Graph, true +} + +// SetGraph stores a graph in the cache with its mtimes. +func (c *ServerCache) SetGraph(cwd string, entry *domain.GraphCacheEntry) { + c.mu.Lock() + defer c.mu.Unlock() + + c.graphCache[cwd] = entry +} + +// GetEnv retrieves cached environment variables for the given envID. +// Returns the env vars and true if cache hit, nil and false otherwise. +func (c *ServerCache) GetEnv(envID string) ([]string, bool) { + c.mu.RLock() + defer c.mu.RUnlock() + + env, exists := c.envCache[envID] + return env, exists +} + +// SetEnv stores environment variables in the cache. 
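+// Callers are assumed to derive envID deterministically from the requested
+// tool set (for example, a stable hash over the sorted tools map) so that
+// identical requests hit the cache; the key derivation itself is not
+// enforced here.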
+func (c *ServerCache) SetEnv(envID string, env []string) { + c.mu.Lock() + defer c.mu.Unlock() + + c.envCache[envID] = env +} diff --git a/cli/internal/adapters/daemon/client.go b/cli/internal/adapters/daemon/client.go new file mode 100644 index 0000000..03f83a3 --- /dev/null +++ b/cli/internal/adapters/daemon/client.go @@ -0,0 +1,306 @@ +// Package daemon implements the background daemon adapter for same. +// It provides gRPC server and client for inter-process communication over Unix Domain Sockets. +package daemon + +import ( + "context" + "io" + "path/filepath" + "strconv" + "time" + + "go.trai.ch/same/api/daemon/v1" + "go.trai.ch/same/internal/core/domain" + "go.trai.ch/same/internal/core/ports" + "go.trai.ch/zerr" + "google.golang.org/grpc" + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/status" +) + +const ( + // gRPC backoff configuration for fast connection establishment. + grpcBaseDelay = 50 * time.Millisecond + grpcMaxDelay = 200 * time.Millisecond + grpcMinConnectTimeout = 100 * time.Millisecond + grpcBackoffMultiplier = 1.5 +) + +// Client implements ports.DaemonClient. +type Client struct { + conn *grpc.ClientConn + client daemonv1.DaemonServiceClient +} + +// Dial connects to the daemon over UDS at the specified workspace root. +// Note: grpc.NewClient returns immediately; actual connection happens lazily on first RPC. +func Dial(root string) (*Client, error) { + if root == "" { + return nil, zerr.New("root cannot be empty") + } + + absRoot, err := filepath.Abs(root) + if err != nil { + return nil, zerr.Wrap(err, "failed to resolve absolute root path") + } + + socketPath := filepath.Join(absRoot, domain.DefaultDaemonSocketPath()) + target := "unix://" + socketPath + + conn, err := grpc.NewClient(target, + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithConnectParams(grpc.ConnectParams{ + Backoff: backoff.Config{ + BaseDelay: grpcBaseDelay, + Multiplier: grpcBackoffMultiplier, + MaxDelay: grpcMaxDelay, + }, + MinConnectTimeout: grpcMinConnectTimeout, + }), + ) + if err != nil { + return nil, zerr.Wrap(err, "daemon client creation failed") + } + + client := &Client{ + conn: conn, + client: daemonv1.NewDaemonServiceClient(conn), + } + return client, nil +} + +// Ping implements ports.DaemonClient. +func (c *Client) Ping(ctx context.Context) error { + _, err := c.client.Ping(ctx, &daemonv1.PingRequest{}) + return err +} + +// Status implements ports.DaemonClient. +func (c *Client) Status(ctx context.Context) (*ports.DaemonStatus, error) { + resp, err := c.client.Status(ctx, &daemonv1.StatusRequest{}) + if err != nil { + return nil, err + } + return &ports.DaemonStatus{ + Running: resp.Running, + PID: int(resp.Pid), + Uptime: time.Duration(resp.UptimeSeconds) * time.Second, + LastActivity: time.Unix(resp.LastActivityUnix, 0), + IdleRemaining: time.Duration(resp.IdleRemainingSeconds) * time.Second, + }, nil +} + +// Shutdown implements ports.DaemonClient. +func (c *Client) Shutdown(ctx context.Context) error { + _, err := c.client.Shutdown(ctx, &daemonv1.ShutdownRequest{Graceful: true}) + return err +} + +// GetGraph implements ports.DaemonClient. 
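+// A typical round trip, using DiscoverConfigPaths from this change
+// (illustrative sketch):
+//
+//	mtimes, _ := loader.DiscoverConfigPaths(cwd)
+//	graph, cacheHit, err := client.GetGraph(ctx, cwd, mtimes)
+//
+// cacheHit reports whether the daemon answered from its in-memory graph
+// cache without reloading configuration.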
+func (c *Client) GetGraph( + ctx context.Context, + cwd string, + configMtimes map[string]int64, +) (graph *domain.Graph, cacheHit bool, err error) { + // Build request + req := &daemonv1.GetGraphRequest{ + Cwd: cwd, + } + for path, mtime := range configMtimes { + req.ConfigMtimes = append(req.ConfigMtimes, &daemonv1.ConfigMtime{ + Path: path, + MtimeUnixNano: mtime, + }) + } + + // Call gRPC + resp, err := c.client.GetGraph(ctx, req) + if err != nil { + return nil, false, zerr.Wrap(err, "GetGraph RPC failed") + } + + // Convert response to domain.Graph + graph = domain.NewGraph() + for _, taskProto := range resp.Tasks { + task := &domain.Task{ + Name: domain.NewInternedString(taskProto.Name), + Command: taskProto.Command, + Inputs: c.stringsToInternedStrings(taskProto.Inputs), + Outputs: c.stringsToInternedStrings(taskProto.Outputs), + Tools: taskProto.Tools, + Dependencies: c.stringsToInternedStrings(taskProto.Dependencies), + Environment: taskProto.Environment, + WorkingDir: domain.NewInternedString(taskProto.WorkingDir), + RebuildStrategy: domain.RebuildStrategy(taskProto.RebuildStrategy), + } + if err := graph.AddTask(task); err != nil { + return nil, false, zerr.Wrap(err, "failed to add task to graph") + } + } + + // Set root (important: must be set after all tasks are added) + graph.SetRoot(resp.Root) + + // Validate the graph to compute executionOrder and dependents + if err := graph.Validate(); err != nil { + return nil, false, zerr.Wrap(err, "failed to validate reconstructed graph") + } + + return graph, resp.CacheHit, nil +} + +// GetEnvironment implements ports.DaemonClient. +func (c *Client) GetEnvironment( + ctx context.Context, + envID string, + tools map[string]string, +) (envVars []string, cacheHit bool, err error) { + req := &daemonv1.GetEnvironmentRequest{ + EnvId: envID, + Tools: tools, + } + + resp, err := c.client.GetEnvironment(ctx, req) + if err != nil { + return nil, false, zerr.Wrap(err, "GetEnvironment RPC failed") + } + + return resp.EnvVars, resp.CacheHit, nil +} + +// GetInputHash implements ports.DaemonClient. +func (c *Client) GetInputHash( + ctx context.Context, + taskName, root string, + env map[string]string, +) (ports.InputHashResult, error) { + req := &daemonv1.GetInputHashRequest{ + TaskName: taskName, + Root: root, + Environment: env, + } + + resp, err := c.client.GetInputHash(ctx, req) + if err != nil { + return ports.InputHashResult{State: ports.HashUnknown}, zerr.Wrap(err, "GetInputHash RPC failed") + } + + // Convert the proto enum to the ports.InputHashState + var state ports.InputHashState + switch resp.State { + case daemonv1.GetInputHashResponse_READY: + state = ports.HashReady + case daemonv1.GetInputHashResponse_PENDING: + state = ports.HashPending + default: + state = ports.HashUnknown + } + + return ports.InputHashResult{ + State: state, + Hash: resp.Hash, + }, nil +} + +// ExecuteTask implements ports.DaemonClient. +// Note: stderr is intentionally merged into stdout for PTY mode. This is because +// PTY sessions combine both output streams by design. For non-PTY scenarios, +// consider separate stderr handling in the Executor implementation. 
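+// The exit status travels out-of-band: the daemon places it in the
+// "x-exit-code" gRPC trailer, which is read both after a clean EOF and in
+// handleExecuteError when the stream ends with a non-OK status; any non-zero
+// value is surfaced as domain.ErrTaskExecutionFailed with an "exit_code" field.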
+func (c *Client) ExecuteTask( + ctx context.Context, + task *domain.Task, + nixEnv []string, + stdout, _ io.Writer, +) error { + // Build request + const ( + defaultPtyRows = 24 + defaultPtyCols = 80 + ) + + req := &daemonv1.ExecuteTaskRequest{ + TaskName: task.Name.String(), + Command: task.Command, + WorkingDir: task.WorkingDir.String(), + TaskEnvironment: task.Environment, + NixEnvironment: nixEnv, + PtyRows: defaultPtyRows, + PtyCols: defaultPtyCols, + } + + // Start streaming RPC + stream, err := c.client.ExecuteTask(ctx, req) + if err != nil { + return zerr.Wrap(err, "ExecuteTask RPC failed") + } + + // Receive and forward log chunks + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return c.handleExecuteError(err, stream) + } + if _, writeErr := stdout.Write(resp.Data); writeErr != nil { + return zerr.Wrap(writeErr, "failed to write log chunk") + } + } + + // Check trailer for success case + trailer := stream.Trailer() + if exitStr := trailer.Get("x-exit-code"); len(exitStr) > 0 { + exitCode, err := strconv.Atoi(exitStr[0]) + if err != nil { + return zerr.Wrap(err, "malformed exit code in trailer") + } + if exitCode != 0 { + return zerr.With(domain.ErrTaskExecutionFailed, "exit_code", exitCode) + } + } + + return nil +} + +// handleExecuteError extracts the exit code from a failed ExecuteTask RPC. +func (c *Client) handleExecuteError(err error, stream grpc.ClientStream) error { + st, ok := status.FromError(err) + if !ok { + return zerr.Wrap(err, "ExecuteTask failed") + } + + // For non-zero exit codes, we get UNKNOWN status + if st.Code() == codes.Unknown { + // Try to extract exit code from trailer + trailer := stream.Trailer() + if exitStr := trailer.Get("x-exit-code"); len(exitStr) > 0 { + exitCode, parseErr := strconv.Atoi(exitStr[0]) + if parseErr != nil { + wrapped := zerr.Wrap(parseErr, "malformed exit code in trailer") + return zerr.With(wrapped, "original_error", err.Error()) + } + return zerr.With(domain.ErrTaskExecutionFailed, "exit_code", exitCode) + } + // If no trailer, return the status error + return zerr.Wrap(err, "ExecuteTask failed with unknown error") + } + + return zerr.Wrap(err, "ExecuteTask failed") +} + +// stringsToInternedStrings converts a slice of strings to InternedString. +func (c *Client) stringsToInternedStrings(strs []string) []domain.InternedString { + result := make([]domain.InternedString, len(strs)) + for i, s := range strs { + result[i] = domain.NewInternedString(s) + } + return result +} + +// Close implements ports.DaemonClient. +func (c *Client) Close() error { + return c.conn.Close() +} diff --git a/cli/internal/adapters/daemon/lifecycle.go b/cli/internal/adapters/daemon/lifecycle.go new file mode 100644 index 0000000..64a7bdd --- /dev/null +++ b/cli/internal/adapters/daemon/lifecycle.go @@ -0,0 +1,82 @@ +package daemon + +import ( + "sync" + "time" +) + +// Lifecycle manages daemon inactivity timeout and shutdown. +type Lifecycle struct { + mu sync.Mutex + timer *time.Timer + startTime time.Time + lastActivity time.Time + timeout time.Duration + shutdownChan chan struct{} + shutdownOnce sync.Once +} + +// NewLifecycle creates a new lifecycle manager with the given timeout. +func NewLifecycle(timeout time.Duration) *Lifecycle { + now := time.Now() + l := &Lifecycle{ + startTime: now, + lastActivity: now, + timeout: timeout, + shutdownChan: make(chan struct{}), + } + l.timer = time.AfterFunc(timeout, func() { + l.triggerShutdown() + }) + return l +} + +// ResetTimer resets the inactivity timer. 
Called on every activity. +func (l *Lifecycle) ResetTimer() { + l.mu.Lock() + defer l.mu.Unlock() + l.lastActivity = time.Now() + l.timer.Reset(l.timeout) +} + +// IdleRemaining returns the duration until auto-shutdown. +func (l *Lifecycle) IdleRemaining() time.Duration { + l.mu.Lock() + defer l.mu.Unlock() + elapsed := time.Since(l.lastActivity) + remaining := l.timeout - elapsed + if remaining < 0 { + return 0 + } + return remaining +} + +// Uptime returns how long the daemon has been running. +func (l *Lifecycle) Uptime() time.Duration { + return time.Since(l.startTime) +} + +// LastActivity returns the timestamp of the last activity. +func (l *Lifecycle) LastActivity() time.Time { + l.mu.Lock() + defer l.mu.Unlock() + return l.lastActivity +} + +// ShutdownChan returns a channel that closes when shutdown is triggered. +func (l *Lifecycle) ShutdownChan() <-chan struct{} { + return l.shutdownChan +} + +// TriggerShutdown initiates shutdown (idempotent). +func (l *Lifecycle) triggerShutdown() { + l.shutdownOnce.Do(func() { + close(l.shutdownChan) + }) +} + +// Shutdown stops the timer and triggers shutdown. +func (l *Lifecycle) Shutdown() { + l.timer.Stop() + l.triggerShutdown() +} diff --git a/cli/internal/adapters/daemon/lifecycle_test.go b/cli/internal/adapters/daemon/lifecycle_test.go new file mode 100644 index 0000000..6b1828e --- /dev/null +++ b/cli/internal/adapters/daemon/lifecycle_test.go @@ -0,0 +1,121 @@ +package daemon_test + +import ( + "testing" + "testing/synctest" + "time" + + "go.trai.ch/same/internal/adapters/daemon" +) + +func TestLifecycle_AutoShutdown(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + timeout := 100 * time.Millisecond + lc := daemon.NewLifecycle(timeout) + defer lc.Shutdown() + + select { + case <-lc.ShutdownChan(): + case <-time.After(200 * time.Millisecond): + t.Fatal("expected shutdown to be triggered") + } + synctest.Wait() + }) +} + +func TestLifecycle_ResetPreventsShutdown(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + timeout := 100 * time.Millisecond + lc := daemon.NewLifecycle(timeout) + defer lc.Shutdown() + + time.Sleep(50 * time.Millisecond) + lc.ResetTimer() + + select { + case <-lc.ShutdownChan(): + t.Fatal("shutdown should not have triggered yet") + case <-time.After(60 * time.Millisecond): + } + synctest.Wait() + }) +} + +func TestLifecycle_IdleRemaining(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + timeout := 100 * time.Millisecond + lc := daemon.NewLifecycle(timeout) + defer lc.Shutdown() + + remaining := lc.IdleRemaining() + if remaining > timeout { + t.Fatalf("idle remaining %v > timeout %v", remaining, timeout) + } + + time.Sleep(50 * time.Millisecond) + remainingAfter := lc.IdleRemaining() + + if remainingAfter >= remaining { + t.Fatalf("idle remaining should have decreased") + } + synctest.Wait() + }) +} + +func TestLifecycle_Uptime(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + lc := daemon.NewLifecycle(1 * time.Hour) + defer lc.Shutdown() + + time.Sleep(10 * time.Millisecond) + uptime := lc.Uptime() + + if uptime < 10*time.Millisecond { + t.Fatalf("uptime %v < 10ms", uptime) + } + synctest.Wait() + }) +} + +func TestLifecycle_LastActivity(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + lc := daemon.NewLifecycle(1 * time.Hour) + defer lc.Shutdown() + + initialActivity := lc.LastActivity() + if initialActivity.IsZero() { + t.Fatal("last activity should be set") + } + + time.Sleep(10 * time.Millisecond) + lc.ResetTimer() + + resetActivity := lc.LastActivity() + if 
!resetActivity.After(initialActivity) { + t.Fatal("last activity should have been updated") + } + synctest.Wait() + }) +} + +func TestLifecycle_Shutdown(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + lc := daemon.NewLifecycle(1 * time.Hour) + defer lc.Shutdown() + + select { + case <-lc.ShutdownChan(): + t.Fatal("should not have shutdown yet") + case <-time.After(10 * time.Millisecond): + } + + lc.Shutdown() + + select { + case <-lc.ShutdownChan(): + case <-time.After(10 * time.Millisecond): + t.Fatal("should have shutdown after calling Shutdown()") + } + synctest.Wait() + }) +} diff --git a/cli/internal/adapters/daemon/node.go b/cli/internal/adapters/daemon/node.go new file mode 100644 index 0000000..aa7f726 --- /dev/null +++ b/cli/internal/adapters/daemon/node.go @@ -0,0 +1,21 @@ +package daemon + +import ( + "context" + + "github.com/grindlemire/graft" + "go.trai.ch/same/internal/core/ports" +) + +// NodeID is the unique identifier for the daemon connector Graft node. +const NodeID graft.ID = "adapter.daemon" + +func init() { + graft.Register(graft.Node[ports.DaemonConnector]{ + ID: NodeID, + Cacheable: true, + Run: func(_ context.Context) (ports.DaemonConnector, error) { + return NewConnector() + }, + }) +} diff --git a/cli/internal/adapters/daemon/server.go b/cli/internal/adapters/daemon/server.go new file mode 100644 index 0000000..f3beb9a --- /dev/null +++ b/cli/internal/adapters/daemon/server.go @@ -0,0 +1,404 @@ +package daemon + +import ( + "context" + "errors" + "fmt" + "net" + "os" + "path/filepath" + "strconv" + + "go.trai.ch/same/api/daemon/v1" + "go.trai.ch/same/internal/adapters/watcher" + "go.trai.ch/same/internal/core/domain" + "go.trai.ch/same/internal/core/ports" + "go.trai.ch/zerr" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Server implements the gRPC daemon service. +type Server struct { + daemonv1.UnimplementedDaemonServiceServer + lifecycle *Lifecycle + cache *ServerCache + configLoader ports.ConfigLoader + envFactory ports.EnvironmentFactory + executor ports.Executor + watcherSvc *WatcherService + grpcServer *grpc.Server + listener net.Listener +} + +// WatcherService bundles the watcher, debouncer, and hash cache together. +type WatcherService struct { + Watcher ports.Watcher + Debouncer *watcher.Debouncer + HashCache ports.InputHashCache +} + +// NewServer creates a new daemon server. +func NewServer(lifecycle *Lifecycle) *Server { + s := &Server{ + lifecycle: lifecycle, + grpcServer: grpc.NewServer(), + } + daemonv1.RegisterDaemonServiceServer(s.grpcServer, s) + return s +} + +// NewServerWithDeps creates a new daemon server with dependencies for handling graph and environment requests. +func NewServerWithDeps( + lifecycle *Lifecycle, + configLoader ports.ConfigLoader, + envFactory ports.EnvironmentFactory, + executor ports.Executor, +) *Server { + s := &Server{ + lifecycle: lifecycle, + cache: NewServerCache(), + configLoader: configLoader, + envFactory: envFactory, + executor: executor, + grpcServer: grpc.NewServer(), + } + daemonv1.RegisterDaemonServiceServer(s.grpcServer, s) + return s +} + +// Serve starts the gRPC server on the UDS. 
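+// Serve blocks until the context is cancelled, the idle-timeout lifecycle
+// fires (ShutdownChan closes), or the gRPC server itself fails; after the
+// PID file is written, cleanup() removes the socket and PID file on every
+// exit path.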
+func (s *Server) Serve(ctx context.Context) error { + socketPath := domain.DefaultDaemonSocketPath() + + dir := filepath.Dir(socketPath) + if err := os.MkdirAll(dir, domain.DirPerm); err != nil { + return zerr.Wrap(err, "failed to create daemon directory") + } + + if err := os.Remove(socketPath); err != nil && !os.IsNotExist(err) { + return zerr.Wrap(err, "failed to remove stale socket") + } + + lis, err := net.Listen("unix", socketPath) + if err != nil { + return zerr.Wrap(err, "failed to listen on UDS") + } + s.listener = lis + // Note: There's a brief window between socket creation and chmod where + // the socket has default permissions. This is an acceptable trade-off + // for code clarity. For defense-in-depth, consider setting umask before + // Listen if this window becomes a concern. + + if err := os.Chmod(socketPath, domain.SocketPerm); err != nil { + _ = lis.Close() + return zerr.Wrap(err, "failed to set socket permissions") + } + + if err := s.writePIDFile(); err != nil { + return err + } + + defer s.cleanup() + + errCh := make(chan error, 1) + go func() { + errCh <- s.grpcServer.Serve(lis) + }() + + select { + case <-ctx.Done(): + s.grpcServer.GracefulStop() + return ctx.Err() + case <-s.lifecycle.ShutdownChan(): + s.grpcServer.GracefulStop() + return nil + case err := <-errCh: + return err + } +} + +func (s *Server) cleanup() { + _ = os.Remove(domain.DefaultDaemonSocketPath()) + _ = os.Remove(domain.DefaultDaemonPIDPath()) +} + +// Ping implements DaemonService.Ping. +func (s *Server) Ping(_ context.Context, _ *daemonv1.PingRequest) (*daemonv1.PingResponse, error) { + return &daemonv1.PingResponse{ + IdleRemainingSeconds: int64(s.lifecycle.IdleRemaining().Seconds()), + }, nil +} + +// Status implements DaemonService.Status. +func (s *Server) Status(_ context.Context, _ *daemonv1.StatusRequest) (*daemonv1.StatusResponse, error) { + pid := os.Getpid() + const maxInt32 = 2147483647 + if pid > maxInt32 { + pid = maxInt32 + } + return &daemonv1.StatusResponse{ + Running: true, + //nolint:gosec // G115: Safe conversion - pid is capped to maxInt32 above + Pid: int32(pid), + UptimeSeconds: int64(s.lifecycle.Uptime().Seconds()), + LastActivityUnix: s.lifecycle.LastActivity().Unix(), + IdleRemainingSeconds: int64(s.lifecycle.IdleRemaining().Seconds()), + }, nil +} + +// Shutdown implements DaemonService.Shutdown. +func (s *Server) Shutdown(_ context.Context, _ *daemonv1.ShutdownRequest) (*daemonv1.ShutdownResponse, error) { + s.lifecycle.Shutdown() + return &daemonv1.ShutdownResponse{Success: true}, nil +} + +func (s *Server) writePIDFile() error { + pidPath := domain.DefaultDaemonPIDPath() + pid := os.Getpid() + return os.WriteFile(pidPath, []byte(fmt.Sprintf("%d", pid)), domain.PrivateFilePerm) +} + +// GetGraph implements DaemonService.GetGraph. +// Note: ctx parameter satisfies the gRPC interface but is not currently used for cancellation +// because configLoader.Load() does not accept context. Future enhancement: add context support +// to ConfigLoader.Load() for proper cancellation propagation. 
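+// Cache semantics: the client's mtime map is compared verbatim against the
+// snapshot stored at the last load (see ServerCache.GetGraph), so any added,
+// removed, or touched config file forces a fresh Load + Validate and a cache
+// refresh keyed by cwd.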
+// +//nolint:revive // ctx satisfies gRPC interface; see above note for future improvement +func (s *Server) GetGraph(ctx context.Context, req *daemonv1.GetGraphRequest) (*daemonv1.GetGraphResponse, error) { + // Guard: ensure server is configured for graph operations + if s.cache == nil || s.configLoader == nil { + return nil, status.Error(codes.FailedPrecondition, "server not configured for graph operations") + } + + // Convert proto mtimes to map + clientMtimes := make(map[string]int64) + for _, mtime := range req.ConfigMtimes { + clientMtimes[mtime.Path] = mtime.MtimeUnixNano + } + + // Reset inactivity timer + s.lifecycle.ResetTimer() + + // Check cache + if graph, cacheHit := s.cache.GetGraph(req.Cwd, clientMtimes); cacheHit { + return s.graphToResponse(graph, true), nil + } + + // Cache miss or stale, load the graph + graph, err := s.configLoader.Load(req.Cwd) + if err != nil { + return nil, zerr.Wrap(err, "failed to load graph") + } + + // Validate the graph to populate executionOrder for Walk() + if err := graph.Validate(); err != nil { + return nil, zerr.Wrap(err, "failed to validate graph") + } + + // Store in cache + entry := &domain.GraphCacheEntry{ + Graph: graph, + ConfigPaths: make([]string, 0, len(clientMtimes)), + Mtimes: clientMtimes, + } + for path := range clientMtimes { + entry.ConfigPaths = append(entry.ConfigPaths, path) + } + s.cache.SetGraph(req.Cwd, entry) + + return s.graphToResponse(graph, false), nil +} + +// GetEnvironment implements DaemonService.GetEnvironment. +func (s *Server) GetEnvironment( + ctx context.Context, + req *daemonv1.GetEnvironmentRequest, +) (*daemonv1.GetEnvironmentResponse, error) { + // Guard: ensure server is configured for environment operations + if s.cache == nil || s.envFactory == nil { + return nil, status.Error(codes.FailedPrecondition, "server not configured for environment operations") + } + + // Reset inactivity timer + s.lifecycle.ResetTimer() + + // Check cache + if envVars, cacheHit := s.cache.GetEnv(req.EnvId); cacheHit { + return &daemonv1.GetEnvironmentResponse{ + CacheHit: true, + EnvVars: envVars, + }, nil + } + + // Cache miss, resolve environment + envVars, err := s.envFactory.GetEnvironment(ctx, req.Tools) + if err != nil { + return nil, zerr.Wrap(err, "failed to get environment") + } + + // Store in cache + s.cache.SetEnv(req.EnvId, envVars) + + return &daemonv1.GetEnvironmentResponse{ + CacheHit: false, + EnvVars: envVars, + }, nil +} + +// graphToResponse converts a domain.Graph to a GetGraphResponse proto message. +func (s *Server) graphToResponse(graph *domain.Graph, cacheHit bool) *daemonv1.GetGraphResponse { + resp := &daemonv1.GetGraphResponse{ + CacheHit: cacheHit, + Root: graph.Root(), + } + + // Convert tasks + for task := range graph.Walk() { + taskProto := &daemonv1.TaskProto{ + Name: task.Name.String(), + Command: task.Command, + Inputs: s.internedStringsToStrings(task.Inputs), + Outputs: s.internedStringsToStrings(task.Outputs), + Tools: task.Tools, + Dependencies: s.internedStringsToStrings(task.Dependencies), + Environment: task.Environment, + WorkingDir: task.WorkingDir.String(), + RebuildStrategy: string(task.RebuildStrategy), + } + resp.Tasks = append(resp.Tasks, taskProto) + } + + return resp +} + +// internedStringsToStrings converts a slice of InternedString to plain strings. 
+func (s *Server) internedStringsToStrings(interned []domain.InternedString) []string { + result := make([]string, len(interned)) + for i, str := range interned { + result[i] = str.String() + } + return result +} + +// SetWatcherService sets the watcher service for the server. +// This must be called before Serve if the watcher service is needed. +func (s *Server) SetWatcherService(watcherSvc *WatcherService) { + s.watcherSvc = watcherSvc +} + +// GetInputHash implements DaemonService.GetInputHash. +// +//nolint:revive,unparam // ctx satisfies gRPC interface requirement +func (s *Server) GetInputHash( + ctx context.Context, + req *daemonv1.GetInputHashRequest, +) (*daemonv1.GetInputHashResponse, error) { + s.lifecycle.ResetTimer() + + // Guard: ensure watcher service is configured + if s.watcherSvc == nil { + return nil, status.Error(codes.FailedPrecondition, "watcher service not initialized") + } + + // Get the hash result from the cache using the request's context. + // This avoids race conditions by passing root/env directly. + result := s.watcherSvc.HashCache.GetInputHash(req.TaskName, req.Root, req.Environment) + + // Convert the ports.InputHashState to the proto enum + var state daemonv1.GetInputHashResponse_State + switch result.State { + case ports.HashReady: + state = daemonv1.GetInputHashResponse_READY + case ports.HashPending: + state = daemonv1.GetInputHashResponse_PENDING + default: + state = daemonv1.GetInputHashResponse_UNKNOWN + } + + return &daemonv1.GetInputHashResponse{ + State: state, + Hash: result.Hash, + }, nil +} + +// streamWriter implements io.Writer for streaming task output. +type streamWriter struct { + stream daemonv1.DaemonService_ExecuteTaskServer +} + +func (w *streamWriter) Write(p []byte) (int, error) { + if err := w.stream.Send(&daemonv1.ExecuteTaskResponse{Data: p}); err != nil { + return 0, err + } + return len(p), nil +} + +// getExitCode extracts the exit code from an error. +// It returns 0 for no error, or the actual exit code if the error +// contains one via zerr field, defaulting to 1 for generic errors. +func getExitCode(err error) int { + if err == nil { + return 0 + } + + // Check if this is a zerr with an exit_code field + // zerr implements an interface that allows field extraction + type fielder interface { + Field(key string) (interface{}, bool) + } + + var fieldErr fielder + if errors.As(err, &fieldErr) { + if code, found := fieldErr.Field("exit_code"); found { + if exitCode, ok := code.(int); ok { + return exitCode + } + } + } + + // Default to exit code 1 for generic errors + return 1 +} + +// ExecuteTask implements DaemonService.ExecuteTask. 
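+//
+// The exit code travels out-of-band in the "x-exit-code" gRPC trailer. A
+// client-side sketch of draining the stream and reading it (illustrative):
+//
+//	stream, _ := client.ExecuteTask(ctx, req)
+//	for {
+//		resp, err := stream.Recv()
+//		if err != nil {
+//			break // io.EOF on clean end of stream
+//		}
+//		_, _ = os.Stdout.Write(resp.Data)
+//	}
+//	if vals := stream.Trailer().Get("x-exit-code"); len(vals) > 0 {
+//		exitCode, _ := strconv.Atoi(vals[0])
+//		_ = exitCode
+//	}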
+func (s *Server) ExecuteTask( + req *daemonv1.ExecuteTaskRequest, + stream daemonv1.DaemonService_ExecuteTaskServer, +) error { + // Reset inactivity timer + s.lifecycle.ResetTimer() + + // Guard: ensure server is configured for task execution + if s.executor == nil { + return status.Error(codes.FailedPrecondition, "server not configured for task execution") + } + + // Reconstruct domain.Task from request + task := &domain.Task{ + Name: domain.NewInternedString(req.TaskName), + Command: req.Command, + WorkingDir: domain.NewInternedString(req.WorkingDir), + Environment: req.TaskEnvironment, + } + + // Create streaming writer + writer := &streamWriter{stream: stream} + + // Execute with PTY (via executor) + err := s.executor.Execute(stream.Context(), task, req.NixEnvironment, writer, writer) + + // Extract exit code from error + exitCode := getExitCode(err) + + // Set trailer with exit code + stream.SetTrailer(metadata.Pairs("x-exit-code", strconv.Itoa(exitCode))) + + // Return error status for non-zero exit + if exitCode != 0 { + return status.Errorf(codes.Unknown, "task failed with exit code %d", exitCode) + } + return nil +} diff --git a/cli/internal/adapters/daemon/spawner.go b/cli/internal/adapters/daemon/spawner.go new file mode 100644 index 0000000..7336d60 --- /dev/null +++ b/cli/internal/adapters/daemon/spawner.go @@ -0,0 +1,157 @@ +package daemon + +import ( + "context" + "os" + "os/exec" + "path/filepath" + "syscall" + "time" + + "go.trai.ch/same/internal/core/domain" + "go.trai.ch/same/internal/core/ports" + "go.trai.ch/zerr" +) + +const ( + pollInterval = 100 * time.Millisecond + maxPollDuration = 5 * time.Second +) + +// Connector implements ports.DaemonConnector. +type Connector struct { + executablePath string +} + +// NewConnector creates a new daemon connector. +func NewConnector() (*Connector, error) { + exe, err := os.Executable() + if err != nil { + return nil, zerr.Wrap(err, "failed to determine executable path") + } + return &Connector{executablePath: exe}, nil +} + +// Connect returns a client, spawning the daemon if necessary. +func (c *Connector) Connect(ctx context.Context, root string) (ports.DaemonClient, error) { + client, err := Dial(root) + if err == nil { + if pingErr := client.Ping(ctx); pingErr == nil { + return client, nil + } + _ = client.Close() + } + + if spawnErr := c.Spawn(ctx, root); spawnErr != nil { + return nil, spawnErr + } + + client, err = Dial(root) + if err != nil { + return nil, zerr.Wrap(err, "daemon client creation failed") + } + + if pingErr := client.Ping(ctx); pingErr != nil { + _ = client.Close() + return nil, zerr.Wrap(pingErr, "daemon started but is not responsive") + } + + return client, nil +} + +// IsRunning checks if the daemon is running and responsive. +func (c *Connector) IsRunning(root string) bool { + if root == "" { + return false + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + return c.isRunningWithCtx(ctx, root) +} + +// isRunningWithCtx checks if the daemon is running and responsive, respecting the provided context. +func (c *Connector) isRunningWithCtx(ctx context.Context, root string) bool { + client, err := Dial(root) + if err != nil { + return false + } + defer func() { _ = client.Close() }() + + if err := client.Ping(ctx); err != nil { + return false + } + + return true +} + +// Spawn starts the daemon process in the background. 
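+//
+// The child is detached via Setsid (its own session, no controlling terminal)
+// with stdout/stderr redirected to the daemon log, so it outlives the CLI
+// process. Callers normally go through Connect, which dials first and only
+// spawns on a failed ping, e.g. (illustrative):
+//
+//	c, err := NewConnector()
+//	if err != nil {
+//		return err
+//	}
+//	client, err := c.Connect(ctx, root) // dial, spawn on miss, dial again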
+func (c *Connector) Spawn(ctx context.Context, root string) error { + if root == "" { + return zerr.New("root cannot be empty") + } + + absRoot, err := filepath.Abs(root) + if err != nil { + return zerr.Wrap(err, "failed to resolve absolute root path") + } + + daemonDir := filepath.Join(absRoot, filepath.Dir(domain.DefaultDaemonSocketPath())) + if mkdirErr := os.MkdirAll(daemonDir, domain.DirPerm); mkdirErr != nil { + return zerr.Wrap(mkdirErr, "failed to create daemon directory") + } + + logPath := filepath.Join(absRoot, domain.DefaultDaemonLogPath()) + //nolint:gosec // G304: logPath is from root + domain constant, not user input + logFile, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, domain.PrivateFilePerm) + if err != nil { + return zerr.Wrap(err, "failed to open daemon log") + } + + //nolint:gosec // G204: executablePath is controlled, args are fixed literals + cmd := exec.Command(c.executablePath, "daemon", "serve") + cmd.Dir = absRoot + cmd.Stdout = logFile + cmd.Stderr = logFile + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setsid: true, + } + + if err := cmd.Start(); err != nil { + _ = logFile.Close() + return zerr.Wrap(err, domain.ErrDaemonSpawnFailed.Error()) + } + + go func() { + _ = cmd.Wait() + _ = logFile.Close() + }() + + if err := c.waitForDaemonStartup(ctx, absRoot); err != nil { + return err + } + + return nil +} + +// waitForDaemonStartup waits for the daemon to become responsive. +func (c *Connector) waitForDaemonStartup(ctx context.Context, root string) error { + start := time.Now() + for time.Since(start) < maxPollDuration { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if c.isRunningWithCtx(ctx, root) { + return nil + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(pollInterval): + } + } + return zerr.New("daemon failed to start within timeout") +} diff --git a/cli/internal/adapters/nix/env_factory_test.go b/cli/internal/adapters/nix/env_factory_test.go index 9b60c78..97e9a5f 100644 --- a/cli/internal/adapters/nix/env_factory_test.go +++ b/cli/internal/adapters/nix/env_factory_test.go @@ -9,7 +9,6 @@ import ( "path/filepath" "strings" "sync" - "sync/atomic" "testing" "testing/synctest" @@ -448,68 +447,120 @@ func TestSaveEnvToCache_MkdirError(t *testing.T) { } } -func TestGetEnvironment_Concurrency(t *testing.T) { +func TestGetEnvironment_ConcurrentCacheHit(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // No EXPECT: cache hit should bypass resolver entirely + resolver := mocks.NewMockDependencyResolver(ctrl) + + tmpDir := t.TempDir() + + tools := map[string]string{ + "go": "go@1.25.4", + } + + envID := domain.GenerateEnvID(tools) + cachePath := filepath.Join(tmpDir, "environments", envID+".json") + cachedEnv := []string{"GOROOT=/nix/store/test", "PATH=/nix/store/bin"} + if err := nix.SaveEnvToCache(cachePath, cachedEnv); err != nil { + t.Fatalf("Failed to setup cache: %v", err) + } + + factory := nix.NewEnvFactoryWithCache(resolver, tmpDir) + ctx := context.Background() + + var wg sync.WaitGroup + const numGoroutines = 5 + wg.Add(numGoroutines) + + results := make(chan []string, numGoroutines) + errs := make(chan error, numGoroutines) + + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + env, err := factory.GetEnvironment(ctx, tools) + if err != nil { + errs <- err + return + } + results <- env + }() + } + + wg.Wait() + close(results) + close(errs) + + for err := range errs { + t.Errorf("GetEnvironment failed: %v", err) + } + + count := 0 + for env := 
range results { + count++ + if len(env) == 0 { + t.Error("GetEnvironment returned empty environment") + } + } + + if count != numGoroutines { + t.Errorf("Expected %d results, got %d", numGoroutines, count) + } +} + +func TestGetEnvironment_Singleflight(t *testing.T) { synctest.Test(t, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() resolver := mocks.NewMockDependencyResolver(ctrl) - // Use atomic counter to verify single execution - var callCount int32 + tmpDir := t.TempDir() + + tools := map[string]string{ + "go": "go@1.25.4", + } started := make(chan struct{}) proceed := make(chan struct{}) - // Mock Resolve to wait for signal resolver.EXPECT(). Resolve(gomock.Any(), "go", "1.25.4"). DoAndReturn(func(_ context.Context, _, _ string) (string, string, error) { - atomic.AddInt32(&callCount, 1) close(started) - <-proceed // Wait for signal to complete - return "2788904d26dda6cfa1921c5abb7a2466ffe3cb8c", "pkgs.go", nil + <-proceed + return "", "", errors.New("intentional error to short-circuit nix call") }). Times(1) - tmpDir := t.TempDir() factory := nix.NewEnvFactoryWithCache(resolver, tmpDir) ctx := context.Background() - tools := map[string]string{ - "go": "go@1.25.4", - } - + const numGoroutines = 3 var wg sync.WaitGroup - wg.Add(2) + wg.Add(numGoroutines) - // Start two concurrent requests - for i := 0; i < 2; i++ { + for i := 0; i < numGoroutines; i++ { go func() { defer wg.Done() _, _ = factory.GetEnvironment(ctx, tools) }() } - // Wait for one routine to hit the mock synctest.Wait() select { case <-started: - // One routine has started processing default: t.Fatal("Resolve was not called") } - // Allow processing to complete close(proceed) synctest.Wait() wg.Wait() - - if atomic.LoadInt32(&callCount) != 1 { - t.Errorf("Resolve called %d times, want 1", callCount) - } }) } diff --git a/cli/internal/adapters/nix/resolver.go b/cli/internal/adapters/nix/resolver.go index fbfe7c9..b31c9d5 100644 --- a/cli/internal/adapters/nix/resolver.go +++ b/cli/internal/adapters/nix/resolver.go @@ -46,9 +46,6 @@ func NewResolver() (*Resolver, error) { // newResolverWithPath creates a Resolver with a custom cache path (used for testing). func newResolverWithPath(path string) (*Resolver, error) { cleanPath := filepath.Clean(path) - if err := os.MkdirAll(cleanPath, domain.DirPerm); err != nil { - return nil, zerr.Wrap(err, domain.ErrNixCacheCreateFailed.Error()) - } return &Resolver{ cacheDir: cleanPath, diff --git a/cli/internal/adapters/nix/resolver_test.go b/cli/internal/adapters/nix/resolver_test.go index e23176d..5797c3d 100644 --- a/cli/internal/adapters/nix/resolver_test.go +++ b/cli/internal/adapters/nix/resolver_test.go @@ -31,9 +31,9 @@ func TestNewResolver(t *testing.T) { t.Fatal("NewResolver() returned nil resolver") } - // Verify cache directory was created - if _, err := os.Stat(cachePath); os.IsNotExist(err) { - t.Errorf("cache directory was not created") + // Verify cache directory is NOT created eagerly (lazy creation) + if _, err := os.Stat(cachePath); err == nil { + t.Errorf("cache directory should not be created eagerly") } } @@ -482,7 +482,8 @@ func verifyCacheWasWritten(t *testing.T, tmpDir, alias, version, expectedHash st } func TestNewResolver_MkdirAllError(t *testing.T) { - // Create a file where the cache directory should be to cause MkdirAll to fail + // This test verifies that lazy directory creation fails gracefully when + // a file exists where the cache directory should be created. 
tmpDir := t.TempDir() conflictPath := filepath.Join(tmpDir, "conflict") @@ -491,14 +492,33 @@ func TestNewResolver_MkdirAllError(t *testing.T) { t.Fatalf("failed to create conflict file: %v", err) } - // Try to create resolver with a path that would require creating a directory where a file exists + // Create resolver with a path that would require creating a directory where a file exists cachePath := filepath.Join(conflictPath, "cache") - _, err := newResolverWithPath(cachePath) - if err == nil { - t.Error("newResolverWithPath() expected error when MkdirAll fails") + resolver, err := newResolverWithPath(cachePath) + // Resolver creation should succeed (no eager directory creation) + if err != nil { + t.Fatalf("newResolverWithPath() unexpected error = %v", err) + } + + // But attempting to save to cache should fail due to directory creation error + mockResponse := &nixHubResponse{ + Systems: map[string]SystemResponse{ + "x86_64-linux": { + FlakeInstallable: FlakeInstallable{ + AttrPath: "legacyPackages.x86_64-linux.go", + Ref: FlakeRef{Rev: "abc123"}, + }, + }, + }, } - if !strings.Contains(err.Error(), domain.ErrNixCacheCreateFailed.Error()) { - t.Errorf("error = %v, want error containing %v", err, domain.ErrNixCacheCreateFailed) + + // Construct the cache file path (similar to getCachePath()) + cacheFilePath := filepath.Join(cachePath, "test.json") + err = resolver.saveToCache(cacheFilePath, "go", "1.21", mockResponse) + if err == nil { + t.Fatal("saveToCache() expected error when directory creation fails") + } else if !strings.Contains(err.Error(), domain.ErrNixCacheWriteFailed.Error()) { + t.Errorf("error = %v, want error containing %v", err, domain.ErrNixCacheWriteFailed) } } diff --git a/cli/internal/adapters/watcher/debouncer.go b/cli/internal/adapters/watcher/debouncer.go new file mode 100644 index 0000000..17bb9ce --- /dev/null +++ b/cli/internal/adapters/watcher/debouncer.go @@ -0,0 +1,100 @@ +// Package watcher implements file system watching for proactive input hashing. +package watcher + +import ( + "sync" + "time" + "unique" +) + +// Debouncer coalesces rapid file system events into batched invalidations. +type Debouncer struct { + mu sync.Mutex + pending map[unique.Handle[string]]struct{} + timer *time.Timer + window time.Duration + callback func(paths []string) +} + +// NewDebouncer creates a new debouncer with the given time window and callback. +func NewDebouncer(window time.Duration, callback func(paths []string)) *Debouncer { + return &Debouncer{ + pending: make(map[unique.Handle[string]]struct{}), + window: window, + callback: callback, + } +} + +// Add adds a file path to the pending events set. +func (d *Debouncer) Add(path string) { + d.mu.Lock() + defer d.mu.Unlock() + + // Add the path to the pending set using an interned handle for deduplication. + handle := unique.Make(path) + d.pending[handle] = struct{}{} + + // Reset the timer if it exists, or create a new one. + if d.timer != nil { + d.timer.Stop() + } + d.timer = time.AfterFunc(d.window, d.fire) +} + +// fire is called when the debounce window expires. +func (d *Debouncer) fire() { + d.mu.Lock() + + // Check if there's anything to process (protects against race with Flush). + if len(d.pending) == 0 { + d.timer = nil + d.mu.Unlock() + return + } + + // Convert the pending set to a slice of paths. + paths := make([]string, 0, len(d.pending)) + for handle := range d.pending { + paths = append(paths, handle.Value()) + } + + // Clear the pending set and timer. 
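+ // (The lock is released before the callback below is invoked, so a slow
+ // callback never blocks concurrent Add calls from starting the next batch.)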
+	d.pending = make(map[unique.Handle[string]]struct{})
+	d.timer = nil
+	d.mu.Unlock()
+
+	// Call the callback with the coalesced paths (asynchronously to match Flush behavior).
+	if len(paths) > 0 && d.callback != nil {
+		go d.callback(paths)
+	}
+}
+
+// Flush immediately triggers the debounce callback with any pending paths and
+// blocks until the callback completes, making it suitable for graceful
+// shutdown scenarios where work must finish before proceeding. If the timer
+// has already fired, Flush returns immediately and lets the in-flight fire()
+// call process the batch asynchronously instead.
+func (d *Debouncer) Flush() {
+	d.mu.Lock()
+	if d.timer != nil {
+		if !d.timer.Stop() {
+			// Timer already fired, let it complete rather than processing twice.
+			d.mu.Unlock()
+			return
+		}
+		d.timer = nil
+	}
+
+	// Extract paths to process.
+	paths := make([]string, 0, len(d.pending))
+	for handle := range d.pending {
+		paths = append(paths, handle.Value())
+	}
+	d.pending = make(map[unique.Handle[string]]struct{})
+	d.mu.Unlock()
+
+	// Call the callback synchronously (blocks until complete).
+	// This differs from fire() which is async, but is intentional for
+	// flush scenarios where completion is required before proceeding.
+	if len(paths) > 0 && d.callback != nil {
+		d.callback(paths)
+	}
+}
diff --git a/cli/internal/adapters/watcher/hash_cache.go b/cli/internal/adapters/watcher/hash_cache.go
new file mode 100644
index 0000000..0495339
--- /dev/null
+++ b/cli/internal/adapters/watcher/hash_cache.go
@@ -0,0 +1,277 @@
+package watcher
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+	"unique"
+
+	"go.trai.ch/same/internal/core/domain"
+	"go.trai.ch/same/internal/core/ports"
+)
+
+// PendingRehash represents a cache entry that needs to be recomputed.
+type PendingRehash struct {
+	TaskName string
+	Root     string
+	Env      map[string]string
+}
+
+// HashCache implements ports.InputHashCache with background rehashing.
+type HashCache struct {
+	mu              sync.RWMutex
+	entries         map[unique.Handle[string]]*domain.TaskHashEntry
+	pathToTasks     map[unique.Handle[string]][]cacheEntry
+	cacheKeyContext map[unique.Handle[string]]PendingRehash // Maps cache key to its context for invalidation
+	pendingRehashes []PendingRehash                         // Track what needs rehashing with full context
+	pendingKeys     map[unique.Handle[string]]struct{}      // O(1) pending lookup set
+	tasks           map[unique.Handle[string]]*domain.Task  // Full task definitions
+	hasher          ports.Hasher
+	resolver        ports.InputResolver
+}
+
+// cacheEntry links a path to a cache key for invalidation.
+type cacheEntry struct {
+	cacheKey unique.Handle[string]
+}
+
+// NewHashCache creates a new hash cache.
+func NewHashCache(hasher ports.Hasher, resolver ports.InputResolver) *HashCache {
+	return &HashCache{
+		entries:         make(map[unique.Handle[string]]*domain.TaskHashEntry),
+		pathToTasks:     make(map[unique.Handle[string]][]cacheEntry),
+		cacheKeyContext: make(map[unique.Handle[string]]PendingRehash),
+		pendingRehashes: make([]PendingRehash, 0),
+		pendingKeys:     make(map[unique.Handle[string]]struct{}),
+		tasks:           make(map[unique.Handle[string]]*domain.Task),
+		hasher:          hasher,
+		resolver:        resolver,
+	}
+}
+
+// copyEnv creates a deep copy of an environment map to prevent shared reference bugs.
+func copyEnv(env map[string]string) map[string]string {
+	if env == nil {
+		return nil
+	}
+	copied := make(map[string]string, len(env))
+	for k, v := range env {
+		copied[k] = v
+	}
+	return copied
+}
+
+// makeCacheKey creates a unique cache key from task name, root, and environment.
+// This ensures different contexts don't collide in the cache.
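+// The key has the shape "<task>|<root>|<env hash>", for example
+// (illustrative): "build|/home/user/repo|a1b2c3d4e5f60718".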
+// Uses a truncated SHA-256 hash (64 bits) of the environment for space efficiency. +// Collision probability is negligible for typical daemon workloads. +func (h *HashCache) makeCacheKey(taskName, root string, env map[string]string) unique.Handle[string] { + // Sort environment keys for deterministic hashing + keys := make([]string, 0, len(env)) + for k := range env { + keys = append(keys, k) + } + sort.Strings(keys) + + // Build environment string + envStr := "" + for _, k := range keys { + envStr += fmt.Sprintf("%s=%s;", k, env[k]) + } + + // Hash the environment to keep key size reasonable + envHash := sha256.Sum256([]byte(envStr)) + envHashStr := hex.EncodeToString(envHash[:8]) // Use first 8 bytes (64 bits) + + // Combine task name, root, and env hash + cacheKey := fmt.Sprintf("%s|%s|%s", taskName, root, envHashStr) + return unique.Make(cacheKey) +} + +// GetInputHash returns the current hash state and value for the given task. +func (h *HashCache) GetInputHash(taskName, root string, env map[string]string) ports.InputHashResult { + h.mu.RLock() + defer h.mu.RUnlock() + + cacheKey := h.makeCacheKey(taskName, root, env) + + // Check if this specific context is pending rehash using O(1) set lookup. + if _, pending := h.pendingKeys[cacheKey]; pending { + return ports.InputHashResult{State: ports.HashPending} + } + + // Check if we have a cached entry. + if entry, ok := h.entries[cacheKey]; ok { + return ports.InputHashResult{ + State: ports.HashReady, + Hash: entry.Hash, + } + } + + // Task is not cached yet. + return ports.InputHashResult{State: ports.HashUnknown} +} + +// Invalidate marks cached hashes for tasks affected by the changed paths. +// For each affected cache entry, we delete it and add it to the pending list for background rehashing. +func (h *HashCache) Invalidate(paths []string) { + h.mu.Lock() + defer h.mu.Unlock() + + // For each changed path, find all cache entries that depend on it. + for _, path := range paths { + pathHandle := unique.Make(path) + if entries, ok := h.pathToTasks[pathHandle]; ok { + for _, entry := range entries { + // Look up the full context for this cache key + if context, ok := h.cacheKeyContext[entry.cacheKey]; ok { + // Add to pending rehashes if not already there (O(1) check with pendingKeys) + if _, exists := h.pendingKeys[entry.cacheKey]; !exists { + // Deep copy the env map to prevent shared reference bugs + h.pendingRehashes = append(h.pendingRehashes, PendingRehash{ + TaskName: context.TaskName, + Root: context.Root, + Env: copyEnv(context.Env), + }) + h.pendingKeys[entry.cacheKey] = struct{}{} + } + } + + // Delete the stale cache entry + delete(h.entries, entry.cacheKey) + } + } + } +} + +// GetTask retrieves the stored task definition by name. +// This is used by background workers to rehash pending tasks. +func (h *HashCache) GetTask(taskName string) (*domain.Task, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + + handle := unique.Make(taskName) + task, ok := h.tasks[handle] + return task, ok +} + +// ComputeHash computes and caches the hash for a task with the given context. +// This should be called to populate the cache for a specific task/root/env combination. +func (h *HashCache) ComputeHash(task *domain.Task, root string, env map[string]string) error { + // Extract string inputs from task. + inputs := make([]string, len(task.Inputs)) + for i, input := range task.Inputs { + inputs[i] = input.String() + } + + // Resolve inputs to concrete paths. 
+ resolved, err := h.resolver.ResolveInputs(inputs, root) + if err != nil { + return err + } + + // Compute the hash with the full task. + hash, err := h.hasher.ComputeInputHash(task, env, resolved) + if err != nil { + return err + } + + // Update the cache with the full task information. + h.updateCache(task, root, env, hash, resolved) + + return nil +} + +// updateCache updates the cache entry for a task and rebuilds the path-to-task index. +func (h *HashCache) updateCache(task *domain.Task, root string, env map[string]string, hash string, resolved []string) { + h.mu.Lock() + defer h.mu.Unlock() + + cacheKey := h.makeCacheKey(task.Name.String(), root, env) + taskHandle := task.Name.Value() + pathHandles := make([]unique.Handle[string], len(resolved)) + + // Remove old index entries for this cache key. + h.removeTaskFromIndex(cacheKey) + + // Add new entry and build new index. + for i, path := range resolved { + pathHandle := unique.Make(path) + pathHandles[i] = pathHandle + + // Add to path-to-task index (using cache key for invalidation). + h.pathToTasks[pathHandle] = append(h.pathToTasks[pathHandle], cacheEntry{cacheKey: cacheKey}) + } + + // Store the cache entry. + h.entries[cacheKey] = &domain.TaskHashEntry{ + Hash: hash, + ResolvedInputs: pathHandles, + ComputedAt: time.Now(), + } + + // Store the cache key context for future invalidation. + // Deep copy the env map to prevent shared reference bugs. + h.cacheKeyContext[cacheKey] = PendingRehash{ + TaskName: task.Name.String(), + Root: root, + Env: copyEnv(env), + } + + // Store the task definition (using simple task name handle). + h.tasks[taskHandle] = task + + // Remove from pending rehashes if it was pending. + if _, wasPending := h.pendingKeys[cacheKey]; wasPending { + // O(1) removal from pending keys set + delete(h.pendingKeys, cacheKey) + + // O(n) removal from pending list (needed for background worker iteration) + for i, pending := range h.pendingRehashes { + pendingKey := h.makeCacheKey(pending.TaskName, pending.Root, pending.Env) + if pendingKey == cacheKey { + h.pendingRehashes = append(h.pendingRehashes[:i], h.pendingRehashes[i+1:]...) + break + } + } + } +} + +// removeTaskFromIndex removes all index entries for the given cache key. +func (h *HashCache) removeTaskFromIndex(cacheKey unique.Handle[string]) { + for path, entries := range h.pathToTasks { + for i, entry := range entries { + if entry.cacheKey == cacheKey { + // Remove this entry from the slice. + h.pathToTasks[path] = append(entries[:i], entries[i+1:]...) + if len(h.pathToTasks[path]) == 0 { + // Delete empty entries. + delete(h.pathToTasks, path) + } + break + } + } + } +} + +// GetPendingTasks returns a list of pending rehash entries with full context. +// This is used by the background worker to know which tasks to rehash. +func (h *HashCache) GetPendingTasks() []PendingRehash { + h.mu.RLock() + defer h.mu.RUnlock() + + // Return a deep copy of the pending list to avoid exposing internal state. + // Must deep-copy Env maps to prevent shared reference bugs. 
+	pending := make([]PendingRehash, len(h.pendingRehashes))
+	for i, p := range h.pendingRehashes {
+		pending[i] = PendingRehash{
+			TaskName: p.TaskName,
+			Root:     p.Root,
+			Env:      copyEnv(p.Env),
+		}
+	}
+	return pending
+}
diff --git a/cli/internal/adapters/watcher/node.go b/cli/internal/adapters/watcher/node.go
new file mode 100644
index 0000000..4d61341
--- /dev/null
+++ b/cli/internal/adapters/watcher/node.go
@@ -0,0 +1,65 @@
+package watcher
+
+import (
+	"context"
+	"time"
+
+	"github.com/grindlemire/graft"
+	"go.trai.ch/same/internal/adapters/fs"
+	"go.trai.ch/same/internal/core/ports"
+)
+
+const (
+	// WatcherNodeID is the unique identifier for the file watcher Graft node.
+	WatcherNodeID graft.ID = "adapter.watcher"
+	// HashCacheNodeID is the unique identifier for the input hash cache Graft node.
+	HashCacheNodeID graft.ID = "adapter.hash_cache"
+)
+
+func init() {
+	// Watcher Node
+	graft.Register(graft.Node[ports.Watcher]{
+		ID:        WatcherNodeID,
+		Cacheable: true,
+		Run: func(_ context.Context) (ports.Watcher, error) {
+			return NewWatcher()
+		},
+	})
+
+	// HashCache Node
+	graft.Register(graft.Node[*HashCache]{
+		ID:        HashCacheNodeID,
+		Cacheable: true,
+		DependsOn: []graft.ID{fs.HasherNodeID, fs.ResolverNodeID},
+		Run: func(ctx context.Context) (*HashCache, error) {
+			hasher, err := graft.Dep[ports.Hasher](ctx)
+			if err != nil {
+				return nil, err
+			}
+			resolver, err := graft.Dep[ports.InputResolver](ctx)
+			if err != nil {
+				return nil, err
+			}
+			// The actual environment and root will come from the daemon's runtime context.
+			return NewHashCache(hasher, resolver), nil
+		},
+	})
+}
+
+// NodeID returns the Graft node ID for a given port interface type.
+// This is a helper to map port types to their corresponding node IDs.
+func NodeID(portType any) graft.ID {
+	switch portType.(type) {
+	case ports.Watcher:
+		return WatcherNodeID
+	case *HashCache:
+		return HashCacheNodeID
+	default:
+		// Runtime guard: unknown port types panic here. If you hit this panic,
+		// add the new port type to the switch above.
+		panic("unknown port type")
+	}
+}
+
+// DefaultDebounceWindow is the default time window for debouncing file events.
+const DefaultDebounceWindow = 50 * time.Millisecond
diff --git a/cli/internal/adapters/watcher/watcher.go b/cli/internal/adapters/watcher/watcher.go
new file mode 100644
index 0000000..77fa636
--- /dev/null
+++ b/cli/internal/adapters/watcher/watcher.go
@@ -0,0 +1,186 @@
+package watcher
+
+import (
+	"context"
+	"fmt"
+	"io/fs"
+	"iter"
+	"os"
+	"path/filepath"
+	"unique"
+
+	"github.com/fsnotify/fsnotify"
+	"go.trai.ch/same/internal/core/ports"
+)
+
+var _ ports.Watcher = (*Watcher)(nil)
+
+// shouldSkipDirectories are directories that should not be watched.
+var shouldSkipDirectories = map[string]bool{
+	".git":         true,
+	".jj":          true,
+	"node_modules": true,
+}
+
+const eventChannelBuffer = 100
+
+// Watcher implements file system watching using fsnotify.
+type Watcher struct {
+	fsWatcher *fsnotify.Watcher
+	root      unique.Handle[string]
+	events    chan ports.WatchEvent
+}
+
+// NewWatcher creates a new file system watcher.
+func NewWatcher() (*Watcher, error) {
+	watcher, err := fsnotify.NewWatcher()
+	if err != nil {
+		return nil, err
+	}
+	return &Watcher{
+		fsWatcher: watcher,
+		events:    make(chan ports.WatchEvent, eventChannelBuffer),
+	}, nil
+}
+
+// Start begins watching the given root directory recursively.
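+//
+// A typical wiring (illustrative sketch; the daemon composes these pieces via
+// WatcherService in the daemon adapter):
+//
+//	w, _ := NewWatcher()
+//	deb := NewDebouncer(DefaultDebounceWindow, cache.Invalidate)
+//	_ = w.Start(ctx, root)
+//	go func() {
+//		for ev := range w.Events() {
+//			deb.Add(ev.Path)
+//		}
+//	}()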
+func (w *Watcher) Start(ctx context.Context, root string) error { + w.root = unique.Make(root) + + // Walk the directory tree and add all directories to the watcher. + for dir := range w.watchRecursively(root) { + if err := w.fsWatcher.Add(dir); err != nil { + return err + } + } + + // Start processing events in a goroutine. + go w.processEvents(ctx) + + return nil +} + +// Stop stops the watcher and releases all resources. +func (w *Watcher) Stop() error { + return w.fsWatcher.Close() +} + +// Events returns an iterator of file system events. +func (w *Watcher) Events() iter.Seq[ports.WatchEvent] { + return func(yield func(ports.WatchEvent) bool) { + for event := range w.events { + if !yield(event) { + return + } + } + } +} + +// watchRecursively walks the directory tree and yields all directories. +func (w *Watcher) watchRecursively(root string) iter.Seq[string] { + return func(yield func(string) bool) { + _ = filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error { + if err != nil { + // Continue walking even if there's an error accessing a directory. + return nil //nolint:nilerr // This is intentional - we want to skip problematic directories + } + if d.IsDir() { + if w.shouldSkip(d.Name()) { + return fs.SkipDir + } + if !yield(path) { + return filepath.SkipAll + } + } + return nil + }) + } +} + +// shouldSkip returns true if the directory should be skipped. +func (w *Watcher) shouldSkip(name string) bool { + return shouldSkipDirectories[name] +} + +// processEvents processes raw fsnotify events and converts them to ports.WatchEvent. +// +//nolint:cyclop // This function is complex due to multiple event types and error handling +func (w *Watcher) processEvents(ctx context.Context) { + defer close(w.events) + + for { + select { + case <-ctx.Done(): + return + case event, ok := <-w.fsWatcher.Events: + if !ok { + return + } + + // Convert fsnotify event to ports.WatchEvent. + watchEvent := w.convertEvent(event) + if watchEvent == nil { + continue + } + + // Send the event to the output channel. + select { + case w.events <- *watchEvent: + case <-ctx.Done(): + return + } + + // If a new directory was created, add it to the watcher. + if event.Op&fsnotify.Create == fsnotify.Create && watchEvent.Operation == ports.OpCreate { + if info, err := os.Stat(event.Name); err == nil && info.IsDir() && !w.shouldSkip(info.Name()) { + // Recursively add the new directory and its subdirectories. + for dir := range w.watchRecursively(event.Name) { + _ = w.fsWatcher.Add(dir) + } + } + } + + case err, ok := <-w.fsWatcher.Errors: + if !ok { + return + } + // Log error to stderr and continue processing. + fmt.Fprintf(os.Stderr, "watcher: file system error: %v\n", err) + } + } +} + +// convertEvent converts an fsnotify event to a ports.WatchEvent. 
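+//
+// fsnotify may set several Op bits on a single event, so the checks below are
+// ordered: Write takes precedence, and Chmod-only events map to nil, which the
+// caller drops.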
+func (w *Watcher) convertEvent(event fsnotify.Event) *ports.WatchEvent { + path := event.Name + + if event.Op&fsnotify.Write == fsnotify.Write { + return &ports.WatchEvent{ + Path: path, + Operation: ports.OpWrite, + } + } + + if event.Op&fsnotify.Create == fsnotify.Create { + return &ports.WatchEvent{ + Path: path, + Operation: ports.OpCreate, + } + } + + if event.Op&fsnotify.Remove == fsnotify.Remove { + return &ports.WatchEvent{ + Path: path, + Operation: ports.OpRemove, + } + } + + if event.Op&fsnotify.Rename == fsnotify.Rename { + return &ports.WatchEvent{ + Path: path, + Operation: ports.OpRename, + } + } + + return nil +} diff --git a/cli/internal/app/app.go b/cli/internal/app/app.go index b81c406..51bcd0b 100644 --- a/cli/internal/app/app.go +++ b/cli/internal/app/app.go @@ -6,11 +6,14 @@ import ( "errors" "fmt" "os" + "path/filepath" "runtime" + "time" tea "github.com/charmbracelet/bubbletea" "go.opentelemetry.io/otel" sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.trai.ch/same/internal/adapters/daemon" "go.trai.ch/same/internal/adapters/detector" "go.trai.ch/same/internal/adapters/linear" "go.trai.ch/same/internal/adapters/telemetry" @@ -31,6 +34,7 @@ type App struct { hasher ports.Hasher resolver ports.InputResolver envFactory ports.EnvironmentFactory + connector ports.DaemonConnector teaOptions []tea.ProgramOption disableTick bool } @@ -44,6 +48,7 @@ func New( hasher ports.Hasher, resolver ports.InputResolver, envFactory ports.EnvironmentFactory, + connector ports.DaemonConnector, ) *App { return &App{ configLoader: loader, @@ -53,6 +58,7 @@ func New( hasher: hasher, resolver: resolver, envFactory: envFactory, + connector: connector, } } @@ -72,27 +78,73 @@ func (a *App) WithDisableTick() *App { // RunOptions configuration for the Run method. type RunOptions struct { - NoCache bool - Inspect bool - OutputMode string + NoCache bool + Inspect bool + InspectOnError bool + OutputMode string + NoDaemon bool // When true, bypass remote daemon execution } // Run executes the build process for the specified targets. // //nolint:cyclop // orchestration function func (a *App) Run(ctx context.Context, targetNames []string, opts RunOptions) error { - // 1. Load the graph - graph, err := a.configLoader.Load(".") + // 0. Get absolute path of current working directory + cwd, err := os.Getwd() if err != nil { - return zerr.Wrap(err, "failed to load configuration") + return zerr.Wrap(err, "failed to get current working directory") } - // 2. Validate targets + // 1. Discover workspace root + root, err := a.configLoader.DiscoverRoot(cwd) + if err != nil { + return zerr.Wrap(err, "failed to discover workspace root") + } + + // 2. 
Connect to daemon (if available and not disabled) and load graph from daemon or fallback to local + var graph *domain.Graph + var client ports.DaemonClient + var daemonAvailable bool + + if !opts.NoDaemon { + var clientErr error + client, clientErr = a.connector.Connect(ctx, root) + if clientErr == nil && client != nil { + // Daemon is available, try to get graph from daemon + daemonAvailable = true + defer func() { + _ = client.Close() + }() + + // Discover config paths and mtimes + mtimes, mtimeErr := a.configLoader.DiscoverConfigPaths(cwd) + if mtimeErr != nil { + return zerr.Wrap(mtimeErr, "failed to discover config paths") + } + + // Try to get graph from daemon + graph, _, err = client.GetGraph(ctx, cwd, mtimes) + if err != nil { + // On daemon error, we'll fall through to local loading + graph = nil + } + } + } + + // Load graph locally if not already loaded from daemon + if graph == nil || opts.NoDaemon { + graph, err = a.configLoader.Load(cwd) + if err != nil { + return zerr.Wrap(err, "failed to load configuration") + } + } + + // 3. Validate targets if len(targetNames) == 0 { return domain.ErrNoTargetsSpecified } - // 3. Initialize Renderer + // 4. Initialize Renderer // Detect environment and resolve output mode autoMode := detector.DetectEnvironment() mode := detector.ResolveMode(autoMode, opts.OutputMode) @@ -109,7 +161,7 @@ func (a *App) Run(ctx context.Context, targetNames []string, opts RunOptions) er renderer = linear.NewRenderer(os.Stdout, os.Stderr) } - // 4. Initialize Telemetry + // 5. Initialize Telemetry // Create a bridge that sends OTel spans to the renderer. bridge := telemetry.NewBridge(renderer) @@ -125,7 +177,7 @@ func (a *App) Run(ctx context.Context, targetNames []string, opts RunOptions) er _ = tracer.Shutdown(ctx) }() - // 5. Initialize Scheduler + // 6. Initialize Scheduler sched := scheduler.NewScheduler( a.executor, a.store, @@ -133,9 +185,14 @@ func (a *App) Run(ctx context.Context, targetNames []string, opts RunOptions) er a.resolver, tracer, a.envFactory, - ) + ).WithNoDaemon(opts.NoDaemon) + + // Pass daemon client to scheduler if available + if daemonAvailable { + sched.WithDaemon(client) + } - // 6. Run Renderer and Scheduler concurrently + // 7. Run Renderer and Scheduler concurrently g, ctx := errgroup.WithContext(ctx) // Renderer Routine @@ -156,10 +213,12 @@ func (a *App) Run(ctx context.Context, targetNames []string, opts RunOptions) er // Print panic info before renderer shutdown fmt.Fprintf(os.Stderr, "Scheduler panic: %v\n", r) } - // Stop renderer ONLY if: - // 1. Inspect mode is not enabled AND - // 2. No build failure occurred - if !opts.Inspect && schedErr == nil { + // Calculate keepOpen state: renderer should stay open if + // 1. Inspect mode is enabled OR + // 2. InspectOnError is enabled AND an error occurred + keepOpen := opts.Inspect || (opts.InspectOnError && schedErr != nil) + // Stop renderer if keepOpen is false + if !keepOpen { _ = renderer.Stop() } }() @@ -182,6 +241,17 @@ type CleanOptions struct { // Clean removes cache and build artifacts based on the provided options. 
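+//
+// A hypothetical invocation clearing both caches (illustrative):
+//
+//	err := a.Clean(ctx, CleanOptions{Build: true, Tools: true})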
func (a *App) Clean(_ context.Context, options CleanOptions) error { + // Discover workspace root + cwd, err := os.Getwd() + if err != nil { + return zerr.Wrap(err, "failed to get current working directory") + } + + root, err := a.configLoader.DiscoverRoot(cwd) + if err != nil { + return zerr.Wrap(err, "failed to discover workspace root") + } + var errs error // Helper to remove a directory and log the action @@ -196,12 +266,12 @@ func (a *App) Clean(_ context.Context, options CleanOptions) error { } if options.Build { - remove(domain.DefaultStorePath(), "build info store") + remove(filepath.Join(root, domain.DefaultStorePath()), "build info store") } if options.Tools { - remove(domain.DefaultNixHubCachePath(), "nix tool cache") - remove(domain.DefaultEnvCachePath(), "environment cache") + remove(filepath.Join(root, domain.DefaultNixHubCachePath()), "nix tool cache") + remove(filepath.Join(root, domain.DefaultEnvCachePath()), "environment cache") } return errs @@ -209,12 +279,98 @@ func (a *App) Clean(_ context.Context, options CleanOptions) error { // setupOTel configures the OpenTelemetry SDK with the renderer bridge. func setupOTel(bridge *telemetry.Bridge) { - // Create a new TracerProvider with the bridge as a SpanProcessor. - // This ensures that all started spans are reported to the renderer. tp := sdktrace.NewTracerProvider( sdktrace.WithSpanProcessor(bridge), ) - // Register it as the global provider. otel.SetTracerProvider(tp) } + +// ServeDaemon starts the daemon server. +func (a *App) ServeDaemon(ctx context.Context) error { + lifecycle := daemon.NewLifecycle(domain.DaemonInactivityTimeout) + server := daemon.NewServerWithDeps( + lifecycle, + a.configLoader, + a.envFactory, + a.executor, + ) + + a.logger.Info("daemon starting") + + if err := server.Serve(ctx); err != nil { + return zerr.Wrap(err, "daemon server error") + } + + a.logger.Info("daemon stopped") + return nil +} + +// DaemonStatus returns the current daemon status. +func (a *App) DaemonStatus(ctx context.Context) error { + cwd, err := os.Getwd() + if err != nil { + return zerr.Wrap(err, "failed to get current working directory") + } + + root, err := a.configLoader.DiscoverRoot(cwd) + if err != nil { + return zerr.Wrap(err, "failed to discover workspace root") + } + + if !a.connector.IsRunning(root) { + a.logger.Info("Running: false") + return nil + } + + client, err := a.connector.Connect(ctx, root) + if err != nil { + return zerr.Wrap(err, "failed to connect to daemon") + } + defer func() { + _ = client.Close() + }() + + status, err := client.Status(ctx) + if err != nil { + return zerr.Wrap(err, "failed to get daemon status") + } + + a.logger.Info(fmt.Sprintf("Running: %v", status.Running)) + a.logger.Info(fmt.Sprintf("PID: %d", status.PID)) + a.logger.Info(fmt.Sprintf("Uptime: %v", status.Uptime)) + ago := time.Since(status.LastActivity).Truncate(time.Second) + a.logger.Info(fmt.Sprintf("Last Activity: %s (%s ago)", status.LastActivity.Format("15:04:05"), ago)) + a.logger.Info(fmt.Sprintf("Idle Remaining: %v", status.IdleRemaining)) + + return nil +} + +// StopDaemon stops the daemon. 
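+// Note that Connect spawns the daemon when it is not already running, so
+// stopping an already-stopped daemon briefly starts one just to shut it down;
+// DaemonStatus avoids this by checking IsRunning first.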
+func (a *App) StopDaemon(ctx context.Context) error { + cwd, err := os.Getwd() + if err != nil { + return zerr.Wrap(err, "failed to get current working directory") + } + + root, err := a.configLoader.DiscoverRoot(cwd) + if err != nil { + return zerr.Wrap(err, "failed to discover workspace root") + } + + client, err := a.connector.Connect(ctx, root) + if err != nil { + return zerr.Wrap(err, "failed to connect to daemon") + } + defer func() { + _ = client.Close() + }() + + a.logger.Info("stopping daemon") + if err := client.Shutdown(ctx); err != nil { + return zerr.Wrap(err, "failed to stop daemon") + } + + a.logger.Info("daemon stopped") + return nil +} diff --git a/cli/internal/app/app_test.go b/cli/internal/app/app_test.go index 3287228..d252276 100644 --- a/cli/internal/app/app_test.go +++ b/cli/internal/app/app_test.go @@ -44,6 +44,7 @@ func TestApp_Build(t *testing.T) { mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) // Setup Graph g := domain.NewGraph() @@ -52,7 +53,9 @@ func TestApp_Build(t *testing.T) { _ = g.AddTask(task) // Setup App - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New( + mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector, + ). WithTeaOptions( tea.WithInput(strings.NewReader("")), tea.WithOutput(io.Discard), @@ -61,13 +64,15 @@ func TestApp_Build(t *testing.T) { ). WithDisableTick() - mockResolver.EXPECT().ResolveInputs(gomock.Any(), ".").Return([]string{}, nil) + mockLoader.EXPECT().DiscoverRoot(gomock.Any()).Return(tmpDir, nil) + mockConnector.EXPECT().Connect(gomock.Any(), gomock.Any()).Return(nil, errors.New("daemon not available")) + mockResolver.EXPECT().ResolveInputs(gomock.Any(), gomock.Any()).Return([]string{}, nil) // Expectations - mockLoader.EXPECT().Load(".").Return(g, nil) + mockLoader.EXPECT().Load(gomock.Any()).Return(g, nil) mockHasher.EXPECT().ComputeInputHash(task, nil, []string{}).Return("hash", nil) - mockStore.EXPECT().Get("task1").Return(nil, nil) + mockStore.EXPECT().Get(".", "task1").Return(nil, nil) mockExecutor.EXPECT().Execute(gomock.Any(), task, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - mockStore.EXPECT().Put(gomock.Any()).Return(nil) + mockStore.EXPECT().Put(".", gomock.Any()).Return(nil) // Run err = a.Run(context.Background(), []string{"task1"}, app.RunOptions{NoCache: false}) @@ -105,10 +110,13 @@ func TestApp_Run_NoTargets(t *testing.T) { mockHasher := mocks.NewMockHasher(ctrl) mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) + mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) // Setup App - mockLogger := mocks.NewMockLogger(ctrl) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New( + mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector, + ). WithTeaOptions( tea.WithInput(strings.NewReader("")), tea.WithOutput(io.Discard), @@ -116,8 +124,10 @@ func TestApp_Run_NoTargets(t *testing.T) { ). 
WithDisableTick() + mockLoader.EXPECT().DiscoverRoot(gomock.Any()).Return(tmpDir, nil) + mockConnector.EXPECT().Connect(gomock.Any(), gomock.Any()).Return(nil, errors.New("daemon not available")) // Expectations - mockLoader.EXPECT().Load(".").Return(domain.NewGraph(), nil) + mockLoader.EXPECT().Load(gomock.Any()).Return(domain.NewGraph(), nil) // Execute err = a.Run(context.Background(), nil, app.RunOptions{NoCache: false}) @@ -157,10 +167,13 @@ func TestApp_Run_ConfigLoaderError(t *testing.T) { mockHasher := mocks.NewMockHasher(ctrl) mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) + mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) // Setup App - mockLogger := mocks.NewMockLogger(ctrl) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New( + mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector, + ). WithTeaOptions( tea.WithInput(strings.NewReader("")), tea.WithOutput(io.Discard), @@ -168,8 +181,10 @@ func TestApp_Run_ConfigLoaderError(t *testing.T) { ). WithDisableTick() + mockLoader.EXPECT().DiscoverRoot(gomock.Any()).Return(tmpDir, nil) + mockConnector.EXPECT().Connect(gomock.Any(), gomock.Any()).Return(nil, errors.New("daemon not available")) // Expectations - loader fails - mockLoader.EXPECT().Load(".").Return(nil, errors.New("config load error")) + mockLoader.EXPECT().Load(gomock.Any()).Return(nil, errors.New("config load error")) // Execute err = a.Run(context.Background(), []string{"task1"}, app.RunOptions{NoCache: false}) @@ -213,6 +228,7 @@ func TestApp_Run_BuildExecutionFailed(t *testing.T) { mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) // Setup Graph g := domain.NewGraph() @@ -221,7 +237,9 @@ func TestApp_Run_BuildExecutionFailed(t *testing.T) { _ = g.AddTask(task) // Setup App - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New( + mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector, + ). WithTeaOptions( tea.WithInput(strings.NewReader("")), tea.WithOutput(io.Discard), @@ -230,11 +248,13 @@ func TestApp_Run_BuildExecutionFailed(t *testing.T) { ). WithDisableTick() - mockResolver.EXPECT().ResolveInputs(gomock.Any(), ".").Return([]string{}, nil) + mockLoader.EXPECT().DiscoverRoot(gomock.Any()).Return(tmpDir, nil) + mockConnector.EXPECT().Connect(gomock.Any(), gomock.Any()).Return(nil, errors.New("daemon not available")) + mockResolver.EXPECT().ResolveInputs(gomock.Any(), gomock.Any()).Return([]string{}, nil) // Expectations - mockLoader.EXPECT().Load(".").Return(g, nil) + mockLoader.EXPECT().Load(gomock.Any()).Return(g, nil) mockHasher.EXPECT().ComputeInputHash(task, nil, []string{}).Return("hash", nil) - mockStore.EXPECT().Get("task1").Return(nil, nil) + mockStore.EXPECT().Get(".", "task1").Return(nil, nil) // Mock Executor failure mockExecutor.EXPECT().Execute(gomock.Any(), task, gomock.Any(), gomock.Any(), gomock.Any()). 
Return(errors.New("command failed")) @@ -364,13 +384,16 @@ func TestApp_Clean(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() + mockLoader := mocks.NewMockConfigLoader(ctrl) + mockLoader.EXPECT().DiscoverRoot(gomock.Any()).Return(tmpDir, nil) + mockLogger := mocks.NewMockLogger(ctrl) // We expect some logs, but we can be loose or strict. // Let's just allow any Info calls. mockLogger.EXPECT().Info(gomock.Any()).AnyTimes() // Null dependencies for others - a := app.New(nil, nil, mockLogger, nil, nil, nil, nil) + a := app.New(mockLoader, nil, mockLogger, nil, nil, nil, nil, nil) err = a.Clean(context.Background(), tt.options) if err != nil { @@ -409,13 +432,16 @@ func TestApp_Run_LinearMode(t *testing.T) { mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) g := domain.NewGraph() g.SetRoot(".") task := &domain.Task{Name: domain.NewInternedString("task1"), WorkingDir: domain.NewInternedString("Root")} _ = g.AddTask(task) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New( + mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector, + ). WithTeaOptions( tea.WithInput(strings.NewReader("")), tea.WithOutput(io.Discard), @@ -424,12 +450,14 @@ func TestApp_Run_LinearMode(t *testing.T) { ). WithDisableTick() - mockResolver.EXPECT().ResolveInputs(gomock.Any(), ".").Return([]string{}, nil) - mockLoader.EXPECT().Load(".").Return(g, nil) + mockLoader.EXPECT().DiscoverRoot(gomock.Any()).Return(tmpDir, nil) + mockConnector.EXPECT().Connect(gomock.Any(), gomock.Any()).Return(nil, errors.New("daemon not available")) + mockResolver.EXPECT().ResolveInputs(gomock.Any(), gomock.Any()).Return([]string{}, nil) + mockLoader.EXPECT().Load(gomock.Any()).Return(g, nil) mockHasher.EXPECT().ComputeInputHash(task, nil, []string{}).Return("hash", nil) - mockStore.EXPECT().Get("task1").Return(nil, nil) + mockStore.EXPECT().Get(".", "task1").Return(nil, nil) mockExecutor.EXPECT().Execute(gomock.Any(), task, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - mockStore.EXPECT().Put(gomock.Any()).Return(nil) + mockStore.EXPECT().Put(".", gomock.Any()).Return(nil) err = a.Run(context.Background(), []string{"task1"}, app.RunOptions{ NoCache: false, @@ -468,13 +496,16 @@ func TestApp_Run_InspectMode(t *testing.T) { mockResolver := mocks.NewMockInputResolver(ctrl) mockEnvFactory := mocks.NewMockEnvironmentFactory(ctrl) mockLogger := mocks.NewMockLogger(ctrl) + mockConnector := mocks.NewMockDaemonConnector(ctrl) g := domain.NewGraph() g.SetRoot(".") task := &domain.Task{Name: domain.NewInternedString("task1"), WorkingDir: domain.NewInternedString("Root")} _ = g.AddTask(task) - a := app.New(mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory). + a := app.New( + mockLoader, mockExecutor, mockLogger, mockStore, mockHasher, mockResolver, mockEnvFactory, mockConnector, + ). WithTeaOptions( tea.WithInput(strings.NewReader("q")), tea.WithOutput(io.Discard), @@ -482,12 +513,14 @@ func TestApp_Run_InspectMode(t *testing.T) { ). 
WithDisableTick() - mockResolver.EXPECT().ResolveInputs(gomock.Any(), ".").Return([]string{}, nil) - mockLoader.EXPECT().Load(".").Return(g, nil) + mockLoader.EXPECT().DiscoverRoot(gomock.Any()).Return(tmpDir, nil) + mockConnector.EXPECT().Connect(gomock.Any(), gomock.Any()).Return(nil, errors.New("daemon not available")) + mockResolver.EXPECT().ResolveInputs(gomock.Any(), gomock.Any()).Return([]string{}, nil) + mockLoader.EXPECT().Load(gomock.Any()).Return(g, nil) mockHasher.EXPECT().ComputeInputHash(task, nil, []string{}).Return("hash", nil) - mockStore.EXPECT().Get("task1").Return(nil, nil) + mockStore.EXPECT().Get(".", "task1").Return(nil, nil) mockExecutor.EXPECT().Execute(gomock.Any(), task, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - mockStore.EXPECT().Put(gomock.Any()).Return(nil) + mockStore.EXPECT().Put(".", gomock.Any()).Return(nil) err = a.Run(context.Background(), []string{"task1"}, app.RunOptions{ NoCache: false, diff --git a/cli/internal/app/node.go b/cli/internal/app/node.go index 54edf5b..187cd11 100644 --- a/cli/internal/app/node.go +++ b/cli/internal/app/node.go @@ -6,6 +6,7 @@ import ( "github.com/grindlemire/graft" "go.trai.ch/same/internal/adapters/cas" //nolint:depguard // Wired in app layer "go.trai.ch/same/internal/adapters/config" //nolint:depguard // Wired in app layer + "go.trai.ch/same/internal/adapters/daemon" //nolint:depguard // Wired in app layer "go.trai.ch/same/internal/adapters/fs" //nolint:depguard // Wired in app layer "go.trai.ch/same/internal/adapters/logger" //nolint:depguard // Wired in app layer "go.trai.ch/same/internal/adapters/nix" //nolint:depguard // Wired in app layer @@ -34,6 +35,7 @@ func init() { fs.HasherNodeID, fs.ResolverNodeID, nix.EnvFactoryNodeID, + daemon.NodeID, }, Run: runAppNode, }) @@ -87,7 +89,12 @@ func runAppNode(ctx context.Context) (*App, error) { return nil, err } - return New(loader, executor, log, store, hasher, resolver, envFactory), nil + connector, err := graft.Dep[ports.DaemonConnector](ctx) + if err != nil { + return nil, err + } + + return New(loader, executor, log, store, hasher, resolver, envFactory, connector), nil } func runComponentsNode(ctx context.Context) (*Components, error) { diff --git a/cli/internal/core/domain/cache.go b/cli/internal/core/domain/cache.go new file mode 100644 index 0000000..b69c649 --- /dev/null +++ b/cli/internal/core/domain/cache.go @@ -0,0 +1,9 @@ +// Package domain contains core domain types for caching. +package domain + +// GraphCacheEntry holds a cached graph with its validation metadata. +type GraphCacheEntry struct { + Graph *Graph + ConfigPaths []string // Paths to same.yaml / same.work.yaml + Mtimes map[string]int64 // path -> mtime in UnixNano +} diff --git a/cli/internal/core/domain/errors.go b/cli/internal/core/domain/errors.go index f383d51..c372b72 100644 --- a/cli/internal/core/domain/errors.go +++ b/cli/internal/core/domain/errors.go @@ -152,4 +152,16 @@ var ( // ErrCacheMiss is returned when a requested item is not found in the cache. ErrCacheMiss = zerr.New("cache miss") + + // ErrDaemonNotRunning is returned when the daemon is not running. + ErrDaemonNotRunning = zerr.New("daemon is not running") + + // ErrDaemonSpawnFailed is returned when spawning the daemon fails. + ErrDaemonSpawnFailed = zerr.New("failed to spawn daemon process") + + // ErrDaemonShutdownFailed is returned when graceful shutdown fails. 
+ ErrDaemonShutdownFailed = zerr.New("failed to shutdown daemon gracefully") + + // ErrDaemonAlreadyRunning is returned when attempting to start a daemon that's already running. + ErrDaemonAlreadyRunning = zerr.New("daemon is already running") ) diff --git a/cli/internal/core/domain/hash_cache.go b/cli/internal/core/domain/hash_cache.go new file mode 100644 index 0000000..7e2b288 --- /dev/null +++ b/cli/internal/core/domain/hash_cache.go @@ -0,0 +1,16 @@ +package domain + +import ( + "time" + "unique" +) + +// TaskHashEntry stores the computed hash and related metadata for a task. +type TaskHashEntry struct { + // Hash is the computed input hash for the task. + Hash string + // ResolvedInputs is the list of resolved input paths at the time of hashing. + ResolvedInputs []unique.Handle[string] + // ComputedAt is the timestamp when the hash was computed. + ComputedAt time.Time +} diff --git a/cli/internal/core/domain/layout.go b/cli/internal/core/domain/layout.go index 995d64f..9bdf3cc 100644 --- a/cli/internal/core/domain/layout.go +++ b/cli/internal/core/domain/layout.go @@ -1,6 +1,9 @@ package domain -import "path/filepath" +import ( + "path/filepath" + "time" +) const ( // SameDirName is the name of the internal workspace directory. @@ -18,6 +21,9 @@ const ( // EnvDirName is the name of the environment cache directory. EnvDirName = "environments" + // DaemonDirName is the name of the daemon directory. + DaemonDirName = "daemon" + // SameFileName is the name of the project configuration file. SameFileName = "same.yaml" @@ -27,6 +33,15 @@ const ( // DebugLogFile is the name of the debug log file. DebugLogFile = "debug.log" + // DaemonSocketName is the name of the daemon Unix socket file. + DaemonSocketName = "daemon.sock" + + // DaemonPIDFileName is the name of the daemon PID file. + DaemonPIDFileName = "daemon.pid" + + // DaemonLogFileName is the name of the daemon log file. + DaemonLogFileName = "daemon.log" + // DirPerm is the default permission for directories (rwxr-x---). DirPerm = 0o750 @@ -35,6 +50,13 @@ const ( // PrivateFilePerm is the default permission for private files (rw-------). PrivateFilePerm = 0o600 + + // SocketPerm is the permission for Unix domain sockets (rwxr-x---). + // This ensures the socket is accessible to the owner and group, but not others. + SocketPerm = 0o750 + + // DaemonInactivityTimeout is the duration of inactivity after which the daemon shuts itself down. + DaemonInactivityTimeout = 3 * time.Hour ) // DefaultSamePath returns the default root directory for same metadata. @@ -65,3 +87,18 @@ func DefaultEnvCachePath() string { func DefaultDebugLogPath() string { return filepath.Join(SameDirName, DebugLogFile) } + +// DefaultDaemonSocketPath returns the path to the daemon Unix socket. +func DefaultDaemonSocketPath() string { + return filepath.Join(SameDirName, DaemonDirName, DaemonSocketName) +} + +// DefaultDaemonPIDPath returns the path to the daemon PID file. +func DefaultDaemonPIDPath() string { + return filepath.Join(SameDirName, DaemonDirName, DaemonPIDFileName) +} + +// DefaultDaemonLogPath returns the path to the daemon log file.
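To make the layout concrete: these helpers return paths relative to the workspace root. SameDirName's value is outside this hunk, so the ".same" prefix below is an assumption for illustration only:

// Illustrative sketch; assumes SameDirName == ".same" and a hypothetical root.
root := "/repo"
_ = filepath.Join(root, domain.DefaultDaemonSocketPath()) // "/repo/.same/daemon/daemon.sock"
_ = filepath.Join(root, domain.DefaultDaemonPIDPath())    // "/repo/.same/daemon/daemon.pid"
_ = filepath.Join(root, domain.DefaultDaemonLogPath())    // "/repo/.same/daemon/daemon.log"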
+func DefaultDaemonLogPath() string { + return filepath.Join(SameDirName, DaemonDirName, DaemonLogFileName) +} diff --git a/cli/internal/core/ports/config_loader.go b/cli/internal/core/ports/config_loader.go index e617d17..668a524 100644 --- a/cli/internal/core/ports/config_loader.go +++ b/cli/internal/core/ports/config_loader.go @@ -8,4 +8,12 @@ import "go.trai.ch/same/internal/core/domain" type ConfigLoader interface { // Load reads the configuration from the given working directory and returns the task graph. Load(cwd string) (*domain.Graph, error) + + // DiscoverConfigPaths finds configuration file paths and their modification times. + // Returns a map of config file paths to their mtime in UnixNano. + DiscoverConfigPaths(cwd string) (map[string]int64, error) + + // DiscoverRoot walks up from cwd to find the workspace root. + // Returns the directory containing same.work.yaml or same.yaml. + DiscoverRoot(cwd string) (string, error) } diff --git a/cli/internal/core/ports/daemon.go b/cli/internal/core/ports/daemon.go new file mode 100644 index 0000000..6287fd5 --- /dev/null +++ b/cli/internal/core/ports/daemon.go @@ -0,0 +1,78 @@ +package ports + +import ( + "context" + "io" + "time" + + "go.trai.ch/same/internal/core/domain" +) + +//go:generate mockgen -source=daemon.go -destination=mocks/mock_daemon.go -package=mocks + +// DaemonStatus represents the current state of the daemon. +type DaemonStatus struct { + Running bool + PID int + Uptime time.Duration + LastActivity time.Time + IdleRemaining time.Duration +} + +// DaemonClient defines the interface for communicating with the daemon. +type DaemonClient interface { + // Ping checks if the daemon is alive and resets the inactivity timer. + Ping(ctx context.Context) error + + // Status returns the current daemon status. + Status(ctx context.Context) (*DaemonStatus, error) + + // Shutdown requests a graceful daemon shutdown. + Shutdown(ctx context.Context) error + + // GetGraph retrieves the task graph from the daemon. + // configMtimes is a map of config file paths to their mtime (UnixNano). + GetGraph( + ctx context.Context, + cwd string, + configMtimes map[string]int64, + ) (graph *domain.Graph, cacheHit bool, err error) + + // GetEnvironment retrieves resolved Nix environment variables. + GetEnvironment( + ctx context.Context, + envID string, + tools map[string]string, + ) (envVars []string, cacheHit bool, err error) + + // GetInputHash retrieves the cached or pending input hash for a task. + GetInputHash( + ctx context.Context, + taskName, root string, + env map[string]string, + ) (InputHashResult, error) + + // ExecuteTask runs a task on the daemon and streams output. + ExecuteTask( + ctx context.Context, + task *domain.Task, + nixEnv []string, + stdout, stderr io.Writer, + ) error + + // Close releases client resources. + Close() error +} + +// DaemonConnector manages daemon lifecycle from the CLI perspective. +type DaemonConnector interface { + // Connect returns a client to the daemon, spawning it if necessary. + // root is the workspace root directory where the daemon operates. + Connect(ctx context.Context, root string) (DaemonClient, error) + + // IsRunning checks if the daemon process is currently running at the given root. + IsRunning(root string) bool + + // Spawn starts a new daemon process in the background at the given root. 
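The mtime map returned by DiscoverConfigPaths is the validation side of the domain.GraphCacheEntry type added earlier in this diff. A sketch of the check a daemon-side cache could perform; the function name and shape are hypothetical:

// cachedGraph serves entry.Graph only while every tracked config file still
// carries the mtime recorded when the graph was built.
func cachedGraph(loader ports.ConfigLoader, entry *domain.GraphCacheEntry, cwd string) (*domain.Graph, bool) {
	current, err := loader.DiscoverConfigPaths(cwd)
	if err != nil || len(current) != len(entry.Mtimes) {
		return nil, false // discovery failed, or the config file set changed
	}
	for path, mtime := range current {
		if entry.Mtimes[path] != mtime {
			return nil, false // a same.yaml / same.work.yaml was modified
		}
	}
	return entry.Graph, true
}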
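The app tests earlier in this diff pin down Connect's contract: when it fails (e.g. "daemon not available"), the run proceeds fully locally. A sketch of that call site, with an illustrative helper name:

// daemonClientOrNil treats any connection failure as "no daemon": a nil
// client tells the scheduler to execute everything in-process.
func daemonClientOrNil(ctx context.Context, c ports.DaemonConnector, root string) ports.DaemonClient {
	client, err := c.Connect(ctx, root)
	if err != nil {
		return nil
	}
	return client
}

A nil client lines up with the scheduler changes below, which guard every daemon call with a nil check on the stored client.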
+ Spawn(ctx context.Context, root string) error +} diff --git a/cli/internal/core/ports/input_hash_cache.go b/cli/internal/core/ports/input_hash_cache.go new file mode 100644 index 0000000..993bce0 --- /dev/null +++ b/cli/internal/core/ports/input_hash_cache.go @@ -0,0 +1,35 @@ +package ports + +// InputHashState represents the state of an input hash computation. +type InputHashState uint8 + +const ( + // HashReady indicates the hash has been computed and is available. + HashReady InputHashState = iota + // HashPending indicates the hash is currently being computed. + HashPending + // HashUnknown indicates the hash state is unknown (typically means not yet cached). + HashUnknown +) + +// InputHashResult contains the result of an input hash query. +type InputHashResult struct { + // State indicates the current state of the hash computation. + State InputHashState + // Hash is the computed hash (only valid when State is HashReady). + Hash string +} + +// InputHashCache defines the interface for caching and managing input hashes. +// +//go:generate mockgen -destination=mocks/mock_input_hash_cache.go -package=mocks -source=input_hash_cache.go +type InputHashCache interface { + // GetInputHash returns the current hash state and value for the given task. + // root and env are provided per-request to avoid race conditions when multiple + // requests query hashes for the same task in different contexts. + // It returns HashUnknown if the task has not been cached yet. + GetInputHash(taskName, root string, env map[string]string) InputHashResult + // Invalidate marks cached hashes for tasks affected by the changed paths. + // This should be called when files are modified. + Invalidate(paths []string) +} diff --git a/cli/internal/core/ports/store.go b/cli/internal/core/ports/store.go index 1adbdf2..c6d6045 100644 --- a/cli/internal/core/ports/store.go +++ b/cli/internal/core/ports/store.go @@ -8,8 +8,8 @@ import "go.trai.ch/same/internal/core/domain" type BuildInfoStore interface { // Get retrieves the build info for a given task name. // Returns nil, nil if not found. - Get(taskName string) (*domain.BuildInfo, error) + Get(root, taskName string) (*domain.BuildInfo, error) // Put stores the build info. - Put(info domain.BuildInfo) error + Put(root string, info domain.BuildInfo) error } diff --git a/cli/internal/core/ports/watcher.go b/cli/internal/core/ports/watcher.go new file mode 100644 index 0000000..6e70ab8 --- /dev/null +++ b/cli/internal/core/ports/watcher.go @@ -0,0 +1,39 @@ +package ports + +import ( + "context" + "iter" +) + +// WatchOp represents the type of file system operation. +type WatchOp uint8 + +const ( + // OpCreate indicates a file or directory was created. + OpCreate WatchOp = iota + // OpWrite indicates a file was modified. + OpWrite + // OpRemove indicates a file or directory was removed. + OpRemove + // OpRename indicates a file or directory was renamed. + OpRename +) + +// WatchEvent represents a file system event from the watcher. +type WatchEvent struct { + // Path is the absolute path of the file or directory that changed. + Path string + // Operation is the type of change that occurred. + Operation WatchOp +} + +// Watcher defines the interface for watching file system changes. +type Watcher interface { + // Start begins watching the given root directory recursively. + // It returns an error if the watcher fails to start. + Start(ctx context.Context, root string) error + // Stop stops the watcher and releases all resources. 
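Events (declared just below) is a Go 1.23 iter.Seq, so it can be drained with a plain range loop. A sketch of how a daemon might feed it into InputHashCache.Invalidate; the loop and its name are assumptions, not part of this change:

// watchLoop forwards file-system changes into the hash cache so stale task
// hashes are recomputed on the next GetInputHash query.
func watchLoop(ctx context.Context, w ports.Watcher, cache ports.InputHashCache, root string) error {
	if err := w.Start(ctx, root); err != nil {
		return err
	}
	defer func() { _ = w.Stop() }()
	for ev := range w.Events() {
		// Creates, writes, removes, and renames can all change resolved inputs.
		cache.Invalidate([]string{ev.Path})
	}
	return nil
}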
+ Stop() error + // Events returns an iterator of file system events. + Events() iter.Seq[WatchEvent] +} diff --git a/cli/internal/engine/scheduler/scheduler.go b/cli/internal/engine/scheduler/scheduler.go index 1c6548b..1b40a1b 100644 --- a/cli/internal/engine/scheduler/scheduler.go +++ b/cli/internal/engine/scheduler/scheduler.go @@ -3,6 +3,7 @@ package scheduler import ( "context" "errors" + "io" "os" "path/filepath" "runtime" @@ -15,6 +16,8 @@ import ( "go.trai.ch/same/internal/core/ports" "go.trai.ch/zerr" "golang.org/x/sync/errgroup" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) // TaskStatus represents the status of a task. @@ -39,6 +42,8 @@ type Scheduler struct { resolver ports.InputResolver tracer ports.Tracer envFactory ports.EnvironmentFactory + daemon ports.DaemonClient + noDaemon bool // When true, skip remote execution mu sync.RWMutex taskStatus map[domain.InternedString]TaskStatus @@ -67,6 +72,18 @@ func NewScheduler( return s } +// WithDaemon sets the daemon client for the scheduler and returns itself for chaining. +func (s *Scheduler) WithDaemon(daemon ports.DaemonClient) *Scheduler { + s.daemon = daemon + return s +} + +// WithNoDaemon sets whether to skip remote execution and returns itself for chaining. +func (s *Scheduler) WithNoDaemon(noDaemon bool) *Scheduler { + s.noDaemon = noDaemon + return s +} + // initTaskStatuses initializes the status of tasks in the graph to Pending. func (s *Scheduler) initTaskStatuses(tasks []domain.InternedString) { s.mu.Lock() @@ -78,10 +95,10 @@ func (s *Scheduler) initTaskStatuses(tasks []domain.InternedString) { } // updateStatus updates the status of a task. -func (s *Scheduler) updateStatus(name domain.InternedString, status TaskStatus) { +func (s *Scheduler) updateStatus(name domain.InternedString, taskStatus TaskStatus) { s.mu.Lock() defer s.mu.Unlock() - s.taskStatus[name] = status + s.taskStatus[name] = taskStatus } // Run executes the tasks in the graph with the specified parallelism. @@ -267,6 +284,8 @@ func (state *schedulerRunState) runExecutionLoop() error { } // prepareEnvironments resolves all required environments concurrently. +// +//nolint:cyclop // complexity due to daemon fallback logic func (state *schedulerRunState) prepareEnvironments(ctx context.Context) error { // Identify unique environment IDs needed for this run neededEnvIDs := make(map[string]map[string]string) // envID -> tools map (sample) @@ -311,7 +330,21 @@ func (state *schedulerRunState) prepareEnvironments(ctx context.Context) error { return nil } - env, err := state.s.envFactory.GetEnvironment(ctx, item.tools) + var env []string + var err error + + // Try to use the daemon client if available + if state.s.daemon != nil { + env, _, err = state.s.daemon.GetEnvironment(ctx, item.id, item.tools) + if err != nil { + // Fall back to the local factory on daemon error + env, err = state.s.envFactory.GetEnvironment(ctx, item.tools) + } + } else { + // Use the local factory when the daemon is not available + env, err = state.s.envFactory.GetEnvironment(ctx, item.tools) + } + if err != nil { return zerr.Wrap(err, "failed to hydrate environment") } @@ -420,6 +453,10 @@ func (state *schedulerRunState) schedule() { } func (state *schedulerRunState) executeTask(t *domain.Task) { + state.executeWithStrategy(t) +} + +func (state *schedulerRunState) executeWithStrategy(t *domain.Task) { // Execute the task logic within a function to ensure the span is ended // BEFORE we send the result to the channel.
This prevents race conditions // in tests where the scheduler loop finishes before the span is recorded. @@ -471,8 +508,8 @@ func (state *schedulerRunState) executeTask(t *domain.Task) { env = cachedEnv.([]string) } - // Step 4: Execute - err = state.s.executor.Execute(ctx, t, env, span, span) + // Step 4: Execute (Remote or Local) + err = state.executeWithFallback(ctx, t, env, span, span) if err != nil { span.RecordError(err) } @@ -488,6 +525,49 @@ func (state *schedulerRunState) executeTask(t *domain.Task) { state.resultsCh <- res } +func (state *schedulerRunState) executeWithFallback( + ctx context.Context, + t *domain.Task, + env []string, + stdout, stderr io.Writer, +) error { + var execErr error + + // Try remote execution via the daemon if available and not disabled + if state.s.daemon != nil && !state.s.noDaemon { + execErr = state.s.daemon.ExecuteTask(ctx, t, env, stdout, stderr) + if execErr != nil && isConnectionError(execErr) { + // Fall back to local execution on connection errors only + execErr = state.s.executor.Execute(ctx, t, env, stdout, stderr) + } + } else { + // Local execution + execErr = state.s.executor.Execute(ctx, t, env, stdout, stderr) + } + + return execErr +} + +// isConnectionError checks if the error is a gRPC connection-related error. +func isConnectionError(err error) bool { + if err == nil { + return false + } + + // Unwrap error chain to handle wrapped gRPC errors + for unwrapped := err; unwrapped != nil; unwrapped = errors.Unwrap(unwrapped) { + st, ok := status.FromError(unwrapped) + if ok { + // Check for codes indicating connection issues + switch st.Code() { + case codes.Unavailable, codes.DeadlineExceeded: + return true + } + } + } + return false +} + func (state *schedulerRunState) computeInputHash(t *domain.Task) (skipped bool, hash string, err error) { // If task is configured to always rebuild, bypass cache if t.RebuildStrategy == domain.RebuildAlways { @@ -576,7 +656,7 @@ func (state *schedulerRunState) handleSuccess(res result) { if !res.skipped { outputHash := state.computeOutputHash(res) if outputHash != "" || len(res.taskOutputs) == 0 { - err := state.s.store.Put(domain.BuildInfo{ + err := state.s.store.Put(state.graph.Root(), domain.BuildInfo{ TaskName: res.task.String(), InputHash: res.inputHash, OutputHash: outputHash, @@ -658,7 +738,7 @@ func (s *Scheduler) checkTaskCache( } // Step B: Get Build Info from Store - info, err := s.store.Get(task.Name.String()) + info, err := s.store.Get(root, task.Name.String()) if err != nil { return false, hash, zerr.Wrap(err, domain.ErrStoreReadFailed.Error()) } diff --git a/cli/internal/engine/scheduler/scheduler_env_test.go b/cli/internal/engine/scheduler/scheduler_env_test.go index 942ff7b..0424917 100644 --- a/cli/internal/engine/scheduler/scheduler_env_test.go +++ b/cli/internal/engine/scheduler/scheduler_env_test.go @@ -63,14 +63,14 @@ func TestScheduler_Execute_UsesEnvFactory(t *testing.T) { mockHasher.EXPECT().ComputeInputHash(task, gomock.Any(), gomock.Any()).Return("hash1", nil) // 3. Cache Check - mockStore.EXPECT().Get("build").Return(nil, nil) + mockStore.EXPECT().Get(".", "build").Return(nil, nil) // 5. Execution with Env mockExec.EXPECT().Execute(ctx, task, expectedEnv, gomock.Any(), gomock.Any()).Return(nil) // 6.
Output Hashing & Store Put mockHasher.EXPECT().ComputeOutputHash(gomock.Any(), ".").Return("outHash", nil) - mockStore.EXPECT().Put(gomock.Any()).Return(nil) + mockStore.EXPECT().Put(".", gomock.Any()).Return(nil) err := s.Run(ctx, g, []string{"build"}, 1, false) require.NoError(t, err) diff --git a/cli/internal/engine/scheduler/scheduler_test.go b/cli/internal/engine/scheduler/scheduler_test.go index 36530a7..81b9fca 100644 --- a/cli/internal/engine/scheduler/scheduler_test.go +++ b/cli/internal/engine/scheduler/scheduler_test.go @@ -90,8 +90,8 @@ func TestScheduler_Run_Diamond(t *testing.T) { mockResolver.EXPECT().ResolveInputs(gomock.Any(), ".").Return([]string{}, nil).Times(3) mockHasher.EXPECT().ComputeInputHash(gomock.Any(), nil, []string{}).Return("hash", nil).Times(3) - mockStore.EXPECT().Get(gomock.Any()).Return(nil, nil).Times(3) - mockStore.EXPECT().Put(gomock.Any()).Return(nil).Times(2) + mockStore.EXPECT().Get(".", gomock.Any()).Return(nil, nil).Times(3) + mockStore.EXPECT().Put(".", gomock.Any()).Return(nil).Times(2) mockExec.EXPECT().Execute(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( func(_ context.Context, task *domain.Task, _ []string, _, _ io.Writer) error { @@ -133,6 +133,7 @@ func TestScheduler_Run_Diamond(t *testing.T) { close(bProceed) close(cProceed) + synctest.Wait() err := <-errCh if err == nil { @@ -190,8 +191,8 @@ func TestScheduler_Run_Partial(t *testing.T) { mockResolver.EXPECT().ResolveInputs(gomock.Any(), ".").Return([]string{}, nil).Times(3) mockHasher.EXPECT().ComputeInputHash(gomock.Any(), nil, []string{}).Return("hash", nil).Times(3) - mockStore.EXPECT().Get(gomock.Any()).Return(nil, nil).Times(3) - mockStore.EXPECT().Put(gomock.Any()).Return(nil).Times(3) + mockStore.EXPECT().Get(".", gomock.Any()).Return(nil, nil).Times(3) + mockStore.EXPECT().Put(".", gomock.Any()).Return(nil).Times(3) executedTasks := make(map[string]bool) var mu sync.Mutex @@ -247,8 +248,8 @@ func TestScheduler_Run_ExplicitAll(t *testing.T) { mockResolver.EXPECT().ResolveInputs(gomock.Any(), ".").Return([]string{}, nil).Times(3) mockHasher.EXPECT().ComputeInputHash(gomock.Any(), nil, []string{}).Return("hash", nil).Times(3) - mockStore.EXPECT().Get(gomock.Any()).Return(nil, nil).Times(3) - mockStore.EXPECT().Put(gomock.Any()).Return(nil).Times(3) + mockStore.EXPECT().Get(".", gomock.Any()).Return(nil, nil).Times(3) + mockStore.EXPECT().Put(".", gomock.Any()).Return(nil).Times(3) mockExec.EXPECT().Execute(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(3) @@ -340,7 +341,7 @@ func TestScheduler_CheckTaskCache(t *testing.T) { t.Run("CacheHit", func(t *testing.T) { mockResolver.EXPECT().ResolveInputs([]string{}, ".").Return([]string{}, nil) mockHasher.EXPECT().ComputeInputHash(task, task.Environment, []string{}).Return(testHash, nil) - mockStore.EXPECT().Get("test-task").Return(&domain.BuildInfo{ + mockStore.EXPECT().Get(".", "test-task").Return(&domain.BuildInfo{ TaskName: "test-task", InputHash: testHash, OutputHash: outputHash, @@ -357,7 +358,7 @@ func TestScheduler_CheckTaskCache(t *testing.T) { t.Run("CacheMiss_InputMismatch", func(t *testing.T) { mockResolver.EXPECT().ResolveInputs([]string{}, ".").Return([]string{}, nil) mockHasher.EXPECT().ComputeInputHash(task, task.Environment, []string{}).Return(testHash, nil) - mockStore.EXPECT().Get("test-task").Return(&domain.BuildInfo{ + mockStore.EXPECT().Get(".", "test-task").Return(&domain.BuildInfo{ TaskName: "test-task", InputHash: "old-hash", }, nil) @@ 
-402,10 +403,10 @@ func TestScheduler_Run_Caching(t *testing.T) { mockResolver.EXPECT().ResolveInputs([]string{}, ".").Return([]string{}, nil) mockHasher.EXPECT().ComputeInputHash(task, task.Environment, []string{}).Return(hash1, nil) - mockStore.EXPECT().Get("build").Return(nil, nil) + mockStore.EXPECT().Get(".", "build").Return(nil, nil) mockExec.EXPECT().Execute(ctx, task, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) mockHasher.EXPECT().ComputeOutputHash([]string{"out"}, ".").Return(outputHash, nil) - mockStore.EXPECT().Put(gomock.Any()).Return(nil) + mockStore.EXPECT().Put(".", gomock.Any()).Return(nil) err := s.Run(ctx, g, []string{"build"}, 1, false) require.NoError(t, err) @@ -419,7 +420,7 @@ func TestScheduler_Run_Caching(t *testing.T) { mockResolver.EXPECT().ResolveInputs([]string{}, ".").Return([]string{}, nil) mockHasher.EXPECT().ComputeInputHash(task, task.Environment, []string{}).Return(hash1, nil) - mockStore.EXPECT().Get("build").Return(&domain.BuildInfo{ + mockStore.EXPECT().Get(".", "build").Return(&domain.BuildInfo{ TaskName: "build", InputHash: hash1, OutputHash: outputHash, @@ -462,7 +463,7 @@ func setupCacheBypassTest(t *testing.T, rebuildStrategy domain.RebuildStrategy, mockResolver.EXPECT().ResolveInputs([]string{}, ".").Return([]string{}, nil) mockHasher.EXPECT().ComputeInputHash(task, task.Environment, []string{}).Return("hash1", nil) mockExec.EXPECT().Execute(ctx, task, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - mockStore.EXPECT().Put(gomock.Any()).Return(nil) + mockStore.EXPECT().Put(".", gomock.Any()).Return(nil) err := s.Run(ctx, g, []string{"build"}, 1, noCache) require.NoError(t, err) @@ -516,7 +517,7 @@ func TestScheduler_RebuildStrategy(t *testing.T) { // RebuildOnChange should call Store.Get mockResolver.EXPECT().ResolveInputs([]string{}, ".").Return([]string{}, nil) mockHasher.EXPECT().ComputeInputHash(task, task.Environment, []string{}).Return(hash1, nil) - mockStore.EXPECT().Get("build").Return(&domain.BuildInfo{ + mockStore.EXPECT().Get(".", "build").Return(&domain.BuildInfo{ TaskName: "build", InputHash: hash1, OutputHash: outputHash, diff --git a/cli/internal/wiring/wiring.go b/cli/internal/wiring/wiring.go index 2b0eb56..f7c637f 100644 --- a/cli/internal/wiring/wiring.go +++ b/cli/internal/wiring/wiring.go @@ -5,6 +5,7 @@ import ( // Register adapter nodes. 
_ "go.trai.ch/same/internal/adapters/cas" _ "go.trai.ch/same/internal/adapters/config" + _ "go.trai.ch/same/internal/adapters/daemon" _ "go.trai.ch/same/internal/adapters/fs" _ "go.trai.ch/same/internal/adapters/logger" _ "go.trai.ch/same/internal/adapters/nix" diff --git a/flake.nix b/flake.nix index d83c05f..c23cd87 100644 --- a/flake.nix +++ b/flake.nix @@ -34,6 +34,7 @@ mockgen gci gofumpt + buf ]; in @@ -43,7 +44,7 @@ inherit version; src = ./cli; - vendorHash = "sha256-O9y+DIxt8YcqlP499Ns5ECHEWV2IENy6nAH25Leh1AI="; + vendorHash = "sha256-bPO2Kqn45RNR9H5bf+Tfsqa9h7PtHSVEPTWqxx9+pc0="; env.CGO_ENABLED = 0; diff --git a/go.work.sum b/go.work.sum index 74d82b8..5483c55 100644 --- a/go.work.sum +++ b/go.work.sum @@ -1,3 +1,12 @@ +cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0/go.mod h1:2bIszWvQRlJVmJLiuLhukLImRjKPcYdzzsx6darK02A= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= @@ -12,26 +21,65 @@ github.com/charmbracelet/x/exp/golden v0.0.0-20240806155701-69247e0abc2a h1:G99k github.com/charmbracelet/x/exp/golden v0.0.0-20240806155701-69247e0abc2a/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U= github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ= github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U= +github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0= +github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4= github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 h1:q2hJAaP1k2wIvVRd/hEHD7lacgqrCPS+k8g1MndzfWY= github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= +github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM= +github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod 
h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs= +github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= +github.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo= +github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= +github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= +github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= +github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= +github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/sahilm/fuzzy v0.1.1 h1:ceu5RHF8DGgoi+/dR5PsECjCDH1BE3Fnmpo7aVXOdRA= github.com/sahilm/fuzzy v0.1.1/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y= +github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= +github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= -golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= -golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/crypto v0.46.0 
h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= +golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc h1:bH6xUXay0AIFMElXG2rQ4uiE+7ncwtiOdPfYK1NK2XA= golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda h1:+2XxjfsAu6vqFxwGBRcHiMaDCuZiqXGDUDVWVtrFAnE= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo=