diff --git a/fact-ebpf/src/bpf/builtins.h b/fact-ebpf/src/bpf/builtins.h
deleted file mode 100644
index e35fae6d..00000000
--- a/fact-ebpf/src/bpf/builtins.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#pragma once
-
-// clang-format off
-#ifndef memcpy
-#define memcpy __builtin_memcpy
-#endif
-// clang-format on
diff --git a/fact-ebpf/src/bpf/main.c b/fact-ebpf/src/bpf/main.c
index 659814bf..e4b5395e 100644
--- a/fact-ebpf/src/bpf/main.c
+++ b/fact-ebpf/src/bpf/main.c
@@ -3,7 +3,6 @@
 #include "file.h"
 #include "types.h"
-#include "process.h"
 #include "maps.h"
 #include "events.h"
 #include "bound_path.h"
 
@@ -103,3 +102,38 @@ int BPF_PROG(trace_path_unlink, struct path* dir, struct dentry* dentry) {
   m->path_unlink.error++;
   return 0;
 }
+
+SEC("tp_btf/cgroup_attach_task")
+int BPF_PROG(trace_cgroup_attach_task, struct cgroup* dst_cgrp, const char* path, struct task_struct* _task, bool _threadgroup) {
+  struct metrics_t* m = get_metrics();
+  if (m == NULL) {
+    bpf_printk("Failed to get metrics entry");
+    return 0;
+  }
+
+  m->cgroup_attach_task.total++;
+
+  u64 id = dst_cgrp->kn->id;
+  if (bpf_map_lookup_elem(&cgroup_map, &id) != NULL) {
+    // Already have the entry
+    m->cgroup_attach_task.ignored++;
+    return 0;
+  }
+
+  struct helper_t* helper = get_helper();
+  if (helper == NULL) {
+    bpf_printk("Failed to get helper entry");
+    m->cgroup_attach_task.error++;
+    return 0;
+  }
+
+  bpf_core_read_str(helper->cgroup_entry.path, PATH_MAX, path);
+  helper->cgroup_entry.parsed = false;
+  int res = bpf_map_update_elem(&cgroup_map, &id, &helper->cgroup_entry, BPF_NOEXIST);
+  if (res != 0) {
+    bpf_printk("Failed to update path for %llu", id);
+    m->cgroup_attach_task.error++;
+  }
+
+  return 0;
+}
diff --git a/fact-ebpf/src/bpf/maps.h b/fact-ebpf/src/bpf/maps.h
index 0e9ae4c5..1c88d5d6 100644
--- a/fact-ebpf/src/bpf/maps.h
+++ b/fact-ebpf/src/bpf/maps.h
@@ -11,8 +11,10 @@
  * Helper struct with buffers for various operations
  */
 struct helper_t {
-  char buf[PATH_MAX * 2];
-  const unsigned char* array[16];
+  union {
+    cgroup_entry_t cgroup_entry;
+    char buf[PATH_MAX * 2];
+  };
 };
 
 struct {
@@ -104,6 +106,13 @@ __always_inline static struct metrics_t* get_metrics() {
   return bpf_map_lookup_elem(&metrics, &zero);
 }
 
+struct {
+  __uint(type, BPF_MAP_TYPE_LRU_HASH);
+  __type(key, __u64);
+  __type(value, cgroup_entry_t);
+  __uint(max_entries, (1 << 16) - 1);
+} cgroup_map SEC(".maps");
+
 uint64_t host_mount_ns;
 
 volatile const bool path_unlink_supports_bpf_d_path;
diff --git a/fact-ebpf/src/bpf/process.h b/fact-ebpf/src/bpf/process.h
index 910f4825..f51c30c0 100644
--- a/fact-ebpf/src/bpf/process.h
+++ b/fact-ebpf/src/bpf/process.h
@@ -11,72 +11,6 @@
 #include
 // clang-format on
 
-__always_inline static const char* get_memory_cgroup(struct helper_t* helper) {
-  if (!bpf_core_enum_value_exists(enum cgroup_subsys_id, memory_cgrp_id)) {
-    return NULL;
-  }
-
-  struct task_struct* task = (struct task_struct*)bpf_get_current_task();
-
-  // We're guessing which cgroup controllers are enabled for this task. The
-  // assumption is that memory controller is present more often than
-  // cpu & cpuacct.
-  struct kernfs_node* kn = BPF_CORE_READ(task, cgroups, subsys[memory_cgrp_id], cgroup, kn);
-  if (kn == NULL) {
-    return NULL;
-  }
-
-  int i = 0;
-  for (; i < 16; i++) {
-    helper->array[i] = (const unsigned char*)BPF_CORE_READ(kn, name);
-    if (bpf_core_field_exists(kn->__parent)) {
-      kn = BPF_CORE_READ(kn, __parent);
-    } else {
-      struct kernfs_node___pre6_15 {
-        struct kernfs_node* parent;
-      };
-      struct kernfs_node___pre6_15* kn_old = (void*)kn;
-      kn = BPF_CORE_READ(kn_old, parent);
-    }
-    if (kn == NULL) {
-      break;
-    }
-  }
-
-  if (i == 16) {
-    i--;
-  }
-
-  int offset = 0;
-  for (; i >= 0 && offset < PATH_MAX; i--) {
-    // Skip empty directories
-    if (helper->array[i] == NULL) {
-      continue;
-    }
-
-    helper->buf[offset & (PATH_MAX - 1)] = '/';
-    if (++offset >= PATH_MAX) {
-      return NULL;
-    }
-
-    int len = bpf_probe_read_kernel_str(&helper->buf[offset & (PATH_MAX - 1)], PATH_MAX, helper->array[i]);
-    if (len < 0) {
-      // We should have skipped all empty entries, any other error is a genuine
-      // problem, stop processing.
-      return NULL;
-    }
-
-    if (len == 1) {
-      offset--;
-      continue;
-    }
-
-    offset += len - 1;
-  }
-
-  return helper->buf;
-}
-
 __always_inline static void process_fill_lineage(process_t* p, struct helper_t* helper, bool use_bpf_d_path) {
   struct task_struct* task = (struct task_struct*)bpf_get_current_task_btf();
   p->lineage_len = 0;
@@ -109,6 +43,7 @@ __always_inline static int64_t process_fill(process_t* p, bool use_bpf_d_path) {
   p->gid = (uid_gid >> 32) & 0xFFFFFFFF;
   p->login_uid = task->loginuid.val;
   p->pid = (bpf_get_current_pid_tgid() >> 32) & 0xFFFFFFFF;
+  p->cgroup_id = bpf_get_current_cgroup_id();
   u_int64_t err = bpf_get_current_comm(p->comm, TASK_COMM_LEN);
   if (err != 0) {
     bpf_printk("Failed to fill task comm");
@@ -133,11 +68,6 @@ __always_inline static int64_t process_fill(process_t* p, bool use_bpf_d_path) {
 
   d_path(&task->mm->exe_file->f_path, p->exe_path, PATH_MAX, use_bpf_d_path);
 
-  const char* cg = get_memory_cgroup(helper);
-  if (cg != NULL) {
-    bpf_probe_read_str(p->memory_cgroup, PATH_MAX, cg);
-  }
-
   p->in_root_mount_ns = get_mount_ns() == host_mount_ns;
 
   process_fill_lineage(p, helper, use_bpf_d_path);
diff --git a/fact-ebpf/src/bpf/types.h b/fact-ebpf/src/bpf/types.h
index f32ade10..58099ad7 100644
--- a/fact-ebpf/src/bpf/types.h
+++ b/fact-ebpf/src/bpf/types.h
@@ -22,7 +22,7 @@ typedef struct process_t {
   char args[4096];
   unsigned int args_len;
   char exe_path[PATH_MAX];
-  char memory_cgroup[PATH_MAX];
+  unsigned long long cgroup_id;
   unsigned int uid;
   unsigned int gid;
   unsigned int login_uid;
@@ -73,4 +73,10 @@ struct metrics_by_hook_t {
 struct metrics_t {
   struct metrics_by_hook_t file_open;
   struct metrics_by_hook_t path_unlink;
+  struct metrics_by_hook_t cgroup_attach_task;
 };
+
+typedef struct cgroup_entry_t {
+  char parsed;
+  char path[PATH_MAX];
+} cgroup_entry_t;
diff --git a/fact-ebpf/src/lib.rs b/fact-ebpf/src/lib.rs
index da25ef83..eb186642 100644
--- a/fact-ebpf/src/lib.rs
+++ b/fact-ebpf/src/lib.rs
@@ -81,11 +81,13 @@ impl metrics_t {
         let mut m = metrics_t { ..*self };
         m.file_open = m.file_open.accumulate(&other.file_open);
         m.path_unlink = m.path_unlink.accumulate(&other.path_unlink);
+        m.cgroup_attach_task = m.cgroup_attach_task.accumulate(&other.cgroup_attach_task);
         m
     }
 }
 
 unsafe impl Pod for metrics_t {}
+unsafe impl Pod for cgroup_entry_t {}
 
 pub const EBPF_OBJ: &[u8] = aya::include_bytes_aligned!(concat!(env!("OUT_DIR"), "/main.o"));
 pub const CHECKS_OBJ: &[u8] = aya::include_bytes_aligned!(concat!(env!("OUT_DIR"), "/checks.o"));
diff --git a/fact/src/bpf/mod.rs b/fact/src/bpf/mod.rs
index 3b6e27ec..c07c8854 100644
--- a/fact/src/bpf/mod.rs
+++ b/fact/src/bpf/mod.rs
@@ -3,7 +3,7 @@ use std::{io, path::PathBuf, sync::Arc};
 use anyhow::{bail, Context};
 use aya::{
     maps::{Array, LpmTrie, MapData, PerCpuArray, RingBuf},
-    programs::Lsm,
+    programs::Program,
     Btf, Ebpf,
 };
 use checks::Checks;
@@ -15,9 +15,13 @@ use tokio::{
     task::JoinHandle,
 };
 
-use crate::{event::Event, host_info, metrics::EventCounter};
+use crate::{
+    event::{self, Event},
+    host_info,
+    metrics::EventCounter,
+};
 
-use fact_ebpf::{event_t, metrics_t, path_prefix_t, LPM_SIZE_MAX};
+use fact_ebpf::{cgroup_entry_t, event_t, metrics_t, path_prefix_t, LPM_SIZE_MAX};
 
 mod checks;
 
@@ -30,6 +34,8 @@ pub struct Bpf {
 
     paths: Vec<PathBuf>,
     paths_config: watch::Receiver<Vec<PathBuf>>,
+
+    event_parser: event::parser::Parser,
 }
 
 impl Bpf {
@@ -44,7 +50,7 @@ impl Bpf {
 
         // Include the BPF object as raw bytes at compile-time and load it
        // at runtime.
-        let obj = aya::EbpfLoader::new()
+        let mut obj = aya::EbpfLoader::new()
             .set_global("host_mount_ns", &host_info::get_host_mount_ns(), true)
             .set_global(
                 "path_unlink_supports_bpf_d_path",
@@ -56,11 +62,17 @@ impl Bpf {
         let paths = Vec::new();
         let (tx, _) = broadcast::channel(100);
 
+        let Some(cgroup_map) = obj.take_map("cgroup_map") else {
+            bail!("Failed to get cgroup_map");
+        };
+        let cgroup_map: aya::maps::HashMap<MapData, u64, cgroup_entry_t> = cgroup_map.try_into()?;
+        let event_parser = event::parser::Parser::new(cgroup_map);
         let mut bpf = Bpf {
             obj,
             tx,
             paths,
             paths_config,
+            event_parser,
         };
 
         bpf.load_paths()?;
@@ -138,24 +150,42 @@ impl Bpf {
         Ok(())
     }
 
-    fn load_lsm_prog(&mut self, name: &str, hook: &str, btf: &Btf) -> anyhow::Result<()> {
+    fn load_prog(&mut self, name: &str, hook: &str, btf: &Btf) -> anyhow::Result<()> {
         let Some(prog) = self.obj.program_mut(name) else {
             bail!("{name} program not found");
         };
-        let prog: &mut Lsm = prog.try_into()?;
-        prog.load(hook, btf)?;
+        match prog {
+            Program::Lsm(prog) => prog.load(hook, btf)?,
+            Program::BtfTracePoint(prog) => prog.load(hook, btf)?,
+            _ => todo!(),
+        }
         Ok(())
     }
 
     fn load_progs(&mut self, btf: &Btf) -> anyhow::Result<()> {
-        self.load_lsm_prog("trace_file_open", "file_open", btf)?;
-        self.load_lsm_prog("trace_path_unlink", "path_unlink", btf)
+        let progs = [
+            ("trace_file_open", "file_open"),
+            ("trace_path_unlink", "path_unlink"),
+            ("trace_cgroup_attach_task", "cgroup_attach_task"),
+        ];
+
+        for (name, hook) in progs {
+            self.load_prog(name, hook, btf)?;
+        }
+        Ok(())
     }
 
     fn attach_progs(&mut self) -> anyhow::Result<()> {
         for (_, prog) in self.obj.programs_mut() {
-            let prog: &mut Lsm = prog.try_into()?;
-            prog.attach()?;
+            match prog {
+                Program::Lsm(prog) => {
+                    prog.attach()?;
+                }
+                Program::BtfTracePoint(prog) => {
+                    prog.attach()?;
+                }
+                _ => todo!(),
+            }
         }
         Ok(())
     }
 }
@@ -165,8 +195,10 @@ impl Bpf {
         mut self,
         mut running: watch::Receiver<bool>,
         event_counter: EventCounter,
+        parser_counter: EventCounter,
     ) -> JoinHandle<anyhow::Result<()>> {
         info!("Starting BPF worker...");
+        self.event_parser.set_metrics(parser_counter);
 
         tokio::spawn(async move {
             self.attach_progs()
@@ -183,7 +215,7 @@ impl Bpf {
                 let ringbuf = guard.get_inner_mut();
                 while let Some(event) = ringbuf.next() {
                     let event: &event_t = unsafe { &*(event.as_ptr() as *const _) };
-                    let event = match Event::try_from(event) {
+                    let event = match self.event_parser.parse(event) {
                         Ok(event) => Arc::new(event),
                         Err(e) => {
                            error!("Failed to parse event: '{e}'");
@@ -268,7 +300,11 @@ mod bpf_tests {
 
        // Create a metrics exporter, but don't start it
        let exporter = Exporter::new(bpf.take_metrics().unwrap());
-        let handle = bpf.start(run_rx, exporter.metrics.bpf_worker.clone());
+        let handle = bpf.start(
+            run_rx,
+            exporter.metrics.bpf_worker.clone(),
+            exporter.metrics.event_parser.clone(),
+        );
 
         tokio::time::sleep(Duration::from_millis(500)).await;
 
@@ -277,7 +313,7 @@ mod bpf_tests {
             NamedTempFile::new_in(monitored_path).expect("Failed to create temporary file");
         println!("Created {file:?}");
 
-        let expected = Event::new(
+        let expected = Event::from_raw_parts(
             file_activity_type_t::FILE_ACTIVITY_CREATION,
             host_info::get_hostname(),
             file.path().to_path_buf(),
diff --git a/fact/src/event/mod.rs b/fact/src/event/mod.rs
index 18b6ea34..725fd790 100644
--- a/fact/src/event/mod.rs
+++ b/fact/src/event/mod.rs
@@ -9,6 +9,7 @@ use fact_ebpf::{event_t, file_activity_type_t, PATH_MAX};
 use crate::host_info;
 use process::Process;
 
+pub(crate) mod parser;
 pub(crate) mod process;
 
 fn slice_to_string(s: &[c_char]) -> anyhow::Result<String> {
@@ -30,8 +31,21 @@ pub struct Event {
 }
 
 impl Event {
+    pub fn new(event: &event_t, container_id: Option<String>) -> anyhow::Result<Self> {
+        let process = Process::new(event.process, container_id)?;
+        let timestamp = host_info::get_boot_time() + event.timestamp;
+        let file = FileData::new(event.type_, event.filename, event.host_file)?;
+
+        Ok(Event {
+            timestamp,
+            hostname: host_info::get_hostname(),
+            process,
+            file,
+        })
+    }
+
     #[cfg(test)]
-    pub fn new(
+    pub fn from_raw_parts(
         event_type: file_activity_type_t,
         hostname: &'static str,
         filename: PathBuf,
@@ -62,23 +76,6 @@ impl Event {
     }
 }
 
-impl TryFrom<&event_t> for Event {
-    type Error = anyhow::Error;
-
-    fn try_from(value: &event_t) -> Result<Self, Self::Error> {
-        let process = Process::try_from(value.process)?;
-        let timestamp = host_info::get_boot_time() + value.timestamp;
-        let file = FileData::new(value.type_, value.filename, value.host_file)?;
-
-        Ok(Event {
-            timestamp,
-            hostname: host_info::get_hostname(),
-            process,
-            file,
-        })
-    }
-}
-
 impl From<Event> for fact_api::FileActivity {
     fn from(value: Event) -> Self {
         let file = fact_api::file_activity::File::from(value.file);
diff --git a/fact/src/event/parser.rs b/fact/src/event/parser.rs
new file mode 100644
index 00000000..c6ebdf09
--- /dev/null
+++ b/fact/src/event/parser.rs
@@ -0,0 +1,216 @@
+use std::{
+    ffi::{c_char, CStr},
+    os::unix::fs::DirEntryExt,
+    path::Path,
+};
+
+use aya::maps::{MapData, MapError};
+use fact_ebpf::{cgroup_entry_t, event_t};
+use log::warn;
+
+use crate::{host_info, metrics::EventCounter};
+
+use super::Event;
+
+pub struct Parser {
+    cgroup_map: aya::maps::HashMap<MapData, u64, cgroup_entry_t>,
+    metrics: Option<EventCounter>,
+}
+
+impl Parser {
+    pub fn new(mut cgroup_map: aya::maps::HashMap<MapData, u64, cgroup_entry_t>) -> Self {
+        for fs in host_info::get_cgroup_paths() {
+            Parser::fill_in_map(&mut cgroup_map, &fs);
+        }
+        Parser {
+            cgroup_map,
+            metrics: None,
+        }
+    }
+
+    pub fn set_metrics(&mut self, metrics: EventCounter) {
+        self.metrics = Some(metrics);
+    }
+
+    fn metrics_added(&self) {
+        if let Some(metrics) = &self.metrics {
+            metrics.added();
+        }
+    }
+
+    fn metrics_ignored(&self) {
+        if let Some(metrics) = &self.metrics {
+            metrics.ignored();
+        }
+    }
+
+    fn metrics_dropped(&self) {
+        if let Some(metrics) = &self.metrics {
+            metrics.dropped();
+        }
+    }
+
+    pub fn parse(&mut self, event: &event_t) -> anyhow::Result<Event> {
+        let container_id = self.get_container_id(&event.process.cgroup_id);
+        Event::new(event, container_id)
+    }
+
+    fn fill_in_map(cgroup_map: &mut aya::maps::HashMap<MapData, u64, cgroup_entry_t>, path: &Path) {
+        let entries = match std::fs::read_dir(path) {
+            Ok(entries) => entries,
+            Err(e) => {
+                warn!("Failed to read directory {}: {e}", path.display());
+                return;
+            }
+        };
+        for entry in entries {
+            let entry = match entry {
+                Ok(entry) => entry,
+                Err(e) => {
+                    warn!("Failed to read {}: {e}", path.display());
+                    continue;
+                }
+            };
+
+            let p = entry.path();
+            if !p.is_dir() {
+                continue;
+            }
+            let Some(p_str) = p.to_str() else {
+                warn!("p.to_str() failed");
+                continue;
+            };
+            let container_id = Parser::extract_container_id(p_str);
+            let id = entry.ino();
+            let path = unsafe {
+                let mut path: [c_char; 4096] = std::mem::zeroed();
+                if let Some(cid) = &container_id {
+                    std::ptr::copy(cid.as_ptr(), path.as_mut_ptr().cast(), cid.len());
+                }
+
+                path
+            };
+            let cid_entry = fact_ebpf::cgroup_entry_t {
+                parsed: true as c_char,
+                path,
+            };
+            if let Err(e) = cgroup_map.insert(id, cid_entry, 1) {
+                warn!("Failed to insert entry for {id}: {e}");
+            }
+
+            Parser::fill_in_map(cgroup_map, &p);
+        }
+    }
+
+    fn get_container_id(&mut self, k: &u64) -> Option<String> {
+        self.metrics_added();
+        let mut cgroup = match self.cgroup_map.get(k, 0) {
+            Ok(cgroup) => cgroup,
+            Err(MapError::KeyNotFound) => {
+                self.metrics_ignored();
+                return None;
+            }
+            Err(e) => {
+                warn!("Failed to retrieve entry for {k}: {e}");
+                self.metrics_dropped();
+                return None;
+            }
+        };
+
+        if cgroup.parsed != 0 {
+            let cid = unsafe { CStr::from_ptr(cgroup.path.as_ptr()) };
+            let cid = match cid.to_str() {
+                Ok(cid) => cid,
+                Err(e) => {
+                    warn!("Failed to read cid for {k}: {e}");
+                    self.metrics_dropped();
+                    return None;
+                }
+            };
+            if cid.is_empty() {
+                // Already parsed, but the cgroup does not belong to a container.
+                None
+            } else {
+                Some(cid.to_string())
+            }
+        } else {
+            cgroup.parsed = true as c_char;
+            let path = match unsafe { CStr::from_ptr(cgroup.path.as_ptr()) }.to_str() {
+                Ok(path) => path,
+                Err(e) => {
+                    warn!("Failed to read path for {k}: {e}");
+                    self.metrics_dropped();
+                    return None;
+                }
+            };
+            let cid = Parser::extract_container_id(path);
+
+            if let Some(cid) = &cid {
+                unsafe {
+                    std::ptr::copy(cid.as_ptr(), cgroup.path.as_mut_ptr().cast(), cid.len());
+                }
+                cgroup.path[12] = '\0' as c_char;
+            } else {
+                cgroup.path[0] = '\0' as c_char;
+            }
+            if let Err(e) = self.cgroup_map.insert(k, cgroup, 2) {
+                warn!("Failed to update entry for {k}: {e}");
+            }
+            cid
+        }
+    }
+
+    pub(super) fn extract_container_id(cgroup: &str) -> Option<String> {
+        let cgroup = if let Some(i) = cgroup.rfind(".scope") {
+            cgroup.split_at(i).0
+        } else {
+            cgroup
+        };
+
+        if cgroup.is_empty() || cgroup.len() < 65 {
+            return None;
+        }
+
+        let cgroup = cgroup.split_at(cgroup.len() - 65).1;
+        let (c, cgroup) = cgroup.split_at(1);
+        if c != "/" && c != "-" {
+            return None;
+        }
+
+        if cgroup.chars().all(|c| c.is_ascii_hexdigit()) {
+            Some(cgroup.split_at(12).0.to_owned())
+        } else {
+            None
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn extract_container_id() {
+        let tests = [
+            ("", None),
+            ("init.scope", None),
+            (
+                "/docker/951e643e3c241b225b6284ef2b79a37c13fc64cbf65b5d46bda95fcb98fe63a4",
+                Some("951e643e3c24".to_string()),
+            ),
+            (
+                "/kubepods/kubepods/besteffort/pod690705f9-df6e-11e9-8dc5-025000000001/c3bfd81b7da0be97190a74a7d459f4dfa18f57c88765cde2613af112020a1c4b",
+                Some("c3bfd81b7da0".to_string()),
+            ),
+            (
+                "/kubepods/burstable/pod7cd3dba6-e475-11e9-8f99-42010a8a00d2/2bc55a8cae1704a733ba5d785d146bbed9610483380507cbf00c96b32bb637e1",
+                Some("2bc55a8cae17".to_string()),
+            ),
+            (
+                "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce705797_e47e_11e9_bd71_42010a000002.slice/docker-6525e65814a99d431b6978e8f8c895013176c6c58173b56639d4b020c14e6022.scope",
+                Some("6525e65814a9".to_string()),
+            ),
+            (
+                "/machine.slice/libpod-b6e375cfe46efa5cd90d095603dec2de888c28b203285819233040b5cf1212ac.scope/container",
+                Some("b6e375cfe46e".to_string()),
+            ),
+            (
+                "/machine.slice/libpod-cbdfa0f1f08763b1963c30d98e11e1f052cb67f1e9b7c0ab8a6ca6c70cbcad69.scope/container/kubelet.slice/kubelet-kubepods.slice/kubelet-kubepods-besteffort.slice/kubelet-kubepods-besteffort-pod6eab3b7b_f0a6_4bb8_bff2_d5bc9017c04b.slice/cri-containerd-5ebf11e02dbde102cda4b76bc0e3849a65f9edac7a12bdabfd34db01b9556101.scope",
+                Some("5ebf11e02dbd".to_string()),
+            ),
+        ];
+
+        for (input, expected) in tests {
+            let id = Parser::extract_container_id(input);
+            assert_eq!(id, expected);
+        }
+    }
+}
diff --git a/fact/src/event/process.rs b/fact/src/event/process.rs
index bda63d34..3807b7b2 100644
--- a/fact/src/event/process.rs
+++ b/fact/src/event/process.rs
@@ -60,6 +60,47 @@ pub struct Process {
 }
 
 impl Process {
+    pub fn new(proc: process_t, container_id: Option<String>) -> anyhow::Result<Self> {
+        let comm = slice_to_string(proc.comm.as_slice())?;
+        let exe_path = slice_to_string(proc.exe_path.as_slice())?;
+        let in_root_mount_ns = proc.in_root_mount_ns != 0;
+
+        let lineage = proc.lineage[..proc.lineage_len as usize]
+            .iter()
+            .map(Lineage::try_from)
+            .collect::<Result<Vec<_>, _>>()?;
+
+        let mut converted_args = Vec::new();
+        let args_len = proc.args_len as usize;
+        let mut offset = 0;
+        while offset < args_len {
+            let arg = unsafe { CStr::from_ptr(proc.args.as_ptr().add(offset)) }
+                .to_str()?
+                .to_owned();
+            if arg.is_empty() {
+                break;
+            }
+            offset += arg.len() + 1;
+            converted_args.push(arg);
+        }
+
+        let username = host_info::get_username(proc.uid);
+
+        Ok(Process {
+            comm,
+            args: converted_args,
+            exe_path,
+            container_id,
+            uid: proc.uid,
+            username,
+            gid: proc.gid,
+            login_uid: proc.login_uid,
+            pid: proc.pid,
+            in_root_mount_ns,
+            lineage,
+        })
+    }
+
     /// Create a representation of the current process as best as
     /// possible.
     #[cfg(test)]
@@ -73,7 +114,7 @@ impl Process {
             .unwrap();
         let args = std::env::args().collect::<Vec<String>>();
         let cgroup = std::fs::read_to_string("/proc/self/cgroup").expect("Failed to read cgroup");
-        let container_id = Process::extract_container_id(&cgroup);
+        let container_id = super::parser::Parser::extract_container_id(&cgroup);
         let uid = unsafe { libc::getuid() };
         let gid = unsafe { libc::getgid() };
         let pid = std::process::id();
@@ -98,30 +139,6 @@ impl Process {
             lineage: vec![],
         }
     }
-
-    fn extract_container_id(cgroup: &str) -> Option<String> {
-        let cgroup = if let Some(i) = cgroup.rfind(".scope") {
-            cgroup.split_at(i).0
-        } else {
-            cgroup
-        };
-
-        if cgroup.is_empty() || cgroup.len() < 65 {
-            return None;
-        }
-
-        let cgroup = cgroup.split_at(cgroup.len() - 65).1;
-        let (c, cgroup) = cgroup.split_at(1);
-        if c != "/" && c != "-" {
-            return None;
-        }
-
-        if cgroup.chars().all(|c| c.is_ascii_hexdigit()) {
-            Some(cgroup.split_at(12).0.to_owned())
-        } else {
-            None
-        }
-    }
 }
 
 #[cfg(test)]
@@ -137,53 +154,6 @@ impl PartialEq for Process {
     }
 }
 
-impl TryFrom<process_t> for Process {
-    type Error = anyhow::Error;
-
-    fn try_from(value: process_t) -> Result<Self, Self::Error> {
-        let comm = slice_to_string(value.comm.as_slice())?;
-        let exe_path = slice_to_string(value.exe_path.as_slice())?;
-        let memory_cgroup = unsafe { CStr::from_ptr(value.memory_cgroup.as_ptr()) }.to_str()?;
-        let container_id = Process::extract_container_id(memory_cgroup);
-        let in_root_mount_ns = value.in_root_mount_ns != 0;
-
-        let lineage = value.lineage[..value.lineage_len as usize]
-            .iter()
-            .map(Lineage::try_from)
-            .collect::<Result<Vec<_>, _>>()?;
-
-        let mut converted_args = Vec::new();
-        let args_len = value.args_len as usize;
-        let mut offset = 0;
-        while offset < args_len {
-            let arg = unsafe { CStr::from_ptr(value.args.as_ptr().add(offset)) }
-                .to_str()?
-                .to_owned();
-            if arg.is_empty() {
-                break;
-            }
-            offset += arg.len() + 1;
-            converted_args.push(arg);
-        }
-
-        let username = host_info::get_username(value.uid);
-
-        Ok(Process {
-            comm,
-            args: converted_args,
-            exe_path,
-            container_id,
-            uid: value.uid,
-            username,
-            gid: value.gid,
-            login_uid: value.login_uid,
-            pid: value.pid,
-            in_root_mount_ns,
-            lineage,
-        })
-    }
-}
-
 impl From<Process> for fact_api::ProcessSignal {
     fn from(value: Process) -> Self {
         let Process {
@@ -228,45 +198,3 @@ impl From<Process> for fact_api::ProcessSignal {
         }
     }
 }
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn extract_container_id() {
-        let tests = [
-            ("", None),
-            ("init.scope", None),
-            (
-                "/docker/951e643e3c241b225b6284ef2b79a37c13fc64cbf65b5d46bda95fcb98fe63a4",
-                Some("951e643e3c24".to_string()),
-            ),
-            (
-                "/kubepods/kubepods/besteffort/pod690705f9-df6e-11e9-8dc5-025000000001/c3bfd81b7da0be97190a74a7d459f4dfa18f57c88765cde2613af112020a1c4b",
-                Some("c3bfd81b7da0".to_string()),
-            ),
-            (
-                "/kubepods/burstable/pod7cd3dba6-e475-11e9-8f99-42010a8a00d2/2bc55a8cae1704a733ba5d785d146bbed9610483380507cbf00c96b32bb637e1",
-                Some("2bc55a8cae17".to_string()),
-            ),
-            (
-                "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce705797_e47e_11e9_bd71_42010a000002.slice/docker-6525e65814a99d431b6978e8f8c895013176c6c58173b56639d4b020c14e6022.scope",
-                Some("6525e65814a9".to_string()),
-            ),
-            (
-                "/machine.slice/libpod-b6e375cfe46efa5cd90d095603dec2de888c28b203285819233040b5cf1212ac.scope/container",
-                Some("b6e375cfe46e".to_string()),
-            ),
-            (
-                "/machine.slice/libpod-cbdfa0f1f08763b1963c30d98e11e1f052cb67f1e9b7c0ab8a6ca6c70cbcad69.scope/container/kubelet.slice/kubelet-kubepods.slice/kubelet-kubepods-besteffort.slice/kubelet-kubepods-besteffort-pod6eab3b7b_f0a6_4bb8_bff2_d5bc9017c04b.slice/cri-containerd-5ebf11e02dbde102cda4b76bc0e3849a65f9edac7a12bdabfd34db01b9556101.scope",
-                Some("5ebf11e02dbd".to_string()),
-            ),
-        ];
-
-        for (input, expected) in tests {
-            let id = Process::extract_container_id(input);
-            assert_eq!(id, expected);
-        }
-    }
-}
diff --git a/fact/src/host_info.rs b/fact/src/host_info.rs
index 53a316fd..f8e71a00 100644
--- a/fact/src/host_info.rs
+++ b/fact/src/host_info.rs
@@ -180,3 +180,30 @@ impl SystemInfo {
         Ok(SystemInfo { kernel, arch })
     }
 }
+
+pub fn get_cgroup_paths() -> Vec<PathBuf> {
+    let Ok(file) = File::open("/proc/mounts") else {
+        warn!("Failed to open /proc/mounts");
+        return Vec::new();
+    };
+
+    BufReader::new(file)
+        .lines()
+        .filter_map(|line| match line {
+            Ok(line) => Some(line),
+            Err(e) => {
+                warn!("Failed to read line from /proc/mounts: {e}");
+                None
+            }
+        })
+        .filter_map(|line| {
+            // /proc/mounts fields: device, mount point, fs type, options, dump, pass.
+            let mut parts = line.split(' ');
+            let _device = parts.next()?;
+            let mount_point = parts.next()?;
+            let fs_type = parts.next()?;
+            if fs_type == "cgroup" || fs_type == "cgroup2" {
+                Some(PathBuf::from(mount_point))
+            } else {
+                None
+            }
+        })
+        .collect()
+}
diff --git a/fact/src/lib.rs b/fact/src/lib.rs
index d198f67a..131078d9 100644
--- a/fact/src/lib.rs
+++ b/fact/src/lib.rs
@@ -87,7 +87,11 @@ pub async fn run(config: FactConfig) -> anyhow::Result<()> {
         reloader.config().json(),
     )?;
     endpoints::Server::new(exporter.clone(), reloader.endpoint(), running.subscribe()).start();
-    let mut bpf_handle = bpf.start(running.subscribe(), exporter.metrics.bpf_worker.clone());
+    let mut bpf_handle = bpf.start(
+        running.subscribe(),
+        exporter.metrics.bpf_worker.clone(),
+        exporter.metrics.event_parser.clone(),
+    );
     reloader.start(running.subscribe());
 
     let mut sigterm = signal(SignalKind::terminate())?;
diff --git a/fact/src/metrics/kernel_metrics.rs b/fact/src/metrics/kernel_metrics.rs
index d089a1c8..e3c10346 100644
--- a/fact/src/metrics/kernel_metrics.rs
+++ b/fact/src/metrics/kernel_metrics.rs
@@ -10,6 +10,7 @@ use super::{EventCounter, LabelValues};
 
 pub struct KernelMetrics {
     file_open: EventCounter,
     path_unlink: EventCounter,
+    cgroup_attach_task: EventCounter,
     map: PerCpuArray<MapData, metrics_t>,
 }
@@ -25,13 +26,20 @@ impl KernelMetrics {
             "Events processed by the path_unlink LSM hook",
             &[], // Labels are not needed since `collect` will add them all
         );
+        let cgroup_attach_task = EventCounter::new(
+            "kernel_cgroup_attach_task_events",
+            "Events processed by the cgroup_attach_task tracepoint",
+            &[], // Labels are not needed since `collect` will add them all
+        );
 
         file_open.register(reg);
         path_unlink.register(reg);
+        cgroup_attach_task.register(reg);
 
         KernelMetrics {
             file_open,
             path_unlink,
+            cgroup_attach_task,
             map: kernel_metrics,
         }
     }
@@ -78,6 +86,7 @@ impl KernelMetrics {
 
         KernelMetrics::refresh_labels(&self.file_open, &metrics.file_open);
         KernelMetrics::refresh_labels(&self.path_unlink, &metrics.path_unlink);
+        KernelMetrics::refresh_labels(&self.cgroup_attach_task, &metrics.cgroup_attach_task);
 
         Ok(())
     }
diff --git a/fact/src/metrics/mod.rs b/fact/src/metrics/mod.rs
index 38e579b8..3ef49f60 100644
--- a/fact/src/metrics/mod.rs
+++ b/fact/src/metrics/mod.rs
@@ -97,6 +97,15 @@ impl EventCounter {
             .unwrap()
             .inc_by(n);
     }
+
+    pub fn ignored(&self) {
+        self.counter
+            .get(&MetricEvents {
+                label: LabelValues::Ignored,
+            })
+            .unwrap()
+            .inc();
+    }
 }
 
 #[derive(Debug, Clone)]
@@ -135,6 +144,7 @@ impl OutputMetrics {
 pub struct Metrics {
     pub bpf_worker: EventCounter,
     pub output: OutputMetrics,
+    pub event_parser: EventCounter,
 }
 
 impl Metrics {
@@ -150,12 +160,24 @@ impl Metrics {
         );
         bpf_worker.register(registry);
 
+        let event_parser = EventCounter::new(
+            "event_parser_events",
+            "Metrics for the event parsing process",
+            &[
+                LabelValues::Added,
+                LabelValues::Dropped,
+                LabelValues::Ignored,
+            ],
+        );
+        event_parser.register(registry);
+
         let output_metrics = OutputMetrics::new();
         output_metrics.register(registry);
 
         Metrics {
             bpf_worker,
             output: output_metrics,
+            event_parser,
         }
     }
 }
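
A note on the cgroup_map capacity in maps.h: in C (as in Rust) the '^' operator is bitwise XOR, not exponentiation, so a "2 to the 16th"-style map size has to be written with a shift. A quick standalone check of the arithmetic (illustrative only):

fn main() {
    // '^' is XOR: 2 ^ 16 == 18, so (2 ^ 16) - 1 would size the map at 17 entries.
    assert_eq!(2 ^ 16, 18);
    assert_eq!((2 ^ 16) - 1, 17);
    // The intended capacity, one short of 2 to the 16th, needs a shift instead.
    assert_eq!((1 << 16) - 1, 65535);
}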
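
On the key shared by the two sides of cgroup_map: the BPF program stores entries under dst_cgrp->kn->id and process_fill records bpf_get_current_cgroup_id(), while Parser::fill_in_map pre-populates the same map using DirEntryExt::ino(). On cgroup v2 these are expected to be the same value, since the cgroup id is the kernfs inode number of the cgroup directory. A minimal sketch that prints that inode for each top-level cgroup (assumes a cgroup2 mount at /sys/fs/cgroup; not part of the patch):

use std::os::unix::fs::DirEntryExt;

fn main() -> std::io::Result<()> {
    for entry in std::fs::read_dir("/sys/fs/cgroup")? {
        let entry = entry?;
        if entry.path().is_dir() {
            // On cgroup v2 this inode number matches the id the eBPF side uses as the map key.
            println!("{:>12}  {}", entry.ino(), entry.path().display());
        }
    }
    Ok(())
}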
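
get_cgroup_paths in host_info.rs relies on the whitespace-separated field layout of /proc/mounts (device, mount point, filesystem type, options, dump, pass). A worked example with a representative cgroup2 line (values are illustrative):

fn main() {
    let line = "cgroup2 /sys/fs/cgroup cgroup2 rw,nosuid,nodev,noexec,relatime 0 0";
    let fields: Vec<&str> = line.split(' ').collect();
    assert_eq!(fields[1], "/sys/fs/cgroup"); // mount point: what get_cgroup_paths collects
    assert_eq!(fields[2], "cgroup2"); // filesystem type: what it filters on
}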
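
The length arithmetic behind extract_container_id: a full container ID is 64 hex characters, so the interesting tail of a cgroup path is 65 characters long (one separator, '/' or '-', plus the ID), and the reported short ID is the first 12 of those hex characters. A worked check against one of the test inputs from parser.rs:

fn main() {
    let path = "/docker/951e643e3c241b225b6284ef2b79a37c13fc64cbf65b5d46bda95fcb98fe63a4";
    let tail = &path[path.len() - 65..];
    assert!(tail.starts_with('/')); // separator accepted by the parser ('/' or '-')
    assert_eq!(tail[1..].len(), 64); // full container ID
    assert_eq!(&tail[1..13], "951e643e3c24"); // 12-character short ID
}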
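
Process::new walks process_t::args as a flat buffer of NUL-terminated strings bounded by args_len, stopping at the first empty string. A standalone sketch of the same walk over a plain byte slice (split_packed_args is a hypothetical helper for illustration, not part of the crate):

fn split_packed_args(buf: &[u8], args_len: usize) -> Vec<String> {
    // Arguments are laid out back to back, each terminated by a NUL byte;
    // an empty entry marks the end of the useful data.
    buf[..args_len.min(buf.len())]
        .split(|b| *b == 0)
        .take_while(|arg| !arg.is_empty())
        .map(|arg| String::from_utf8_lossy(arg).into_owned())
        .collect()
}

fn main() {
    let raw = b"cat\0/etc/passwd\0\0\0";
    assert_eq!(split_packed_args(raw, raw.len()), vec!["cat", "/etc/passwd"]);
}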