Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Empty file added pyplugins/hyperfile/__init__.py
Empty file.
187 changes: 187 additions & 0 deletions pyplugins/hyperfile/devfs.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,187 @@
from penguin import Plugin, plugins
from hyper.portal import PortalCmd
from hyper.consts import HYPER_OP as hop
from typing import List, Dict, Generator, Optional, Tuple
from hyperfile.models.base import DevFile

class Devfs(Plugin):
    """
    Expose Python DevFile objects as device nodes in the guest's /dev.

    Registrations are queued locally and flushed to the kernel through the
    portal "devfs" interrupt.  Parent directories under /dev are created on
    demand via the portal and their kernel-assigned IDs cached in
    ``_dev_dirs``.
    """

    def __init__(self):
        self.outdir = self.get_arg("outdir")
        self.proj_dir = self.get_arg("proj_dir")
        # Registrations not yet pushed to the kernel: (name, file, major, minor).
        self._pending_devfs: List[Tuple[str, DevFile, int, int]] = []
        # All known device files, keyed by path relative to /dev.
        self._devfs: Dict[str, DevFile] = {}

        # Cache for directory IDs (path -> id). Root "" is ID 0.
        self._dev_dirs: Dict[str, int] = {"": 0}

        plugins.portal.register_interrupt_handler(
            "devfs", self._hyperdevfs_interrupt_handler)

    def _get_overridden_methods(self, devfs_file: DevFile) -> Dict[str, callable]:
        """
        Return the VFS hooks that *devfs_file* actually implements.

        A hook counts as implemented when its code object differs from the
        DevFile default, or when the base class has no default at all (e.g.
        "write_iter", which VFSFile/DevFile do not define) but the subclass
        provides one.  Only these hooks get kernel callbacks installed.
        """
        base = DevFile
        overridden = {}
        for name in [
            "open", "read", "read_iter", "write", "write_iter", "lseek", "release", "poll",
            "ioctl", "compat_ioctl", "mmap", "get_unmapped_area",
            "flush", "fsync", "fasync", "lock"
        ]:
            meth = getattr(devfs_file, name, None)
            base_meth = getattr(base, name, None)
            if meth is None or not hasattr(meth, "__code__"):
                continue
            if base_meth is None:
                # No base implementation exists, so any subclass-provided
                # hook is by definition an override.
                overridden[name] = meth
            elif hasattr(base_meth, "__code__") and meth.__code__ is not base_meth.__code__:
                # Check if method is overridden (different code object)
                overridden[name] = meth
        return overridden

    def _make_ops_struct(self, devfs_file: DevFile):
        """
        Build a ``struct igloo_dev_ops`` whose fields are hypervisor
        callbacks for each hook the file overrides.  Generator: each
        ``kffi.callback`` is driven through the portal via ``yield from``.
        """
        kffi = plugins.kffi
        overridden = self._get_overridden_methods(devfs_file)

        # Build the initialization dictionary dynamically
        init_data = {}
        for name, fn in overridden.items():
            init_data[name] = yield from kffi.callback(fn)

        return kffi.new("struct igloo_dev_ops", init_data)

    def register_devfs(self, devfs_file: DevFile, path: Optional[str] = None, major: Optional[int] = None, minor: Optional[int] = None):
        """
        Queue *devfs_file* for registration under /dev.

        :param devfs_file: the DevFile instance implementing the hooks.
        :param path: device path; falls back to ``devfs_file.PATH``.
        :param major: device major number; falls back to ``MAJOR`` (-1 = dynamic).
        :param minor: device minor number; falls back to ``MINOR``.
        :raises ValueError: if no path is supplied in either way.
        """
        if path:
            fname = path
        else:
            fname = getattr(devfs_file, "PATH", None)
        devfs_file.PATH = fname

        if not fname:
            raise ValueError("DevFile must define PATH or define it in register_devfs")

        major_num = major if major is not None else getattr(devfs_file, "MAJOR", -1)
        minor_num = minor if minor is not None else getattr(devfs_file, "MINOR", 0)

        if fname.startswith("/dev/"):
            fname = fname[len("/dev/"):]  # Remove leading /dev/

        # Deduplicate registration: only queue a kernel interrupt for names
        # and file objects we have not seen before.
        if fname not in self._devfs and devfs_file not in [f for _, f, _, _ in self._pending_devfs]:
            plugins.portal.queue_interrupt("devfs")
            self._pending_devfs.append((fname, devfs_file, major_num, minor_num))

        # Always (re)bind the Python handler, even when the kernel side
        # already knows the name.
        self._devfs[fname] = devfs_file

    def _split_dev_path(self, path: str) -> Tuple[str, str]:
        """
        Splits 'a/b/c' into ('a/b', 'c'). Returns ('', 'c') if no slashes.
        """
        path = path.strip("/")
        if "/" in path:
            parent, fname = path.rsplit("/", 1)
            return parent, fname
        else:
            return "", path

    def _get_or_create_dev_dir(self, dir_path: str) -> Generator[int, None, int]:
        """
        Recursively creates directories via portal and returns the ID of the
        final directory.  Results are cached in ``self._dev_dirs``.

        :raises RuntimeError: if the kernel rejects a directory component.
        """
        parts = [p for p in dir_path.strip("/").split("/") if p]
        if not parts:
            return 0  # Root

        parent_id = 0
        cur_path = ""

        for part in parts:
            cur_path = cur_path + "/" + part if cur_path else part

            # Use cache if available
            if cur_path in self._dev_dirs:
                parent_id = self._dev_dirs[cur_path]
                continue

            kffi = plugins.kffi
            init_data = {
                "name": part.encode("latin-1", errors="ignore"),
                "parent_id": parent_id,
                "replace": 0
            }

            req = kffi.new("struct portal_devfs_dir_req", init_data)
            req_bytes = bytes(req)

            # Ensure HYPER_OP_DEVFS_CREATE_OR_LOOKUP_DIR is defined in your hop consts
            result = yield PortalCmd(
                hop.HYPER_OP_DEVFS_CREATE_OR_LOOKUP_DIR,
                0,
                len(req_bytes),
                None,
                req_bytes
            )

            if result is None or result < 0:
                raise RuntimeError(f"Failed to create/lookup devfs dir '{cur_path}'")

            self._dev_dirs[cur_path] = result
            parent_id = result

        return parent_id

    def _register_devfs(self, devfs_list: List[Tuple[str, DevFile, int, int]]) -> Generator[int, None, None]:
        """
        Push each (name, file, major, minor) registration to the kernel:
        resolve/create the parent directory, build the ops struct, then
        issue HYPER_OP_DEVFS_CREATE_DEVICE.  Failures are logged and the
        remaining registrations continue.
        """
        for fname, devfs_file, major, minor in devfs_list:

            # 1. Resolve path hierarchy
            parent_dir, file_name = self._split_dev_path(fname)

            try:
                parent_id = yield from self._get_or_create_dev_dir(parent_dir)
            except RuntimeError as e:
                self.logger.error(f"Could not register {fname}: {e}")
                continue

            # Validate final filename (should be flat now)
            if not file_name or "/" in file_name:
                self.logger.error(f"Invalid devfs device name after split: '{file_name}'")
                continue

            ops = yield from self._make_ops_struct(devfs_file)
            kffi = plugins.kffi

            init_data = {
                "name": file_name.encode("latin-1", errors="ignore"),
                "major": major,
                "minor": minor,
                "ops": ops,
                "replace": 1,
                # Dwarffi safely ignores keys that don't exist on the target struct,
                # entirely replacing the need for 'hasattr(req, "parent_id")' checks!
                "parent_id": parent_id
            }

            req = kffi.new("struct portal_devfs_create_req", init_data)
            req_bytes = bytes(req)

            result = yield PortalCmd(
                hop.HYPER_OP_DEVFS_CREATE_DEVICE,
                0,
                len(req_bytes),
                None,
                req_bytes
            )

            # NOTE(review): negative results are not treated as errors here,
            # only 0/None — confirm against the kernel-side contract.
            if result == 0 or result is None:
                self.logger.error(f"Failed to register devfs device '{fname}' (kernel returned {result!r})")
                continue

            self.logger.info(f"Registered devfs device '{fname}' with kernel")

    def _hyperdevfs_interrupt_handler(self) -> Generator[bool, None, bool]:
        """
        Portal interrupt handler: drain the pending queue and register each
        device with the kernel.  Always returns False.
        """
        if not self._pending_devfs:
            return False

        # Snapshot and clear first so re-entrant registrations queue cleanly.
        pending = self._pending_devfs[:]
        self._pending_devfs.clear()
        while pending:
            devfs = pending.pop(0)
            yield from self._register_devfs([devfs])
        return False
147 changes: 147 additions & 0 deletions pyplugins/hyperfile/models/base.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,147 @@
from wrappers.ptregs_wrap import PtRegsWrapper
from penguin import getColoredLogger


class BaseFile:
    """
    The root base class for all file types.
    Acts as the 'Argument Sink' to prevent object.__init__ failures.
    """
    PATH = None      # Path of the node, may include the mount prefix
    FS = "unknown"   # One of "procfs", "devfs", "sysfs", or "unknown"

    def __init__(self, *, path: str = None, fs: str = None, **kwargs):
        """
        Consumes 'path' and 'fs' arguments.
        Swallows any remaining kwargs so object.__init__ doesn't crash.
        """
        if path is not None:
            self.PATH = path
        if fs is not None:
            self.FS = fs

        # We do not pass kwargs to super() because object() takes no args.
        super().__init__()

    @property
    def full_path(self) -> str:
        """
        Absolute path of this node including its filesystem mount prefix.

        Normalizes so that both "cpuinfo" and "/proc/cpuinfo" (with
        FS="procfs") yield "/proc/cpuinfo".  Unknown filesystems return
        PATH unchanged.
        """
        if self.PATH is None:
            return "unknown_path"
        mounts = {"procfs": "proc", "devfs": "dev", "sysfs": "sys"}
        mount = mounts.get(self.FS)
        if mount is None:
            return self.PATH
        pth = self.PATH.lstrip("/")
        # Bug fix: after lstrip("/") the path can never start with "/proc/",
        # so the old startswith("/proc/") checks were dead code and produced
        # doubled prefixes like "/proc/proc/cpuinfo".  Compare without the
        # leading slash instead.
        prefix = mount + "/"
        if pth.startswith(prefix):
            pth = pth[len(prefix):]
        return f"/{mount}/{pth}"

    @property
    def logger(self):
        # Lazily-created, cached per-instance colored logger named after
        # the node's filesystem and full path.
        if hasattr(self, "_logger"):
            return self._logger
        self._logger = getColoredLogger(f"hyperfs.{self.FS}.{self.full_path}")
        return self._logger


class VFSFile(BaseFile):
    """
    Base class defining the VFS interface.

    Every hook is a no-op default; subclasses override only the operations
    they care about (override detection compares code objects).
    """

    def open(self, ptregs: PtRegsWrapper, inode: int, file: int) -> None:
        """Called when the file is opened; no-op by default."""

    def read(self, ptregs: PtRegsWrapper, file: int, user_buf: int, size: int, offset_ptr: int) -> None:
        """Classic read into a user buffer; no-op by default."""

    def read_iter(self, ptregs: PtRegsWrapper, kiocb: int, iov_iter: int) -> None:
        """iov_iter-based read; no-op by default."""

    def write(self, ptregs: PtRegsWrapper, file: int, user_buf: int, size: int, offset_ptr: int) -> None:
        """Classic write from a user buffer; no-op by default."""

    def lseek(self, ptregs: PtRegsWrapper, file: int, offset: int, whence: int) -> None:
        """Reposition the file offset; no-op by default."""

    def release(self, ptregs: PtRegsWrapper, inode: int, file: int) -> None:
        """Called on the last close; no-op by default."""

    def poll(self, ptregs: PtRegsWrapper, file: int, poll_table_struct: int) -> None:
        """Readiness polling; no-op by default."""

    def ioctl(self, ptregs: PtRegsWrapper, file: int, cmd: int, arg: int) -> None:
        """Device-specific control; no-op by default."""

    def compat_ioctl(self, ptregs: PtRegsWrapper, file: int, cmd: int, arg: int) -> None:
        """32-bit-compat control path; no-op by default."""

    def mmap(self, ptregs: PtRegsWrapper, file: int, vm_area_struct: int) -> None:
        """Memory-map the file; no-op by default."""

    def get_unmapped_area(self, ptregs: PtRegsWrapper, file: int, addr: int, len_: int, pgoff: int, flags: int) -> None:
        """Choose an mmap address; no-op by default."""


class ProcFile(VFSFile):
    """A /proc file; inherits the generic VFS hook interface unchanged."""
    FS = "procfs"


class DevFile(VFSFile):
    """
    A /dev node.  MAJOR of -1 requests dynamic major-number allocation;
    values may be overridden per-instance via the constructor.
    """
    FS = "devfs"
    MAJOR = -1  # -1 for dynamic
    MINOR = 0

    def __init__(self, *, major: int = None, minor: int = None, **kwargs):
        """Optionally shadow the class-level MAJOR/MINOR on this instance."""
        for attr, value in (("MAJOR", major), ("MINOR", minor)):
            if value is not None:
                setattr(self, attr, value)
        super().__init__(**kwargs)

    def flush(self, ptregs: PtRegsWrapper, file: int, owner: int) -> None:
        """Called on every close; no-op by default."""

    def fsync(self, ptregs: PtRegsWrapper, file: int, start: int, end: int, datasync: int) -> None:
        """Flush a byte range to stable storage; no-op by default."""

    def fasync(self, ptregs: PtRegsWrapper, fd: int, file: int, on: int) -> None:
        """Enable/disable async notification; no-op by default."""

    def lock(self, ptregs: PtRegsWrapper, file: int, cmd: int, file_lock: int) -> None:
        """File-locking hook; no-op by default."""


class SysFile(BaseFile):
    """
    SysFS nodes usually use show/store rather than raw read/write.
    """
    FS = "sysfs"

    def show(self, ptregs: PtRegsWrapper, kobj, attr, buf) -> None:
        """Produce the attribute's contents into *buf*; no-op by default."""

    def store(self, ptregs: PtRegsWrapper, kobj, attr, buf, count) -> None:
        """Consume *count* bytes written to the attribute; no-op by default."""


class SysfsBridge:
    """
    Bridging class that maps SysFS show/store to VFS read/write
    so we can use standard Read/Write mixins.

    Intended to be mixed into a class that provides generator-based
    ``read``/``write`` methods (driven via ``yield from``).
    """
    def show(self, ptregs, kobj, attr, buf):
        # Create a fake 'user_buf' pointer (actually the kernel buf)
        # and call the mixin's read method.
        # Note: Sysfs show ignores offset/size usually, just dumping the whole thing.
        # We might need to adapt arguments based on your specific read implementation.
        # NOTE(review): the keyword 'loff' does not match VFSFile.read's
        # 'offset_ptr' parameter — confirm the targeted mixin actually
        # accepts 'loff', otherwise this raises TypeError at call time.
        yield from self.read(ptregs, file=0, user_buf=buf, size=4096, loff=0)

    def store(self, ptregs, kobj, attr, buf, count):
        # Forward a sysfs store as a write of *count* bytes from the kernel
        # buffer; same 'loff' caveat as show() above.
        yield from self.write(ptregs, file=0, user_buf=buf, size=count, loff=0)
Loading
Loading