Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions proc/process.c
Original file line number Diff line number Diff line change
Expand Up @@ -1503,6 +1503,12 @@ int proc_vfork(void)
(void)vm_objectPut(spawn->object);
ret = spawn->state;
vm_kfree(spawn);
if ((ret < 0) && (posix_getppid(pid) >= 0)) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please add a comment explaining this code.

/* If the child managed to register itself with the POSIX subsystem before the failure,
 * wait for its complete death and clean up its POSIX metadata.
 */
(void)posix_waitpid(pid, NULL, 0);
}
return (ret < 0) ? ret : pid;
}

Expand Down
46 changes: 23 additions & 23 deletions vm/amap.c
Original file line number Diff line number Diff line change
Expand Up @@ -118,8 +118,6 @@ amap_t *amap_create(amap_t *amap, size_t *offset, size_t size)
(void)proc_lockClear(&amap->lock);
return amap;
}

amap->refs--;
}

/* Allocate anon pointer arrays in chunks
Expand Down Expand Up @@ -151,6 +149,7 @@ amap_t *amap_create(amap_t *amap, size_t *offset, size_t size)
}

if (amap != NULL) {
amap->refs--;
(void)proc_lockClear(&amap->lock);
}

Expand Down Expand Up @@ -226,9 +225,9 @@ static int amap_unmap(vm_map_t *map, void *v)
}


page_t *amap_page(vm_map_t *map, amap_t *amap, vm_object_t *o, void *vaddr, size_t aoffs, u64 offs, vm_prot_t prot)
int amap_page(vm_map_t *map, amap_t *amap, vm_object_t *o, void *vaddr, size_t aoffs, u64 offs, vm_prot_t prot, page_t **res)
{
page_t *p = NULL;
int err = EOK;
anon_t *a;
void *v, *w;

Expand All @@ -237,61 +236,60 @@ page_t *amap_page(vm_map_t *map, amap_t *amap, vm_object_t *o, void *vaddr, size
a = amap->anons[aoffs / SIZE_PAGE];
if (a != NULL) {
(void)proc_lockSet(&a->lock);
p = a->page;
*res = a->page;
if (!(a->refs > 1 && (prot & PROT_WRITE) != 0U)) {
(void)proc_lockClear(&a->lock);
(void)proc_lockClear(&amap->lock);
return p;
return EOK;
}
a->refs--;
}
else {
p = vm_objectPage(map, &amap, o, vaddr, offs);
if (p == NULL) {
err = vm_objectPage(map, &amap, o, vaddr, offs, res);
if ((err != EOK) || (*res == NULL)) {
/* amap could be invalidated while fetching from the object's store */
if (amap != NULL) {
(void)proc_lockClear(&amap->lock);
}
return NULL;
return err;
}
else if (o != NULL && (prot & PROT_WRITE) == 0U) {
(void)proc_lockClear(&amap->lock);
return p;
return EOK;
}
else {
/* No action required */
}
}

v = amap_map(map, p);
v = amap_map(map, *res);
if (v == NULL) {
if (a != NULL) {
(void)proc_lockClear(&a->lock);
}
(void)proc_lockClear(&amap->lock);
return NULL;
return -ENOMEM;
}

if (a != NULL || o != NULL) {
/* Copy from object or shared anon */
p = vm_pageAlloc(SIZE_PAGE, PAGE_OWNER_APP);
if (p == NULL) {
*res = vm_pageAlloc(SIZE_PAGE, PAGE_OWNER_APP);
if (*res == NULL) {
(void)amap_unmap(map, v);
if (a != NULL) {
(void)proc_lockClear(&a->lock);
}
(void)proc_lockClear(&amap->lock);
return NULL;
return -ENOMEM;
}
w = amap_map(map, p);
w = amap_map(map, *res);
if (w == NULL) {
vm_pageFree(p);
vm_pageFree(*res);
(void)amap_unmap(map, v);
if (a != NULL) {
(void)proc_lockClear(&a->lock);
}
(void)proc_lockClear(&amap->lock);
return NULL;
return -ENOMEM;
}
hal_memcpy(w, v, SIZE_PAGE);
(void)amap_unmap(map, w);
Expand All @@ -303,17 +301,19 @@ page_t *amap_page(vm_map_t *map, amap_t *amap, vm_object_t *o, void *vaddr, size
(void)amap_unmap(map, v);

if (a != NULL) {
a->refs--;
(void)proc_lockClear(&a->lock);
}

amap->anons[aoffs / SIZE_PAGE] = anon_new(p);
amap->anons[aoffs / SIZE_PAGE] = anon_new(*res);
if (amap->anons[aoffs / SIZE_PAGE] == NULL) {
vm_pageFree(p);
p = NULL;
vm_pageFree(*res);
*res = NULL;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The previous version returned -ENOMEM in this case.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nice catch, thanks

err = -ENOMEM;
}
(void)proc_lockClear(&amap->lock);

return p;
return err;
}


Expand Down
2 changes: 1 addition & 1 deletion vm/amap.h
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ typedef struct _amap_t {
} amap_t;


page_t *amap_page(struct _vm_map_t *map, amap_t *amap, struct _vm_object_t *o, void *vaddr, size_t aoffs, u64 offs, vm_prot_t prot);
int amap_page(struct _vm_map_t *map, amap_t *amap, struct _vm_object_t *o, void *vaddr, size_t aoffs, u64 offs, vm_prot_t prot, page_t **res);


void amap_clear(amap_t *amap, size_t offset, size_t size);
Expand Down
47 changes: 28 additions & 19 deletions vm/map.c
Original file line number Diff line number Diff line change
Expand Up @@ -624,11 +624,7 @@ void *_vm_mmap(vm_map_t *map, void *vaddr, page_t *p, size_t size, vm_prot_t pro

for (w = vaddr; w < vaddr + size; w += SIZE_PAGE) {
if (_map_force(map, e, w, prot) != 0) {
amap_putanons(e->amap, e->aoffs, (ptr_t)w - (ptr_t)vaddr);

(void)pmap_remove(&map->pmap, vaddr, (void *)((ptr_t)w + SIZE_PAGE));

_entry_put(map, e);
(void)_vm_munmap(map, vaddr, size);
return NULL;
}
}
Expand Down Expand Up @@ -741,15 +737,18 @@ static int _map_force(vm_map_t *map, map_entry_t *e, void *paddr, vm_prot_t prot
u64 eoffs;
page_t *p = NULL;
vm_prot_t flagsCheck = map_checkProt(e->prot, prot);
amap_t *amapNew;
int err;

if (flagsCheck != 0U) {
return -EINVAL;
}
if ((((prot & PROT_WRITE) != 0U) && ((e->flags & MAP_NEEDSCOPY) != 0U)) || ((e->object == NULL) && (e->amap == NULL))) {
e->amap = amap_create(e->amap, &e->aoffs, e->size);
if (e->amap == NULL) {
amapNew = amap_create(e->amap, &e->aoffs, e->size);
if (amapNew == NULL) {
return -ENOMEM;
}
e->amap = amapNew;

e->flags &= ~MAP_NEEDSCOPY;
}
Expand All @@ -758,10 +757,14 @@ static int _map_force(vm_map_t *map, map_entry_t *e, void *paddr, vm_prot_t prot
eoffs = ((e->offs == VM_OFFS_MAX) ? VM_OFFS_MAX : (e->offs + offs));

if (e->amap == NULL) {
p = vm_objectPage(map, NULL, e->object, paddr, eoffs);
err = vm_objectPage(map, NULL, e->object, paddr, eoffs, &p);
}
else { /* if (e->object != VM_OBJ_PHYSMEM) FIXME disabled until memory objects are created for syspage progs */
p = amap_page(map, e->amap, e->object, paddr, e->aoffs + offs, eoffs, prot);
err = amap_page(map, e->amap, e->object, paddr, e->aoffs + offs, eoffs, prot, &p);
}

if (err != EOK) {
return err;
}

attr = vm_protToAttr(prot) | vm_flagsToAttr(e->flags);
Expand Down Expand Up @@ -1120,6 +1123,7 @@ int vm_mapCopy(process_t *proc, vm_map_t *dst, vm_map_t *src)
rbnode_t *n;
map_entry_t *e, *f;
size_t offs;
int err = EOK;

(void)proc_lockSet2(&src->lock, &dst->lock);

Expand Down Expand Up @@ -1153,18 +1157,23 @@ int vm_mapCopy(process_t *proc, vm_map_t *dst, vm_map_t *src)

if ((proc == NULL) || (proc->lazy == 0U)) {
for (offs = 0; offs < f->size; offs += SIZE_PAGE) {
if (_map_force(dst, f, (void *)((ptr_t)f->vaddr + offs), f->prot) != 0) {
(void)proc_lockClear(&dst->lock);
(void)proc_lockClear(&src->lock);
return -ENOMEM;
}

if (_map_force(src, e, (void *)((ptr_t)e->vaddr + offs), e->prot) != 0) {
(void)proc_lockClear(&dst->lock);
(void)proc_lockClear(&src->lock);
return -ENOMEM;
err = _map_force(dst, f, (void *)((ptr_t)f->vaddr + offs), f->prot);
if (err != EOK) {
break;
}
}
if (err != EOK) {
(void)proc_lockClear(&dst->lock);
/* Destroy map before _map_force on source map to ensure refcounts are cleared and avoid unnecessary copies in amap_page */
vm_mapDestroy(proc, dst);
}
for (offs = 0; offs < e->size; offs += SIZE_PAGE) {
LIB_ASSERT_ALWAYS(_map_force(src, e, (void *)((ptr_t)e->vaddr + offs), e->prot) == EOK, "Broken src map during mapCopy");
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why kernel panic on ENOMEM?

Copy link
Contributor Author

@etiaro etiaro Mar 24, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

ENOMEM should not happen for the source map when lazy == 0. This loop should effectively clear the NEEDSCOPY flags and remap the pages with their original attributes.

What would be better to do here than a kernel panic?
The source process's memory is corrupted, so we shouldn't let it run. To me, a reboot with an error message seems more reasonable on an RTOS than killing a parent process that tried to fork(). Restarting the process could be a viable option, but we are running out of memory, so it is likely to fail anyway.

Ideally, we should stop pretending that the lazy mechanism is available and handle mapCopy without modifying the src map at any point, but IMO that is a larger rewrite that deserves a separate PR.

}
if (err != EOK) {
(void)proc_lockClear(&src->lock);
return err;
}
}
}

Expand Down
44 changes: 24 additions & 20 deletions vm/object.c
Original file line number Diff line number Diff line change
Expand Up @@ -207,33 +207,36 @@ static page_t *object_fetch(oid_t oid, u64 offs)
}


page_t *vm_objectPage(vm_map_t *map, amap_t **amap, vm_object_t *o, void *vaddr, u64 offs)
int vm_objectPage(vm_map_t *map, amap_t **amap, vm_object_t *o, void *vaddr, u64 offs, page_t **res)
{
page_t *p;
int err;

if (o == NULL) {
return vm_pageAlloc(SIZE_PAGE, PAGE_OWNER_APP);
*res = vm_pageAlloc(SIZE_PAGE, PAGE_OWNER_APP);
return (*res != NULL) ? EOK : -ENOMEM;
}

if (o == VM_OBJ_PHYSMEM) {
/* parasoft-suppress-next-line MISRAC2012-RULE_14_3 "Check is needed on targets where sizeof(offs) != sizeof(addr_t)" */
if (offs > (addr_t)-1) {
return NULL;
return -ERANGE;
}
return _page_get((addr_t)offs);
*res = _page_get((addr_t)offs);
/* *res can be NULL when an address outside of the defined physical maps is used */
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could you describe this in the header file?

return EOK;
}

(void)proc_lockSet(&object_common.lock);

if (offs >= o->size) {
(void)proc_lockClear(&object_common.lock);
return NULL;
return -EINVAL;
}

p = o->pages[offs / SIZE_PAGE];
if (p != NULL) {
*res = o->pages[offs / SIZE_PAGE];
if (*res != NULL) {
(void)proc_lockClear(&object_common.lock);
return p;
return EOK;
}

/* Fetch page from backing store */
Expand All @@ -246,32 +249,33 @@ page_t *vm_objectPage(vm_map_t *map, amap_t **amap, vm_object_t *o, void *vaddr,

(void)proc_lockClear(&map->lock);

p = object_fetch(o->oid, offs);
*res = object_fetch(o->oid, offs);

if (vm_lockVerify(map, amap, o, vaddr, offs) != 0) {
if (p != NULL) {
vm_pageFree(p);
err = vm_lockVerify(map, amap, o, vaddr, offs);
if (err != 0) {
if (*res != NULL) {
vm_pageFree(*res);
}

return NULL;
return err;
}

(void)proc_lockSet(&object_common.lock);

if (o->pages[offs / SIZE_PAGE] != NULL) {
/* Someone loaded a page in the meantime, use it */
if (p != NULL) {
vm_pageFree(p);
if (*res != NULL) {
vm_pageFree(*res);
}

p = o->pages[offs / SIZE_PAGE];
*res = o->pages[offs / SIZE_PAGE];
(void)proc_lockClear(&object_common.lock);
return p;
return EOK;
}

o->pages[offs / SIZE_PAGE] = p;
o->pages[offs / SIZE_PAGE] = *res;
(void)proc_lockClear(&object_common.lock);
return p;
return EOK;
}


Expand Down
6 changes: 5 additions & 1 deletion vm/object.h
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,11 @@ int vm_objectGet(vm_object_t **o, oid_t oid);
int vm_objectPut(vm_object_t *o);


page_t *vm_objectPage(struct _vm_map_t *map, amap_t **amap, vm_object_t *o, void *vaddr, u64 offs);
/* Fills the appropriate page_t pointer into *res;
 * allocates a new page when o == NULL.
 * Note that when o == VM_OBJ_PHYSMEM and offs is outside of the defined physical maps, *res will be NULL even though a mapping can still be made.
 */
int vm_objectPage(struct _vm_map_t *map, amap_t **amap, vm_object_t *o, void *vaddr, u64 offs, page_t **res);


vm_object_t *vm_objectContiguous(size_t size);
Expand Down
1 change: 1 addition & 0 deletions vm/page.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ page_t *vm_pageAlloc(size_t size, vm_flags_t flags);
void vm_pageFree(page_t *p);


/* Returns NULL when addr is outside of the defined physical maps (MMU) */
page_t *_page_get(addr_t addr);


Expand Down
Loading