diff --git a/src/genode_env/disk_backend.h b/src/genode_env/disk_backend.h index ab392fd1c..61528da59 100644 --- a/src/genode_env/disk_backend.h +++ b/src/genode_env/disk_backend.h @@ -39,7 +39,7 @@ class Phantom::Disk_backend Genode::Env &_env; Genode::Heap &_heap; - Genode::Entrypoint _ep{_env, 2*1024*sizeof(long) , "disk_ep", Genode::Affinity::Location()}; + Genode::Entrypoint _ep{_env, 30*1024*sizeof(long) , "disk_ep", Genode::Affinity::Location()}; Genode::Allocator_avl _block_alloc{&_heap}; struct DJob : Block::Connection::Job @@ -315,4 +315,4 @@ class Phantom::Disk_backend // } }; -#endif \ No newline at end of file +#endif diff --git a/src/genode_env/main.cc b/src/genode_env/main.cc index f2e76e79c..bbfda8c12 100644 --- a/src/genode_env/main.cc +++ b/src/genode_env/main.cc @@ -36,6 +36,11 @@ void setup_adapters(Env &env) Phantom::main_obj = &local_main; } +Genode::size_t Component::stack_size() { + return 60 * 1024 * sizeof(long); +} + + void test_adapters() { log("Checking if main_obj is initialized"); @@ -209,4 +214,4 @@ void Component::construct(Env &env) // int main() // { // log("What are we doing here???"); -// } \ No newline at end of file +// } diff --git a/src/include/kernel/config.h b/src/include/kernel/config.h index 4fc5d2d79..0941d693b 100644 --- a/src/include/kernel/config.h +++ b/src/include/kernel/config.h @@ -17,6 +17,7 @@ #define NEW_TASK_BAR 1 +#define N_OBJMEM_PAGES ((1024L*1024*32)/4096) #define UHCI_INTERRUPT 0 #define OHCI_INTERRUPT 1 diff --git a/src/include/phantom_disk.h b/src/include/phantom_disk.h index 0d1dc4402..f292980b0 100755 --- a/src/include/phantom_disk.h +++ b/src/include/phantom_disk.h @@ -102,6 +102,9 @@ typedef struct phantom_disk_superblock disk_page_no_t snap_to_free; // snap that is yet to free + disk_page_no_t snap_reading; + disk_page_no_t snap_already_read; + u_int32_t magic2; // 32 bits - DISK_STRUCT_MAGIC_SUPER_2 disk_page_no_t boot_list; // List of blocks with bootloader image or 0. 
diff --git a/src/include/vm/alloc.h b/src/include/vm/alloc.h index bff54dfd0..9bcc30b67 100755 --- a/src/include/vm/alloc.h +++ b/src/include/vm/alloc.h @@ -43,7 +43,13 @@ void debug_catch_object(const char *msg, pvm_object_storage_t *p); // gc +pvm_object_t pvm_get_gc_buffer(void); +pvm_object_t pvm_consume_gc_buffer_old(void); +void pvm_swap_gc_buffers(void); + void run_gc(void); +void run_gc_on_snap(void); +void run_gc_incremental(pvm_object_t cycle_candidates); // Make sure this object won't be deleted with refcount dec // used on sys global objects @@ -60,12 +66,15 @@ void do_ref_dec_p(pvm_object_storage_t *p); // for deferred refdec + // ------------------------------------------------------------ // shared between alloc.c and gc.c // Gigant lock for now. TODO extern hal_mutex_t *vm_alloc_mutex; +extern hal_mutex_t *vm_read_snap_mutex; +void init_gc(); void * get_pvm_object_space_start(void); void * get_pvm_object_space_end(void); diff --git a/src/include/vm/object.h b/src/include/vm/object.h index f4e22379b..7532c072b 100755 --- a/src/include/vm/object.h +++ b/src/include/vm/object.h @@ -156,11 +156,13 @@ void pvm_set_field( pvm_object_t , unsigned int no, pvm_object_t val // Need it here? It will be called by usual set field ones... 
pvm_object_t pvm_get_array_ofield(pvm_object_t o, unsigned int slot ); void pvm_set_array_ofield(pvm_object_t o, unsigned int slot, pvm_object_t value ); +void pvm_set_field_norefdec( pvm_object_t o, unsigned int slot, pvm_object_t value ); int get_array_size(pvm_object_t array); #define pvm_get_array_size get_array_size void pvm_append_array(pvm_object_t array, pvm_object_t value_to_append ); void pvm_pop_array(pvm_object_t array, pvm_object_t value_to_pop ); +void pvm_clear_array(pvm_object_t array); // Debug diff --git a/src/include/vm/root.h b/src/include/vm/root.h index 0db6d762f..0f5615477 100755 --- a/src/include/vm/root.h +++ b/src/include/vm/root.h @@ -108,7 +108,8 @@ struct pvm_root_t pvm_object_t root_dir; // Root object directory pvm_object_t kernel_stats; // Persisent kernel statistics pvm_object_t class_dir; // .internal.directory of all classes used - class load cache - TODO must use weak refs or cleanup on ref cnt == 1 - + pvm_object_t gc_buffer; // array of candidates for garbage collection + pvm_object_t gc_buffer_old; // array of candidates for garbage collection }; extern struct pvm_root_t pvm_root; @@ -267,7 +268,11 @@ extern struct pvm_root_t pvm_root; #define PVM_ROOT_CLASS_DIR 73 -#define PVM_ROOT_OBJECTS_COUNT (PVM_ROOT_CLASS_DIR+30) +#define PVM_ROOT_GC_BUFFER 74 + +#define PVM_ROOT_GC_BUFFER_OLD 75 + +#define PVM_ROOT_OBJECTS_COUNT (PVM_ROOT_GC_BUFFER_OLD+30) diff --git a/src/phantom/isomem/fsck.c b/src/phantom/isomem/fsck.c index 8e6a9dba5..50e879ca5 100755 --- a/src/phantom/isomem/fsck.c +++ b/src/phantom/isomem/fsck.c @@ -593,35 +593,63 @@ static void free_blocklist_page_snap_worker(disk_page_no_t toFree, int flags) pager_free_blocklist_page_locked( toFree ); } -void phantom_free_snap( - disk_page_no_t old_snap_start, - disk_page_no_t curr_snap_start, - disk_page_no_t new_snap_start - ) -{ - if( old_snap_start == 0 ) - { - SHOW_FLOW0( 0, "*** No old snap, skip list deletion ***"); - return; - } - SHOW_FLOW0( 0, "*** freeing old snap 
***"); +void phantom_free_snap( + disk_page_no_t* to_free_arr, int to_free_arr_len, + disk_page_no_t* actual_arr, int actual_arr_len +) { fsck_create_map(); - fsck_list_justadd_as_free( old_snap_start ); - fsck_list_justadd_as_used( curr_snap_start ); - fsck_list_justadd_as_used( new_snap_start ); - + for (int i = 0; i < to_free_arr_len; i++) { + if (to_free_arr[i] != 0) { + ph_printf("Free old snap blk: %ld\n", (long)to_free_arr[i]); + fsck_list_justadd_as_free(to_free_arr[i]); + } + } - // go through list, free pages that are finally free in map + for (int i = 0; i < actual_arr_len; i++) { + if (actual_arr[i] != 0) { + ph_printf("Mark new snap blk: %ld\n", (long)actual_arr[i]); + fsck_list_justadd_as_used(actual_arr[i]); + } + } + iterate_map(free_snap_worker, MAP_FREE); iterate_map(free_blocklist_page_snap_worker, MAP_LIST_NODE); pager_commit_active_free_list(); - fsck_delete_map(); - } +// void phantom_free_snap( +// disk_page_no_t old_snap_start, +// disk_page_no_t curr_snap_start, +// disk_page_no_t new_snap_start, +// disk_page_no_t snap_reading +// ) +// { +// if( old_snap_start == 0 ) +// { +// SHOW_FLOW0( 0, "*** No old snap, skip list deletion ***"); +// return; +// } + +// SHOW_FLOW0( 0, "*** freeing old snap ***"); + // fsck_create_map(); + +// fsck_list_justadd_as_free( old_snap_start ); +// fsck_list_justadd_as_used( curr_snap_start ); +// fsck_list_justadd_as_used( new_snap_start ); +// fsck_list_justadd_as_used( snap_reading ); + +// // go through list, free pages that are finally free in map +// iterate_map(free_snap_worker, MAP_FREE); +// iterate_map(free_blocklist_page_snap_worker, MAP_LIST_NODE); +// pager_commit_active_free_list(); + +// fsck_delete_map(); + +// } + diff --git a/src/phantom/isomem/genode_threads.c b/src/phantom/isomem/genode_threads.c index 291f7fe71..b13a3fbf0 100644 --- a/src/phantom/isomem/genode_threads.c +++ b/src/phantom/isomem/genode_threads.c @@ -65,6 +65,7 @@ phantom_thread_t *get_current_thread() tid_t 
get_current_tid(void) { + ph_printf("get_current_tid\n"); _stub_print(); return (int)pthread_self(); } @@ -191,4 +192,4 @@ errno_t t_set_snapper_flag(void) // // Threads do not work in this mode // } -#endif \ No newline at end of file +#endif diff --git a/src/phantom/isomem/main.c b/src/phantom/isomem/main.c index 7869bf8c4..74987f465 100644 --- a/src/phantom/isomem/main.c +++ b/src/phantom/isomem/main.c @@ -244,6 +244,7 @@ int phantom_main_entry_point(int argc, char **argv, char **envp) // heap_init_mutex(); // After threads // OK pvm_alloc_threaded_init(); // After threads // OK + init_gc(); // Scheduler is contolled by Genode /* diff --git a/src/phantom/isomem/pager.h b/src/phantom/isomem/pager.h index 00b36ca26..423893967 100755 --- a/src/phantom/isomem/pager.h +++ b/src/phantom/isomem/pager.h @@ -128,12 +128,10 @@ void pager_start_io(); void phantom_fsck(int do_rebuild ); -void phantom_free_snap( - disk_page_no_t old_snap_start, - disk_page_no_t curr_snap_start, - disk_page_no_t new_snap_start +void phantom_free_snap( + disk_page_no_t* to_free_arr, int to_free_arr_len, + disk_page_no_t* actual_arr, int actual_arr_len ); - #endif // PAGER_H diff --git a/src/phantom/isomem/vm_cn_udp.c b/src/phantom/isomem/vm_cn_udp.c index af85b6dfb..105dfc161 100755 --- a/src/phantom/isomem/vm_cn_udp.c +++ b/src/phantom/isomem/vm_cn_udp.c @@ -113,6 +113,7 @@ errno_t cn_udp_init( struct data_area_4_connection *c, struct data_area_4_thread SHOW_FLOW( 1, "connect udp %s", suffix ); struct cn_udp_volatile *vp = c->v_kernel_state; + (void) vp; #if HAVE_NET int rc = udp_open( &vp->udp_endpoint ); diff --git a/src/phantom/isomem/vm_connect.c b/src/phantom/isomem/vm_connect.c index e9c617dd0..cd5e618ad 100755 --- a/src/phantom/isomem/vm_connect.c +++ b/src/phantom/isomem/vm_connect.c @@ -424,8 +424,8 @@ errno_t phantom_connect_object( struct data_area_4_connection *da, struct data_a da->v_kernel_state = 0; // now create object for persistent state - - if(te->persistent_state_size) + 
// since this function also restarts connection, this may already be created + if(te->persistent_state_size && da->p_kernel_state == NULL) { pvm_object_t bo = pvm_create_binary_object( te->persistent_state_size, 0); if( pvm_isnull(bo) ) @@ -436,15 +436,10 @@ errno_t phantom_connect_object( struct data_area_4_connection *da, struct data_a return ENOMEM; } - // XXX : the object is created at each re-connect, so added refdec to free - // old objects. The real question : why do we recreate it in the first place? - // also probably worth to make sure contents of objects are not used anywhere... although it is not incref'ed so... - ref_dec_o(da->p_kernel_state_object); da->p_kernel_state_object = bo; struct data_area_4_binary *bda = pvm_object_da( bo, binary ); da->p_kernel_state = &(bda->data); - } else da->p_kernel_state = 0; diff --git a/src/phantom/isomem/vm_map.c b/src/phantom/isomem/vm_map.c index 6babe0c00..4e6d3b25c 100755 --- a/src/phantom/isomem/vm_map.c +++ b/src/phantom/isomem/vm_map.c @@ -214,6 +214,29 @@ static inline void page_touch_history_arg(vm_page *p, int arg) static void page_fault( vm_page *p, int is_writing ); +// merge with addr_to_vm_page? 
+long addr_to_page_index(unsigned long addr) +{ + addr -= (addr_t)vm_map_start_of_virtual_address_space; + + if( addr >= (((unsigned long)vm_map_vm_page_count) * __MEM_PAGE)) + return -1; + + return addr / __MEM_PAGE; +} + +int addr_to_page_offset(unsigned long addr) +{ + addr -= (addr_t)vm_map_start_of_virtual_address_space; + + if( addr >= (((unsigned long)vm_map_vm_page_count) * __MEM_PAGE)) + return -1; + + return addr % __MEM_PAGE; +} + +vm_page *get_vm_page(unsigned long index) { return &vm_map_map[index]; } + static vm_page *addr_to_vm_page(unsigned long addr, struct trap_state *ts) { // ph_printf("addr_raw=%X\n", addr); @@ -1458,11 +1481,16 @@ void do_snapshot(void) if(enabled) hal_sti(); + pvm_count_allocated_objects(); + phantom_snapper_reenable_threads(); #if USE_SNAP_WAIT signal_snap_snap_passed(); // or before enabling threads? #endif + pvm_swap_gc_buffers(); // merge into 1 function?? + pvm_object_t cycle_candidates = pvm_consume_gc_buffer_old(); + // YES, YES, YES, Snap is nearly done. // Here we have to wait a little and start processing pages manually @@ -1488,6 +1516,8 @@ void do_snapshot(void) // TODO - free prev snap first! -- (why?) 
+ run_gc_incremental(cycle_candidates); + disk_page_no_t new_snap_head = 0; @@ -1657,23 +1687,39 @@ static int request_snap_flag = 0; static int seconds_between_snaps = 5; static void free_old_snapshot() { - if (pager_superblock_ptr()->snap_to_free == 0) return; - + hal_mutex_lock(vm_read_snap_mutex); disk_page_no_t to_free = pager_superblock_ptr()->snap_to_free; + disk_page_no_t snap_already_read = pager_superblock_ptr()->snap_already_read; + ph_printf("snap_to_free: %d, snap_already_read: %d\n", to_free, snap_already_read); + if (to_free == 0 && snap_already_read == 0) { + hal_mutex_unlock(vm_read_snap_mutex); + return; + } + disk_page_no_t snap_reading = pager_superblock_ptr()->snap_reading; + ph_printf("snap_reading: %d\n", snap_reading); + disk_page_no_t actual1 = pager_superblock_ptr()->prev_snap; disk_page_no_t actual2 = pager_superblock_ptr()->last_snap; + disk_page_no_t actual3 = (snap_reading == actual1 || snap_reading == actual2) ? 0 : snap_reading; + disk_page_no_t actual_arr[] = { actual1, actual2, actual3 }; - phantom_free_snap( to_free, actual1, actual2 ); - pager_superblock_ptr()->snap_to_free = 0; + disk_page_no_t free1 = to_free; + disk_page_no_t free2 = (snap_already_read != free1) ? 
snap_already_read : 0; + disk_page_no_t free_arr[] = { free1, free2 }; + + phantom_free_snap(free_arr, 2, actual_arr, 3); + ph_printf("we returned from phantom_free_snap\n"); + pager_superblock_ptr()->snap_to_free = 0; + pager_superblock_ptr()->snap_already_read = 0; // Force all io to complete BEFORE updating superblock pager_fence(); - pager_update_superblock(); pager_free_blocklist_pages(); pager_commit_active_free_list(); pager_update_superblock(); + hal_mutex_unlock(vm_read_snap_mutex); } static void vm_map_snapshot_thread(void) @@ -1685,7 +1731,7 @@ static void vm_map_snapshot_thread(void) { SHOW_FLOW0( 1, "Snapshot loop"); SHOW_FLOW(0, "%d %d %d", stop_lazy_pageout_thread, vm_regular_snaps_enabled, request_snap_flag); - + free_old_snapshot(); if( stop_lazy_pageout_thread ) diff --git a/src/phantom/isomem/vm_map.h b/src/phantom/isomem/vm_map.h index a3aef8b18..31ffcf63f 100755 --- a/src/phantom/isomem/vm_map.h +++ b/src/phantom/isomem/vm_map.h @@ -175,6 +175,9 @@ void vm_page_req_pageout(); void vm_map_wait_for_finish(void); + long addr_to_page_index(unsigned long addr); + int addr_to_page_offset(unsigned long addr); + //extern vm_map_impl vm_map; /* diff --git a/src/phantom/vm/alloc.c b/src/phantom/vm/alloc.c index 16e352fc3..6daa4aded 100755 --- a/src/phantom/vm/alloc.c +++ b/src/phantom/vm/alloc.c @@ -198,7 +198,8 @@ static void init_free_object_header(pvm_object_storage_t *op, unsigned int size) } -#define PVM_MIN_FRAGMENT_SIZE (sizeof(pvm_object_storage_t) + sizeof(int) ) /* should be a minimal object size at least */ +// sizeof(void*) should account for object size aligning (currently 8 byte alignment is used) +#define PVM_MIN_FRAGMENT_SIZE (sizeof(pvm_object_storage_t) + sizeof(void*) ) /* should be a minimal object size at least */ // returns allocated object @@ -402,6 +403,10 @@ static pvm_object_t pvm_find(unsigned int size, int arena) if( PVM_OBJECT_AH_ALLOCATOR_FLAG_FREE != curr->_ah.alloc_flags ) // refcount == 0, but refzero or in buffer or both 
{ DEBUG_PRINT("(c)"); + // not sure why exactly can allocator free objects but it may + // conflict with GC, if this panic ever happens - needs further + // investigation + panic("What is this??"); refzero_process_children( curr ); // Supposed to be free here } @@ -463,7 +468,7 @@ static pvm_object_storage_t * pool_alloc(unsigned int size, int arena) * */ if(vm_alloc_mutex) hal_mutex_unlock( vm_alloc_mutex ); - run_gc(); + // run_gc(); if(vm_alloc_mutex) hal_mutex_lock( vm_alloc_mutex ); #else break; //skip GC, until we bring context to the allocator @@ -662,7 +667,7 @@ static int memcheck_one(unsigned int i, void * start, void * end) return 0; } - ph_printf("\n\n-----------------\nMemcheck ERROR: reached out of arena end at 0x%p (%ld bytes size)\n-----------------\n\n", curr, (long) (((void *)curr) - start) ); + ph_printf("\n\n-----------------\nMemcheck ERROR: reached out of arena end at %p (%ld bytes size)\n-----------------\n\n", curr, (long) (((void *)curr) - start) ); return 1; } @@ -676,7 +681,7 @@ static int64_t count_objects(void *start, void *end) int64_t arena_size = ((char*) end) - ((char*) start); int64_t used_bytes = 0; pvm_object_t curr = start; - int int_count = 0, long_count = 0, str_count = 0, arr_count = 0, page_count = 0; + // int int_count = 0, long_count = 0, str_count = 0, arr_count = 0, page_count = 0; while(((void *)curr) < end) { if(!pvm_alloc_is_object(curr)) return -1; @@ -686,19 +691,20 @@ static int64_t count_objects(void *start, void *end) count++; used_bytes += curr->_ah.exact_size; - if (curr->_class == pvm_get_int_class()) int_count++; - if (curr->_class == pvm_get_long_class()) long_count++; - if (curr->_class == pvm_get_string_class()) str_count++; - if (curr->_class == pvm_get_array_class()) arr_count++; - if (curr->_class == pvm_get_page_class()) page_count++; + // if (curr->_class == pvm_get_int_class()) int_count++; + // if (curr->_class == pvm_get_long_class()) long_count++; + // if (curr->_class == pvm_get_string_class()) 
str_count++; + // if (curr->_class == pvm_get_array_class()) arr_count++; + // if (curr->_class == pvm_get_page_class()) page_count++; } curr = (pvm_object_t)( ((void *)curr) + curr->_ah.exact_size ); } int percent_used = 100 * used_bytes / arena_size; - SHOW_INFO(0, "Arena @%p, ints: %d, longs: %d, strs: %d, arrs: %d, pages: %d | %d%% used", - start, int_count, long_count, str_count, arr_count, page_count, percent_used); + SHOW_INFO(0, "Arena @%p, %d%% used", start, percent_used); + // SHOW_INFO(0, "Arena @%p, ints: %d, longs: %d, strs: %d, arrs: %d, pages: %d | %d%% used", + // start, int_count, long_count, str_count, arr_count, page_count, percent_used); return count; } diff --git a/src/phantom/vm/backtrace.c b/src/phantom/vm/backtrace.c index f70a8481d..57a470ebc 100644 --- a/src/phantom/vm/backtrace.c +++ b/src/phantom/vm/backtrace.c @@ -83,7 +83,8 @@ void pvm_exec_panic( const char *reason, struct data_area_4_thread *tda ) void pvm_backtrace_current_thread(void) -{ +{ + ph_printf("in backtrace\n"); errno_t e = ENOENT; int tid = get_current_tid(); if( tid < 0 ) @@ -131,6 +132,7 @@ void pvm_backtrace_current_thread(void) void pvm_backtrace(struct data_area_4_thread *tda) { + ph_printf("inside inner backtrace\n"); struct pvm_code_handler *code = &tda->code; if(code->IP > code->IP_max) diff --git a/src/phantom/vm/create.c b/src/phantom/vm/create.c index 616857aaa..327c43dae 100755 --- a/src/phantom/vm/create.c +++ b/src/phantom/vm/create.c @@ -337,15 +337,13 @@ pvm_create_page_object( int n_slots, pvm_object_t *init, int init_slots ) assert(init_slots < n_slots); - int i; - for( i = 0; i < init_slots; i++ ) { - data_area[i] = *init++; - ref_inc_o(data_area[i]); // XXX : hack to avoid elements to be freed when the original page is deleted + // assuming pvm_object_create_dynamic returns zeroed out data area + // init new page + if (init) { + ph_memcpy(data_area, init, init_slots * sizeof(pvm_object_t)); + // clean old one to avoid refdecs on deletion + 
ph_memset(init, 0, init_slots * sizeof(pvm_object_t)); } - - for( ; i < n_slots; i++ ) - data_area[i] = pvm_get_null_object(); - return _data; } @@ -524,18 +522,19 @@ void pvm_internal_init_class(pvm_object_t os) } -void pvm_gc_iter_class(gc_iterator_call_t func, pvm_object_t os, void *arg) +void pvm_gc_iter_class(gc_iterator_call_t func, pvm_object_t os, void* arg) { - struct data_area_4_class *da = (struct data_area_4_class *)&(os->da); - gc_fcall( func, arg, da->object_default_interface ); - gc_fcall( func, arg, da->class_name ); - gc_fcall( func, arg, da->class_parent ); + struct data_area_4_class* da = (struct data_area_4_class*)&(os->da); + gc_fcall(func, arg, da->object_default_interface); + gc_fcall(func, arg, da->class_name); + gc_fcall(func, arg, da->class_parent); - gc_fcall( func, arg, da->static_vars ); + gc_fcall(func, arg, da->static_vars); - gc_fcall( func, arg, da->ip2line_maps ); - gc_fcall( func, arg, da->method_names ); - gc_fcall( func, arg, da->field_names ); + gc_fcall(func, arg, da->ip2line_maps); + gc_fcall(func, arg, da->method_names); + gc_fcall(func, arg, da->field_names); + gc_fcall(func, arg, da->const_pool); } @@ -574,6 +573,9 @@ void pvm_gc_iter_thread(gc_iterator_call_t func, pvm_object_t os, void *arg) gc_fcall( func, arg, da->call_frame ); gc_fcall( func, arg, da->owner ); gc_fcall( func, arg, da->environment ); +#if NEW_VM_SLEEP + gc_fcall( func, arg, da->cond_mutex ); +#endif } @@ -640,6 +642,7 @@ void pvm_internal_init_mutex(pvm_object_t os) //in_method = 0; } +// this iter is turned off right now void pvm_gc_iter_mutex(gc_iterator_call_t func, pvm_object_t os, void *arg) { struct data_area_4_mutex * da = (struct data_area_4_mutex *)os->da; @@ -648,12 +651,14 @@ void pvm_gc_iter_mutex(gc_iterator_call_t func, pvm_object_t os, void *arg) //pvm_spin_init( &da->pvm_lock ); // in_method = 0; + // we don't want mutex to keep other threads alive, so this is not needed gc_fcall( func, arg, da->waiting_threads_array ); //for( i = 0; i 
< MAX_MUTEX_THREADS; i++ ) // gc_fcall( func, arg, da->waiting_threads[i] ); + // we don't want mutex to keep the owner thread alive, so this is not needed gc_fcall( func, arg, pvm_da_to_object(da->owner_thread) ); } @@ -1101,6 +1106,7 @@ void pvm_gc_iter_directory(gc_iterator_call_t func, pvm_object_t os, void *arg) gc_fcall( func, arg, da->keys ); gc_fcall( func, arg, da->values ); + gc_fcall( func, arg, da->flags_container ); } @@ -1135,6 +1141,10 @@ void pvm_gc_iter_connection(gc_iterator_call_t func, pvm_object_t os, void *arg pvm_object_t ot; //ot.interface = 0; + // TODO: find out why it could be 0? fix? + if (da->owner == 0) { + return; + } ot = (void *) (((addr_t)da->owner)-DA_OFFSET()); gc_fcall( func, arg, ot ); @@ -1158,7 +1168,7 @@ void pvm_gc_finalizer_connection( pvm_object_t os ) void pvm_restart_connection( pvm_object_t o ) { struct data_area_4_connection *da = pvm_object_da( o, connection ); -ph_printf("restarting connection"); + ph_printf("restarting connection"); da->kernel = 0; int ret = pvm_connect_object(o,0); diff --git a/src/phantom/vm/gc.c b/src/phantom/vm/gc.c index fddaa850f..e18381706 100644 --- a/src/phantom/vm/gc.c +++ b/src/phantom/vm/gc.c @@ -41,18 +41,38 @@ // see Bacon algorithm (US Patent number 6879991, issued April 12, 2005) or (US Patent number 7216136 issued 8 May 2007) // TODO: not implemented, // Need persistent cycles_root_buffer not collected by usual gc/refcount - new internal object type? 
+#define get_array_slot_nocheck(arr, i) ((pvm_object_t*)(pvm_data_area(arr, array)->page->da))[i] +static int find_object_in_buffer(pvm_object_t p) { + pvm_object_t array = pvm_get_gc_buffer(); + for (int i = 0; i < pvm_get_array_size(array); i++) { + if (get_array_slot_nocheck(array, i) == p) return i; + } + + return -1; +} static void cycle_root_buffer_add_candidate(pvm_object_storage_t *p) { - (void)p; + if (find_object_in_buffer(p) >= 0) return; + pvm_append_array(pvm_get_gc_buffer(), p); } -static void cycle_root_buffer_rm_candidate(pvm_object_storage_t *p) +void cycle_root_buffer_rm_candidate(pvm_object_storage_t *p) { - (void)p; + if (!pvm_get_gc_buffer()) return; + int index = find_object_in_buffer(p); + if (index < 0) return; + + struct data_area_4_array *da = pvm_data_area(pvm_get_gc_buffer(), array); + pvm_object_t *page = da_po_ptr((da->page)->da); + if (index != da->used_slots - 1) { + page[index] = page[da->used_slots - 1]; + } + page[da->used_slots - 1] = NULL; + da->used_slots--; } static void cycle_root_buffer_clear() { - //just set size to zero, so regular GC will ignore it gracefully + pvm_clear_array(pvm_get_gc_buffer()); } void gc_collect_cycles() { @@ -166,6 +186,7 @@ static int free_unmarked() static void mark_tree(pvm_object_storage_t * p) { + ph_printf("\nGC: process another object\n"); p->_ah.gc_flags = gc_flags_last_generation; // set @@ -246,7 +267,8 @@ void refzero_process_children( pvm_object_storage_t *p ) do_refzero_process_children( p ); - if ( p->_ah.alloc_flags & PVM_OBJECT_AH_ALLOCATOR_FLAG_IN_BUFFER ) + // if ( p->_ah.alloc_flags & PVM_OBJECT_AH_ALLOCATOR_FLAG_IN_BUFFER ) + if ( !(p->_flags & PHANTOM_OBJECT_STORAGE_FLAG_IS_CHILDFREE) ) cycle_root_buffer_rm_candidate( p ); p->_ah.alloc_flags = PVM_OBJECT_AH_ALLOCATOR_FLAG_FREE; @@ -416,18 +438,22 @@ void do_ref_dec_p(pvm_object_storage_t *p) } // if we decrement refcount and stil above zero - mark an object as potential cycle root; // and internal objects can't be a cycle root 
(sic!) + else { nonzero:; - if ( !(p->_flags & PHANTOM_OBJECT_STORAGE_FLAG_IS_INTERNAL) ) - { - if ( !(p->_ah.alloc_flags & PVM_OBJECT_AH_ALLOCATOR_FLAG_IN_BUFFER) ) - { - cycle_root_buffer_add_candidate(p); - p->_ah.alloc_flags |= PVM_OBJECT_AH_ALLOCATOR_FLAG_IN_BUFFER ; - } - p->_ah.alloc_flags |= PVM_OBJECT_AH_ALLOCATOR_FLAG_WENT_DOWN ; // set down flag + // an internal object *can* form a loop: just put the array in itself (array is an internal object) + // if ( !(p->_flags & PHANTOM_OBJECT_STORAGE_FLAG_IS_INTERNAL) ) + //{ + if ( !(p->_flags & PHANTOM_OBJECT_STORAGE_FLAG_IS_CHILDFREE) ) { + //if ( !(p->_ah.alloc_flags & PVM_OBJECT_AH_ALLOCATOR_FLAG_IN_BUFFER) ) + //{ + cycle_root_buffer_add_candidate(p); + // p->_ah.alloc_flags |= PVM_OBJECT_AH_ALLOCATOR_FLAG_IN_BUFFER ; + //} + //p->_ah.alloc_flags |= PVM_OBJECT_AH_ALLOCATOR_FLAG_WENT_DOWN ; // set down flag } + //} } //nokill:; } @@ -463,9 +489,10 @@ void ref_inc_p(pvm_object_storage_t *p) { //(p->_ah.refCount)++; ATOMIC_ADD_AND_FETCH( &(p->_ah.refCount), 1 ); - - if ( p->_ah.alloc_flags & PVM_OBJECT_AH_ALLOCATOR_FLAG_IN_BUFFER ) - p->_ah.alloc_flags &= ~PVM_OBJECT_AH_ALLOCATOR_FLAG_WENT_DOWN ; //clear down flag + if ( !(p->_flags & PHANTOM_OBJECT_STORAGE_FLAG_IS_CHILDFREE) ) + cycle_root_buffer_rm_candidate(p); + // if ( p->_ah.alloc_flags & PVM_OBJECT_AH_ALLOCATOR_FLAG_IN_BUFFER ) + // p->_ah.alloc_flags &= ~PVM_OBJECT_AH_ALLOCATOR_FLAG_WENT_DOWN ; //clear down flag } } @@ -479,12 +506,12 @@ void ref_saturate_p(pvm_object_storage_t *p) STAT_INC_CNT( OBJECT_SATURATE ); // Saturated object can't be a loop collection candidate. Can it? 
- if ( p->_ah.alloc_flags & PVM_OBJECT_AH_ALLOCATOR_FLAG_IN_BUFFER ) { - cycle_root_buffer_rm_candidate( p ); - - p->_ah.alloc_flags &= ~PVM_OBJECT_AH_ALLOCATOR_FLAG_IN_BUFFER; - p->_ah.alloc_flags &= ~PVM_OBJECT_AH_ALLOCATOR_FLAG_WENT_DOWN; - } + cycle_root_buffer_rm_candidate( p ); + // if ( p->_ah.alloc_flags & PVM_OBJECT_AH_ALLOCATOR_FLAG_IN_BUFFER ) { +// + // p->_ah.alloc_flags &= ~PVM_OBJECT_AH_ALLOCATOR_FLAG_IN_BUFFER; + // p->_ah.alloc_flags &= ~PVM_OBJECT_AH_ALLOCATOR_FLAG_WENT_DOWN; + // } assert( p->_ah.object_start_marker == PVM_OBJECT_START_MARKER ); assert( p->_ah.alloc_flags == PVM_OBJECT_AH_ALLOCATOR_FLAG_ALLOCATED ); diff --git a/src/phantom/vm/gc_on_snap.c b/src/phantom/vm/gc_on_snap.c new file mode 100644 index 000000000..c3ba61cb9 --- /dev/null +++ b/src/phantom/vm/gc_on_snap.c @@ -0,0 +1,773 @@ +/** + * + * Phantom OS + * + * Copyright (C) 2005-2009 Dmitry Zavalishin, dz@dz.ru + * + * Fast and dirty garbage collection + * +**/ + +#define DEBUG_MSG_PREFIX "vm.gc_on_snap" +#include +#define debug_level_flow 10 +#define debug_level_error 10 +#define debug_level_info 10 + +#include +#include +#include +#include + +#include "../isomem/vm_map.h" +#include "../isomem/pager.h" +#include "../isomem/pagelist.h" + +#include +#include +#include +#include + +#include + +#include + +static long long shift; +hal_mutex_t *vm_read_snap_mutex; +static hal_mutex_t _vm_read_snap_mutex; + +void init_gc() { + if (hal_mutex_init(&_vm_read_snap_mutex, "ReadSnap")) + panic("Can't init read snap mutex"); + + vm_read_snap_mutex = &_vm_read_snap_mutex; +} + + +static pvm_object_t shift_ptr(pvm_object_t o, long long shift) { + return (pvm_object_t) ((char *) o + shift); +} + +static unsigned char gc_flags_last_generation = 0; + +static void mark_tree(pvm_object_storage_t *p); + +static char *load_snap() { + unsigned long page_count = N_OBJMEM_PAGES + 1; + SHOW_FLOW0(1, "Started"); + + hal_mutex_lock(vm_read_snap_mutex); + + if (pager_superblock_ptr()->snap_already_read != 
0) { + ph_printf("\n!!! Previously loaded snapshot not cleaned yet !!!\n"); + return 0; + } + + disk_page_no_t snap_start = 0; + + if (pager_superblock_ptr()->last_snap != 0) { + hal_printf("-- Use last snap\n"); + snap_start = pager_superblock_ptr()->last_snap; + } else if (pager_superblock_ptr()->prev_snap != 0) { + hal_printf("-- Missing last snap, use previous snap\n"); + snap_start = pager_superblock_ptr()->prev_snap; + } + + pager_superblock_ptr()->snap_reading = snap_start; + hal_mutex_unlock(vm_read_snap_mutex); + + if (snap_start == 0) { + hal_printf("\n!!! No pagelist to load !!!\n"); + return 0; + } + + hal_printf("Loading pagelist from %d...\n", snap_start); + pagelist loader; + pagelist_init(&loader, snap_start, 0, DISK_STRUCT_MAGIC_SNAP_LIST); + pagelist_seek(&loader); + + disk_page_no_t curr_block; + char *snapshot = ph_calloc(page_count, PAGE_SIZE); + char *snapshot_seeker = snapshot; + unsigned int np; + for (np = 0; np < page_count; np++) { + if (np % 500 == 0) + ph_printf("np: %d/%d\n", np, page_count); + + if (!pagelist_read_seq(&loader, &curr_block)) { + ph_printf("\n!!! 
Incomplete pagelist !!!\n"); + snapshot = 0; + break; + } + + if (curr_block == 0) { + snapshot_seeker += PAGE_SIZE; + continue; // change + } + + disk_page_io sb; + disk_page_io_init(&sb); + errno_t rc = disk_page_io_load_sync(&sb, curr_block); + + if (rc) { + panic("failed to load snapshot in gc\n"); + } + + ph_memcpy(snapshot_seeker, disk_page_io_data(&sb), PAGE_SIZE); + snapshot_seeker += PAGE_SIZE; + } + + pagelist_finish(&loader); + + hal_mutex_lock(vm_read_snap_mutex); + pager_superblock_ptr()->snap_reading = 0; + pager_superblock_ptr()->snap_already_read = snap_start; + hal_mutex_unlock(vm_read_snap_mutex); + + return snapshot; +} + +static pvm_object_storage_t **collect_unmarked(char *start); + +static int free_unmarked(pvm_object_storage_t **to_free); + +static void gc_process_children(gc_iterator_call_t f, pvm_object_storage_t *p, void *arg); + +static void mark_tree_o(pvm_object_t o, void *arg); + +void run_gc_on_snap() { + // synchronization? + + gc_flags_last_generation++; // bump generation + if (gc_flags_last_generation == 0) gc_flags_last_generation++; // != 0 'cause allocation reset gc_flags to zero + + //phantom_virtual_machine_threads_stopped++; // pretend we are stopped + //TODO: refine synchronization + + // First pass - tree walk, mark visited. + // Root is always used. All other objects, including pvm_root and pvm_root.threads_list, should be reached from root... + // char* snapshot = load_snap(pager_superblock_ptr()->disk_page_count); + char *snapshot = load_snap(); + if (snapshot == 0) { + ph_printf("\n!!! 
No snapshot loaded !!!\n"); + return; + } + + shift = snapshot - (char *) get_pvm_object_space_start(); + ph_printf("real space start: %p\n", get_pvm_object_space_start()); + ph_printf("real space end: %p\n", get_pvm_object_space_end()); + ph_printf("snapshot is loaded\n"); + ph_printf("shift: %d\n", shift); + ph_printf("snapshot addr: %p\n", snapshot); + ph_printf("reference start marker: %d\n", PVM_OBJECT_START_MARKER); + + mark_tree((pvm_object_storage_t *) snapshot); + pvm_object_storage_t **to_free = collect_unmarked(snapshot); + ph_printf("Collect unmarked finished\n"); + + // Second pass - linear walk to free unused objects. + int freed = free_unmarked(to_free); + + // if (freed > 0) + // ph_printf("\ngc: %i objects freed\n", freed); +} + +struct disk_page_io gc_io; + +typedef struct gc_map { + uint64_t *keys; + uint64_t *values; + + uint64_t capacity; + uint64_t count; +} gc_map_t; + +void gc_map_init(gc_map_t *map) { + map->count = 0; + map->capacity = 16; + map->keys = ph_malloc(sizeof(uint64_t) * map->capacity); + map->values = ph_malloc(sizeof(uint64_t) * map->capacity); +} + +void gc_map_release(gc_map_t *map) { + ph_free(map->keys); + ph_free(map->values); +} + +uint64_t *__gc_map_try_get(gc_map_t *map, uint64_t key) { + for (uint64_t i = 0; i < map->count; i++) { + if (map->keys[i] == key) { + return &map->values[i]; + } + } + + return NULL; +} + +// 0 if found, -1 if not +int gc_map_try_get(gc_map_t *map, uint64_t key, uint64_t *out) { + assert(out); + + uint64_t *local_out = __gc_map_try_get(map, key); + if (local_out) { + *out = *local_out; + return 0; + } + + return -1; +} + +void __increase_capacity(void **container, uint64_t prev_capacity, uint64_t new_capacity, size_t elem_size) { + void *new_container = ph_malloc(elem_size * new_capacity); + ph_memcpy(new_container, *container, prev_capacity * elem_size); + ph_free(*container); + *container = new_container; +} + +void gc_map_insert_nocheck(gc_map_t *map, uint64_t key, uint64_t value) { + if 
(map->count == map->capacity) { + map->capacity *= 2; + __increase_capacity((void**) &map->keys, map->count, map->capacity, sizeof(key)); + __increase_capacity((void**) &map->values, map->count, map->capacity, sizeof(value)); + } + + map->keys[map->count] = key; + map->values[map->count] = value; + map->count++; +} + +// returns previous value (or 0 if none) +uint64_t gc_map_increment(gc_map_t *map, uint64_t key) { + uint64_t *value_ptr = __gc_map_try_get(map, key); + if (value_ptr) { + (*value_ptr)++; + return *value_ptr - 1; + } + + gc_map_insert_nocheck(map, key, 1); + return 0; +} + +// set value if key is present in map. 0 on success, 1 on skip +static int gc_map_set_or_skip(gc_map_t *map, uint64_t key, uint64_t value, uint64_t *old_value) { + uint64_t *value_ptr = __gc_map_try_get(map, key); + if (!value_ptr) return 1; + + *old_value = *value_ptr; + (*value_ptr) = value; + return 0; +} + +extern vm_page *get_vm_page(unsigned long index); + +static unsigned char *load_page(gc_map_t *map, uint64_t page_index) { + unsigned char *page = NULL; + + if (gc_map_try_get(map, page_index, &page) == 0) { + return page; + } + + // page not loaded, load now: + vm_page *page_struct = get_vm_page(page_index); + if (page_struct->make_page) { // non empty page + if (disk_page_io_load_sync(&gc_io, page_struct->make_page)) panic("Could not load from disk"); + page = ph_malloc(PAGE_SIZE); + ph_memcpy(page, disk_page_io_data(&gc_io), PAGE_SIZE); + } + + gc_map_insert_nocheck(map, page_index, page); + return page; +} + +static const unsigned char *extract_header_part(unsigned char **pages, int start, int size) { + static char buffer[8]; + assert(size <= sizeof(buffer)); + + for (int i = 0; i < size; i++) { + long current_offset = start + i; + unsigned char curr_byte; + + if (current_offset < PAGE_SIZE) { + curr_byte = *(unsigned char*)(pages[0] + current_offset); + } else { + curr_byte = *(unsigned char*)(pages[1] + current_offset - PAGE_SIZE); + } + + buffer[i] = curr_byte; + } + 
+ return buffer; +} + +static void write_header_part(unsigned char **pages, int start, int size, const unsigned char *data) +{ + assert(data && size > 0); + + for (int i = 0; i < size; i++) { + long current_offset = start + i; + + if (current_offset < PAGE_SIZE) { + *(pages[0] + current_offset) = data[i]; + } else { + *(pages[1] + current_offset - PAGE_SIZE) = data[i]; + } + } +} + +#define OBJ_FLAGS_OFFSET __offsetof(pvm_object_storage_t, _flags) +#define OBJ_FLAGS_SIZE sizeof(uint32_t) +#define OBJ_REFCNT_OFFSET __offsetof(pvm_object_storage_t, _ah.refCount) +#define OBJ_REFCNT_SIZE sizeof(int32_t) +#define OBJ_MARKER_OFFSET __offsetof(pvm_object_storage_t, _ah.object_start_marker) +#define OBJ_MARKER_SIZE sizeof(unsigned int) +#define OBJ_AFLAGS_OFFSET __offsetof(pvm_object_storage_t, _ah.alloc_flags) +#define OBJ_AFLAGS_SIZE sizeof(unsigned char) +#define OBJ_ESIZE_OFFSET __offsetof(pvm_object_storage_t, _ah.exact_size) +#define OBJ_ESIZE_SIZE sizeof(unsigned int) + +// if the object is childfree return NULL (since why waste time on them?) +static pvm_object_t gc_get_parent_object_image(gc_map_t *map, pvm_object_t real_object, bool *to_free) +{ + long page_index = addr_to_page_index((uintptr_t)real_object); + assert(page_index >= 0); + int object_offset = addr_to_page_offset((uintptr_t)real_object); + int flags_field_offset = object_offset + OBJ_FLAGS_OFFSET; + bool flags_on_first_page = flags_field_offset + OBJ_FLAGS_SIZE <= PAGE_SIZE; + *to_free = false; + + // Assuming object header (including `_flags` and `da_size`) fits on a single page + // (seems like a reasonable assumption tho) + unsigned char *base_pages[] = { + load_page(map, page_index), + !flags_on_first_page ? load_page(map, page_index + 1) : NULL + }; + + assert(base_pages[0]); // empty page cannot contain object header (can it?) 
+ assert(base_pages[1] || flags_on_first_page); + + // check start marker + int start_marker_offset = object_offset + OBJ_MARKER_OFFSET; + unsigned int start_marker = *(unsigned int*)extract_header_part( + base_pages, start_marker_offset, OBJ_MARKER_SIZE); + assert(start_marker == PVM_OBJECT_START_MARKER); + + // check object image is allocated + int alloc_flags_offset = object_offset + OBJ_AFLAGS_OFFSET; + unsigned char alloc_flags = *(unsigned char*)extract_header_part(base_pages, + alloc_flags_offset, OBJ_AFLAGS_SIZE); + assert(alloc_flags & PVM_OBJECT_AH_ALLOCATOR_FLAG_ALLOCATED); + + // childfree check + uint32_t flags = *(uint32_t*)extract_header_part(base_pages, flags_field_offset, + OBJ_FLAGS_SIZE); + if (flags & PHANTOM_OBJECT_STORAGE_FLAG_IS_CHILDFREE) return NULL; + + int size_field_offset = object_offset + OBJ_ESIZE_OFFSET; + unsigned int exact_object_size = *(unsigned int*)extract_header_part( + base_pages, size_field_offset, OBJ_ESIZE_SIZE); + + long object_end_offset = object_offset + exact_object_size; + + if (object_end_offset <= PAGE_SIZE) { // whole object is in the first page + return base_pages[0] + object_offset; + } + + int pages_required = (object_end_offset + PAGE_SIZE - 1) / PAGE_SIZE; + unsigned char *obj_image = ph_malloc(exact_object_size); + unsigned char *cur = obj_image; + + ph_memcpy(cur, base_pages[0] + object_offset, PAGE_SIZE - object_offset); + cur += PAGE_SIZE - object_offset; + + for (int curr_page = 1; cur != obj_image + exact_object_size; curr_page++) { + long remaining_size = exact_object_size - (cur - obj_image); + assert(remaining_size > 0); + unsigned char *cur_page = load_page(map, page_index + curr_page); + int to_copy = remaining_size <= PAGE_SIZE ? 
remaining_size : PAGE_SIZE; + + if (cur_page) + ph_memcpy(cur, cur_page, to_copy); + else + ph_memset(cur, 0, to_copy); + cur += to_copy; + } + + *to_free = true; + return obj_image; +} + +static int32_t extract_real_refcount(gc_map_t *map, pvm_object_t real_object) { + long page_index = addr_to_page_index((uintptr_t)real_object); + assert(page_index >= 0); + int object_offset = addr_to_page_offset((uintptr_t)real_object); + int refcnt_offset = object_offset + OBJ_REFCNT_OFFSET; + bool refcnt_on_first_page = refcnt_offset + OBJ_REFCNT_SIZE <= PAGE_SIZE; + + unsigned char *base_pages[] = { + load_page(map, page_index), + !refcnt_on_first_page ? load_page(map, page_index + 1) : NULL + }; + + assert(base_pages[0]); + assert(base_pages[1] || refcnt_on_first_page); + int32_t refcount = *(int32_t*)extract_header_part( + base_pages, refcnt_offset, sizeof(int32_t)); + + return refcount; +} + +static void load_header_pages(gc_map_t *map, pvm_object_t real_object) { + long page_index = addr_to_page_index((uintptr_t)real_object); + assert(page_index >= 0); + int object_offset = addr_to_page_offset((uintptr_t)real_object); + int flags_field_offset = object_offset + OBJ_FLAGS_OFFSET; + bool flags_on_first_page = flags_field_offset + OBJ_FLAGS_SIZE <= PAGE_SIZE; + + load_page(map, page_index); + if (!flags_on_first_page) load_page(map, page_index + 1); +} + +gc_map_t *new_refcnt_map, *loaded_pages_map; + +static void mark_tree_incremental(pvm_object_t real_object, void *data) { + if (real_object == NULL) return; + int mark_mode = data ? 
1 : 0; + + assert(data == NULL || (intptr_t)data == 1); + if (mark_mode == 1) { // reverse pass + uint64_t old_refcnt; // mark object as non-garbage + assert(gc_map_set_or_skip(new_refcnt_map, real_object, 0, &old_refcnt) == 0); + if (old_refcnt == 0) return; // was already marked, return + } else { + // local reference inc + // if refcount was non-zero, the object is already marked - return + if (gc_map_increment(new_refcnt_map, real_object) > 0) return; + } + + bool to_free = false; + // copy of the object + pvm_object_t object_image = gc_get_parent_object_image(loaded_pages_map, real_object, &to_free); + if (object_image == NULL) return; // object is childfree, return + // all the needed asserts and checks are performed in gc_get_parent_object_image + + gc_process_children(mark_tree_incremental, object_image, data); + + if (to_free) ph_free(object_image); +} + +static void gc_free_object_image(gc_map_t *map, pvm_object_t real_object, bool *is_modified) +{ + long page_index = addr_to_page_index((uintptr_t)real_object); + assert(page_index >= 0); + int object_offset = addr_to_page_offset((uintptr_t)real_object); + // assuming flags are further in the object than refcount + int alloc_flags_offset = object_offset + OBJ_AFLAGS_OFFSET; + int refcount_offset = object_offset + OBJ_REFCNT_OFFSET; + bool flags_on_first_page = alloc_flags_offset + OBJ_AFLAGS_SIZE <= PAGE_SIZE; + + uint64_t *in_map_addresses[] = { + __gc_map_try_get(map, page_index), + !flags_on_first_page ? __gc_map_try_get(map, page_index + 1) : NULL + }; + + unsigned char *pages[] = { + *in_map_addresses[0], flags_on_first_page ? 
NULL : *in_map_addresses[1] + }; + + // free image object + unsigned char new_aflags = PVM_OBJECT_AH_ALLOCATOR_FLAG_FREE; + int32_t new_refcnt = 0; + write_header_part(pages, alloc_flags_offset, OBJ_AFLAGS_SIZE, &new_aflags); + write_header_part(pages, refcount_offset, OBJ_REFCNT_SIZE, &new_refcnt); + + // mark page images dirty + is_modified[in_map_addresses[0] - map->values] = true; + if (!flags_on_first_page) { + is_modified[in_map_addresses[1] - map->values] = true; + } +} + +int freed_size = 0; + +extern void cycle_root_buffer_rm_candidate(pvm_object_storage_t *p); + +static void free_incremetnal(gc_map_t *map, pvm_object_t real_object, bool *is_modified) { + pvm_object_is_allocated_assert(real_object); + + if (real_object->_flags & PHANTOM_OBJECT_STORAGE_FLAG_IS_FINALIZER) { + int syscall_id = pvm_object_da(real_object->_class, class)->sys_table_id; + gc_finalizer_func_t func = pvm_internal_classes[syscall_id].finalizer; + + if (func != 0) + func(real_object); + + // should run ref_dec for children? 
+ // yes, probably + } + + // free real object + real_object->_ah.refCount = 0; + real_object->_ah.alloc_flags = PVM_OBJECT_AH_ALLOCATOR_FLAG_FREE; + freed_size += real_object->_ah.exact_size; + + if ( !(real_object->_flags & PHANTOM_OBJECT_STORAGE_FLAG_IS_CHILDFREE) ) + cycle_root_buffer_rm_candidate(real_object); + + gc_free_object_image(map, real_object, is_modified); +} + +#define get_array_slot_nocheck(arr, i) ((pvm_object_t*)(pvm_data_area(arr, array)->page->da))[i] + +// we will run on the unfinished snapshot - no need for protection from deletion +void run_gc_incremental(pvm_object_t cycle_candidates) { + SHOW_INFO0(1, "GC LIGHT: START"); + uint64_t start_time = hal_system_time(); + // gc buffer only contains objects with non-zero refcount (== no invalid/freed objects) + assert(cycle_candidates); + gc_map_t new_refcnt, loaded_pages; + gc_map_init(&new_refcnt); gc_map_init(&loaded_pages); + new_refcnt_map = &new_refcnt; + loaded_pages_map = &loaded_pages; + + disk_page_io_init(&gc_io); + + SHOW_INFO(1, "GC LIGHT: Initialized. Objects to process: %d", pvm_get_array_size(cycle_candidates)); + // recount references (forward pass) + for (int i = 0; i < pvm_get_array_size(cycle_candidates); i++) { + mark_tree_incremental(get_array_slot_nocheck(cycle_candidates, i), 0); + } + + SHOW_INFO(1, "GC LIGHT: Forward pass done. 
Pages loaded: %d", loaded_pages.count); + // find objects with external references (reverse pass) + for (int i = 0; i < new_refcnt.count; i++) { + pvm_object_t object = (pvm_object_t) new_refcnt.keys[i]; + int64_t counted_refs = new_refcnt.values[i]; + if (counted_refs == 0) continue; // marked as non-garbage in earlier iterations + // all candidates have 1 extra reference counted + for (int i = 0; i < pvm_get_array_size(cycle_candidates); i++) { + if (get_array_slot_nocheck(cycle_candidates, i) == object) { + counted_refs--; + break; + } + } + int32_t actual_refs = extract_real_refcount(loaded_pages_map, object); + if (counted_refs == actual_refs) continue; // potential garbage + if (counted_refs > actual_refs) { + SHOW_ERROR(1, "Too many references counted: %d/%d @%p", + counted_refs, actual_refs, object); + dumpo(object); + continue; // replace with panic later? + } + + // counted less than actual - there is an external reference + mark_tree_incremental(object, 1); // reverse pass + } + + SHOW_INFO0(1, "GC LIGHT: Reverse pass done"); + int freed_count = 0; + freed_size = 0; + // free garbage, mark dirty pages to page out + // +4 for pages that *may* be required for `cycle_candidates` + bool *is_modified = ph_calloc(loaded_pages.count + 4, sizeof(bool)); + for (int i = 0; i < new_refcnt.count; i++) { + pvm_object_t object = (pvm_object_t) new_refcnt.keys[i]; + + if (new_refcnt.values[i] == 0) continue; // not garbage + // ph_printf("Freeing: "); + // pvm_object_print(pvm_data_area(object->_class, class)->class_name); + // ph_printf("\n"); + free_incremetnal(loaded_pages_map, object, is_modified); + freed_count++; + } + + // remove cycle candidates array from snapshot too + pvm_object_t array_page = pvm_data_area(cycle_candidates, array)->page; + if (array_page) { + load_header_pages(loaded_pages_map, array_page); + free_incremetnal(loaded_pages_map, array_page, is_modified); + } + load_header_pages(loaded_pages_map, cycle_candidates); + 
free_incremetnal(loaded_pages_map, cycle_candidates, is_modified); + + SHOW_INFO0(1, "GC LIGHT: Pageout..."); + // sync pageout of dirty pages + int pages_written = 0; + //void *gc_io_mem = gc_io.mem; + for (int i = 0; i < loaded_pages.count; i++) { + if (!loaded_pages.values[i]) continue; + + if (is_modified[i]) { + vm_page *page_struct = get_vm_page(loaded_pages.keys[i]); + // should figure out if it is possible to directly write from loaded_pages.values[i] + //gc_io.mem = loaded_pages.values[i]; + ph_memcpy(disk_page_io_data(&gc_io), loaded_pages.values[i], PAGE_SIZE); + + if (disk_page_io_save_sync(&gc_io, page_struct->make_page)) panic("Could not save to disk"); + pages_written++; + + assert(page_struct->make_page); + } + + ph_free((void*) loaded_pages.values[i]); + } + //gc_io.mem = gc_io_mem; + + SHOW_INFO(1, "GC LIGHT: Pageout done. Pages written: %d", pages_written); + disk_page_io_finish(&gc_io); + gc_map_release(&new_refcnt); + gc_map_release(&loaded_pages); + + SHOW_INFO0(1, "GC LIGHT: FINISH"); + uint64_t end_time = hal_system_time(); + SHOW_INFO(1, "GC stats: pages loaded: %d, objects freed: %d, objects iterated: %d. 
Elapsed: %d ms", + loaded_pages.count, freed_count, new_refcnt.count, (end_time - start_time) / 1000); + SHOW_INFO(1, "GC stats: freed %d KB (%d bytes)", freed_size / 1024, freed_size); +} + +// silently delete object +void release_gc_buffer(pvm_object_t gc_buffer) { + if (gc_buffer == NULL) return; + + pvm_object_t p = pvm_data_area(gc_buffer, array)->page; + if (p) { + p->_ah.refCount = 0; + p->_ah.alloc_flags = PVM_OBJECT_AH_ALLOCATOR_FLAG_FREE; + } + + gc_buffer->_ah.refCount = 0; + gc_buffer->_ah.alloc_flags = PVM_OBJECT_AH_ALLOCATOR_FLAG_FREE; +} + +static void mark_tree(pvm_object_storage_t *obj_in_snap) { + // ph_printf("\nGC: process another object\n"); + // ph_printf("Flags: '"); + // print_object_flags(obj_in_snap); + // ph_printf("'\n"); + // ph_printf("object class:\n"); + // dumpo(obj_in_snap->_class); + + // ph_printf("p: %p, p->ah: %p, p->da: %p\n", obj_in_snap, &obj_in_snap->_ah, obj_in_snap->da); + // ph_printf("start marker: %d\n", obj_in_snap->_ah.object_start_marker); + + obj_in_snap->_ah.gc_flags = gc_flags_last_generation; // set + + // ph_printf("assert start marker and allocated\n"); + assert(obj_in_snap->_ah.object_start_marker == PVM_OBJECT_START_MARKER); + assert(obj_in_snap->_ah.alloc_flags & PVM_OBJECT_AH_ALLOCATOR_FLAG_ALLOCATED); + + // ph_printf("check if childfree\n"); + // Fast skip if no children - + if (!(obj_in_snap->_flags & PHANTOM_OBJECT_STORAGE_FLAG_IS_CHILDFREE)) { + // ph_printf("not childfree, call gc_process_children\n"); + // ph_printf("p addr: %p\n", &obj_in_snap); + gc_process_children(mark_tree_o, obj_in_snap, 0); + } +} + +static void mark_tree_o(pvm_object_t obj_in_pvm, void *arg) { + if (obj_in_pvm == 0) // Don't try to process null objects + return; + + pvm_object_t obj_in_snap = shift_ptr(obj_in_pvm, shift); + + if (obj_in_snap->_ah.gc_flags != gc_flags_last_generation) + mark_tree(obj_in_snap); + + //if (o.interface->_ah.gc_flags != gc_flags_last_generation) mark_tree( o.interface ); +} + +static void 
gc_process_children(gc_iterator_call_t f, pvm_object_storage_t *obj_in_snap, void *arg) { + // ph_printf("GC: process children\n"); + f(obj_in_snap->_class, arg); + + // Fast skip if no children - done! + //if( p->_flags & PHANTOM_OBJECT_STORAGE_FLAG_IS_CHILDFREE ) + // return; + + // plain non internal objects - + if (!(obj_in_snap->_flags & PHANTOM_OBJECT_STORAGE_FLAG_IS_INTERNAL)) { + // ph_printf("External object, normal iter\n"); + unsigned i; + + for (i = 0; i < da_po_limit(obj_in_snap); i++) { + f(da_po_ptr(obj_in_snap->da)[i], arg); + } + return; + } + + // We're here if object is internal. + + // Now find and call class-specific function: pvm_gc_iter_* + // ph_printf("Internal object, get iter method\n"); + gc_iterator_func_t iter = pvm_internal_classes[pvm_object_da(obj_in_snap->_class, class)-> + sys_table_id].iter; + + iter(f, obj_in_snap, arg); +} + +static pvm_object_storage_t **collect_unmarked(char *start) { + char *end = (char *) start + N_OBJMEM_PAGES * 4096L; + char *curr; + + int freed = 0; + for (curr = start; curr < end; curr += ((pvm_object_storage_t *) curr)->_ah.exact_size) { + pvm_object_storage_t *p = (pvm_object_storage_t *) curr; + assert(p->_ah.object_start_marker == PVM_OBJECT_START_MARKER); + + if ((p->_ah.gc_flags != gc_flags_last_generation) && (p->_ah.alloc_flags != PVM_OBJECT_AH_ALLOCATOR_FLAG_FREE)) + //touch not accessed but allocated objects + { + ++freed; + } + } + + LOG_INFO_(5, "Found %d objects to free", freed); + + pvm_object_storage_t **to_free = ph_calloc((freed + 1), sizeof(pvm_object_storage_t *)); + + int i = 0; + for (curr = start; curr < end; curr += ((pvm_object_storage_t *) curr)->_ah.exact_size) { + pvm_object_storage_t *p = (pvm_object_storage_t *) curr; + assert(p->_ah.object_start_marker == PVM_OBJECT_START_MARKER); + + if ((p->_ah.gc_flags != gc_flags_last_generation) && (p->_ah.alloc_flags != PVM_OBJECT_AH_ALLOCATOR_FLAG_FREE)) + //touch not accessed but allocated objects + { + to_free[i++] = shift_ptr(p, 
-shift); + } + } + + to_free[i] = 0; + return to_free; +} + +static int free_unmarked(pvm_object_storage_t **to_free) { + int i = 0; + long long freed_size = 0; + + vm_lock_persistent_memory(); + while (to_free[i] != 0) { + pvm_object_storage_t *p = to_free[i]; + pvm_object_is_allocated_assert(p); + freed_size += p->_ah.exact_size; + + if (p->_flags & PHANTOM_OBJECT_STORAGE_FLAG_IS_FINALIZER) { + // based on the assumption that finalizer is only valid for some internal childfree objects - is it correct? + gc_finalizer_func_t func = pvm_internal_classes[pvm_object_da(p->_class, class)->sys_table_id].finalizer; + + if (func != 0) + func(p); + + // should run ref_dec for children? + } + + p->_ah.refCount = 0; // free now + p->_ah.alloc_flags = PVM_OBJECT_AH_ALLOCATOR_FLAG_FREE; // free now + i++; + } + vm_unlock_persistent_memory(); + + LOG_INFO_(5, "GC finished: freed %d KB", freed_size / 1024); + + return i; +} diff --git a/src/phantom/vm/internal.c b/src/phantom/vm/internal.c index d3194a7f4..093270323 100755 --- a/src/phantom/vm/internal.c +++ b/src/phantom/vm/internal.c @@ -213,11 +213,11 @@ struct internal_class pvm_internal_classes[] = PVM_ROOT_OBJECT_TTY_CLASS, syscall_table_4_tty, &n_syscall_table_4_tty, pvm_internal_init_tty, - 0 /*pvm_gc_iter_tty*/, + pvm_gc_iter_tty, pvm_gc_finalizer_tty, // no finalizer pvm_restart_tty, sizeof(struct data_area_4_tty), - PHANTOM_OBJECT_STORAGE_FLAG_IS_INTERNAL|PHANTOM_OBJECT_STORAGE_FLAG_IS_CHILDFREE, + PHANTOM_OBJECT_STORAGE_FLAG_IS_INTERNAL, 0 }, /* { diff --git a/src/phantom/vm/object.c b/src/phantom/vm/object.c index abad83f7e..bff3b6e3f 100755 --- a/src/phantom/vm/object.c +++ b/src/phantom/vm/object.c @@ -58,8 +58,10 @@ pvm_object_t pvm_get_array_ofield(pvm_object_t o, unsigned int slot ) if( !(PHANTOM_OBJECT_STORAGE_FLAG_IS_INTERNAL & (o->_flags) ) || !( PHANTOM_OBJECT_STORAGE_FLAG_IS_RESIZEABLE & (o->_flags) ) - ) - pvm_exec_panic0( "attempt to do an array op to non-array" ); + ) { + dumpo((addr_t)o); + 
pvm_exec_panic0( "attempt to do get_ofield to non-array" ); + } struct data_area_4_array *da = (struct data_area_4_array *)&(o->da); @@ -77,8 +79,10 @@ void pvm_set_array_ofield(pvm_object_t o, unsigned int slot, pvm_object_t value if( !(PHANTOM_OBJECT_STORAGE_FLAG_IS_INTERNAL & (o->_flags) ) || !( PHANTOM_OBJECT_STORAGE_FLAG_IS_RESIZEABLE & (o->_flags) ) - ) - pvm_exec_panic0( "attempt to do an array op to non-array" ); + ) { + dumpo((addr_t)o); + pvm_exec_panic0( "attempt to do set_ofield to non-array" ); + } if( PHANTOM_OBJECT_STORAGE_FLAG_IS_IMMUTABLE & (o->_flags) ) pvm_exec_panic0( "attempt to set_array_ofield for immutable" ); @@ -137,8 +141,10 @@ void pvm_pop_array(pvm_object_t array, pvm_object_t value_to_pop ) if( !(PHANTOM_OBJECT_STORAGE_FLAG_IS_INTERNAL & (array->_flags) ) || !( PHANTOM_OBJECT_STORAGE_FLAG_IS_RESIZEABLE & (array->_flags) ) - ) - pvm_exec_panic0( "attempt to do an array op to non-array" ); + ) { + pvm_exec_panic0( "attempt to do pop to non-array" ); + dumpo((addr_t)array); + } if( PHANTOM_OBJECT_STORAGE_FLAG_IS_IMMUTABLE & (array->_flags) ) pvm_exec_panic0( "attempt to pop_array for immutable" ); @@ -162,6 +168,28 @@ void pvm_pop_array(pvm_object_t array, pvm_object_t value_to_pop ) pvm_exec_panic0( "attempt to remove non existing element from array" ); } +void pvm_clear_array(pvm_object_t array) { + struct data_area_4_array *da = (struct data_area_4_array *)&(array->da); + + verify_p(array); + if( + !(PHANTOM_OBJECT_STORAGE_FLAG_IS_INTERNAL & (array->_flags) ) || + !( PHANTOM_OBJECT_STORAGE_FLAG_IS_RESIZEABLE & (array->_flags) ) + ) { + pvm_exec_panic0( "attempt to clear non-array" ); + dumpo((addr_t)array); + } + + if( PHANTOM_OBJECT_STORAGE_FLAG_IS_IMMUTABLE & (array->_flags) ) + pvm_exec_panic0( "attempt to clear immutable array" ); + + if (da->page) { + pvm_object_t *p = da_po_ptr((da->page)->da); + ph_memset(p, 0, da->page_size * sizeof(pvm_object_t)); // clean page to avoid refdecs on contained objects + } + da->page_size = 16; + 
da->used_slots = 0; +} /** @@ -248,6 +276,33 @@ pvm_set_field( pvm_object_t o, unsigned int slot, pvm_object_t value ) if(da_po_ptr(o->da)[slot]) ref_dec_o(da_po_ptr(o->da)[slot]); //decr old value da_po_ptr(o->da)[slot] = value; } + +void +pvm_set_field_norefdec( pvm_object_t o, unsigned int slot, pvm_object_t value ) +{ + verify_p(o); + verify_o(value); + + if( PHANTOM_OBJECT_STORAGE_FLAG_IS_IMMUTABLE & (o->_flags) ) + pvm_exec_panic0( "attempt to set_field for immutable" ); + + if( PHANTOM_OBJECT_STORAGE_FLAG_IS_INTERNAL & (o->_flags) ) + { + if( PHANTOM_OBJECT_STORAGE_FLAG_IS_RESIZEABLE & (o->_flags) ) + { + pvm_set_array_ofield( o, slot, value ); + return; + } + pvm_exec_panic0( "attempt to save to internal" ); + } + + if( slot >= da_po_limit(o) ) + { + pvm_exec_panic0( "set: slot index out of bounds" ); + } + + da_po_ptr(o->da)[slot] = value; +} /* void pvm_set_ofield( pvm_object_t op, unsigned int slot, pvm_object_t value ) @@ -481,6 +536,7 @@ void dumpo( addr_t addr ) ph_printf("Flags: '"); print_object_flags(o); ph_printf("', "); + ph_printf("refCnt: %d\n", o->_ah.refCount); //ph_printf("', da size: %ld, ", (long)(o->_da_size) ); if(o->_flags & PHANTOM_OBJECT_STORAGE_FLAG_IS_STRING) diff --git a/src/phantom/vm/root.c b/src/phantom/vm/root.c index a513d9538..d84821e6d 100755 --- a/src/phantom/vm/root.c +++ b/src/phantom/vm/root.c @@ -59,6 +59,7 @@ static void runclass(int, char **); static void process_generic_restarts(pvm_object_t root); static void process_specific_restarts(void); +extern void release_gc_buffer(pvm_object_t gc_buffer); /** * @@ -120,6 +121,12 @@ void pvm_root_init(void) pvm_root.root_dir = pvm_get_field( root, PVM_ROOT_OBJECT_ROOT_DIR ); pvm_root.kernel_stats = pvm_get_field( root, PVM_ROOT_KERNEL_STATISTICS ); pvm_root.class_dir = pvm_get_field( root, PVM_ROOT_CLASS_DIR ); + // currently snapshot contains the old *deleted* buffer, replace with a fresh one + pvm_set_field_norefdec(root, PVM_ROOT_GC_BUFFER, pvm_create_array_object()); // 
-- + pvm_root.gc_buffer = pvm_get_field( root, PVM_ROOT_GC_BUFFER ); + pvm_root.gc_buffer_old = pvm_get_field( root, PVM_ROOT_GC_BUFFER_OLD ); + assert(pvm_is_null(pvm_root.gc_buffer_old)); + // release_gc_buffer(pvm_consume_gc_buffer_old()); // delete old gc buffer (from reality) process_specific_restarts(); @@ -233,6 +240,8 @@ static void pvm_save_root_objects() pvm_set_field( root, PVM_ROOT_KERNEL_STATISTICS, pvm_root.kernel_stats ); pvm_set_field( root, PVM_ROOT_CLASS_DIR, pvm_root.class_dir ); + pvm_set_field( root, PVM_ROOT_GC_BUFFER, pvm_root.gc_buffer ); + pvm_set_field( root, PVM_ROOT_GC_BUFFER_OLD, pvm_root.gc_buffer_old ); } @@ -321,6 +330,7 @@ static void pvm_create_root_objects() pvm_root.kernel_stats = pvm_create_binary_object( STAT_CNT_PERSISTENT_DA_SIZE, 0 ); pvm_root.class_dir = pvm_create_directory_object(); + pvm_root.gc_buffer = pvm_create_array_object(); ref_saturate_o(pvm_root.threads_list); //Need it? @@ -747,6 +757,24 @@ int pvm_disconnect_object(pvm_object_t o, struct data_area_4_thread *tc) return phantom_disconnect_object( da ); } +pvm_object_t pvm_get_gc_buffer() { + return pvm_root.gc_buffer; +} +pvm_object_t pvm_consume_gc_buffer_old() { + pvm_object_t old_buffer = pvm_root.gc_buffer_old; + pvm_root.gc_buffer_old = NULL; + pvm_object_t root = get_root_object_storage(); + pvm_set_field_norefdec( root, PVM_ROOT_GC_BUFFER_OLD, pvm_root.gc_buffer_old ); + return old_buffer; +} +void pvm_swap_gc_buffers() { + assert(pvm_root.gc_buffer_old == NULL); + pvm_root.gc_buffer_old = pvm_root.gc_buffer; + pvm_root.gc_buffer = pvm_create_array_object(); + pvm_object_t root = get_root_object_storage(); + pvm_set_field_norefdec( root, PVM_ROOT_GC_BUFFER, pvm_root.gc_buffer ); + pvm_set_field_norefdec( root, PVM_ROOT_GC_BUFFER_OLD, pvm_root.gc_buffer_old ); +} diff --git a/src/phantom/vm/stacks.c b/src/phantom/vm/stacks.c index fc1f6d9dd..463b94c43 100755 --- a/src/phantom/vm/stacks.c +++ b/src/phantom/vm/stacks.c @@ -713,7 +713,7 @@ void 
pvm_internal_init_istack(pvm_object_t os ) void pvm_gc_iter_istack(gc_iterator_call_t func, pvm_object_t os, void *arg) { - struct data_area_4_object_stack *da = (struct data_area_4_object_stack *)&(os->da); + struct data_area_4_integer_stack *da = (struct data_area_4_integer_stack *)&(os->da); // No objects in the integer stack, but please visit linked list ifself diff --git a/src/phantom/vm/sys/i_io.c b/src/phantom/vm/sys/i_io.c index dd7ab9520..903d412d0 100644 --- a/src/phantom/vm/sys/i_io.c +++ b/src/phantom/vm/sys/i_io.c @@ -435,11 +435,7 @@ void pvm_gc_iter_io(gc_iterator_call_t func, pvm_object_t os, void *arg) { struct data_area_4_io *da = (struct data_area_4_io *)os->da; - (void) da; - - //gc_fcall( func, arg, ot ); - //gc_fcall( func, arg, da->p_kernel_state_object ); - //gc_fcall( func, arg, da->callback ); + func(da->name, arg); } diff --git a/src/phantom/vm/sys/i_wasm.c b/src/phantom/vm/sys/i_wasm.c index b8e2863c0..569ff3b86 100644 --- a/src/phantom/vm/sys/i_wasm.c +++ b/src/phantom/vm/sys/i_wasm.c @@ -475,7 +475,10 @@ static bool initialize_wasm(pvm_object_t o) { (pvm_da_to_object(master_instance))->_class != pvm_get_wasm_class() ) { + assert(da->master_ref == 0); master_instance = da; + } else { // master already exists + da->master_ref = master_instance; } hal_mutex_unlock(vm_alloc_mutex); @@ -898,15 +901,18 @@ void pvm_gc_iter_wasm(gc_iterator_call_t func, pvm_object_t self, void *arg) { func(wasm_da->env_vars_array, arg); func(wasm_da->wasm_sandboxed_objects, arg); - if (wasm_da != master_instance) { - func(pvm_da_to_object(master_instance), arg); + if (wasm_da->master_ref) { + ph_printf("mutex and cond arrays are not marked\n"); + func(pvm_da_to_object(wasm_da->master_ref), arg); return; } - func(master_instance->wasm_native_symbols, arg); - func(master_instance->wasm_runtime_objects, arg); - func(master_instance->wasm_mutex_array, arg); - func(master_instance->wasm_cond_array, arg); + ph_printf("marking mutex and cond arrays\n"); + + 
func(wasm_da->wasm_native_symbols, arg); + func(wasm_da->wasm_runtime_objects, arg); + func(wasm_da->wasm_mutex_array, arg); + func(wasm_da->wasm_cond_array, arg); } syscall_func_t syscall_table_4_wasm[16] = @@ -931,6 +937,14 @@ void pvm_restart_wasm(pvm_object_t o) { assert(master_instance == NULL); r_assert(initialize_wasm(o)); // sets master_instance + ph_printf("mutex array allocated: %d\n", pvm_object_is_allocated(master_instance->wasm_mutex_array)); + dumpo(master_instance->wasm_mutex_array); + ph_printf("cond array allocated: %d\n", pvm_object_is_allocated(master_instance->wasm_cond_array)); + dumpo(master_instance->wasm_cond_array); + dump_uarray(master_instance->wasm_mutex_array, "Mutexes"); + dump_uarray(master_instance->wasm_cond_array, "Conds"); + + foreach_in_uarray(master_instance->wasm_mutex_array, entry) { if (*entry) { if (hal_mutex_init((hal_mutex_t*) *entry, NULL) < 0) diff --git a/src/phantom/vm/sys/i_wasm.h b/src/phantom/vm/sys/i_wasm.h index 20716199c..bb86dfd43 100644 --- a/src/phantom/vm/sys/i_wasm.h +++ b/src/phantom/vm/sys/i_wasm.h @@ -5,6 +5,9 @@ struct data_area_4_wasm { + // 0 if self is master, points to master instance otherwise + struct data_area_4_wasm *master_ref; + // .internal.array, stores objects containing description of wasm native symbols pvm_object_t wasm_native_symbols; // .internal.array, stores objects allocated by wasm runtime using malloc diff --git a/src/phantom/vm/syscall_tty.c b/src/phantom/vm/syscall_tty.c index f4fbc855e..c6c793cae 100755 --- a/src/phantom/vm/syscall_tty.c +++ b/src/phantom/vm/syscall_tty.c @@ -360,10 +360,9 @@ void pvm_internal_init_tty( pvm_object_t ttyos ) void pvm_gc_iter_tty(gc_iterator_call_t func, pvm_object_t os, void *arg) { - (void) func; - (void) os; - (void) arg; - // Empty + struct data_area_4_tty *tty = (struct data_area_4_tty *)os->da; + + func(tty->o_pixels, arg); } void pvm_gc_finalizer_tty( pvm_object_t os ) diff --git a/src/phantom/vm/syscall_win.c b/src/phantom/vm/syscall_win.c 
index 6cdc3b301..1646250af 100755 --- a/src/phantom/vm/syscall_win.c +++ b/src/phantom/vm/syscall_win.c @@ -232,6 +232,7 @@ static int win_setSize_26( pvm_object_t me, pvm_object_t *ret, struct data_area_ struct data_area_4_window *da = pvm_data_area( me, window ); //struct data_area_4_binary *bda = (struct data_area_4_binary *)da->o_pixels->da; //window_handle_t w = (window_handle_t)&bda->data; + (void) da; CHECK_PARAM_COUNT(2); diff --git a/src/phantom/vm/vm.cmake b/src/phantom/vm/vm.cmake index bffae2f3b..4f83dec52 100644 --- a/src/phantom/vm/vm.cmake +++ b/src/phantom/vm/vm.cmake @@ -33,6 +33,7 @@ set (PHANTOM_PVM_SOURCE internal.c bulk.c gc.c + gc_on_snap.c wpaint.c syscall_net.c directory.c diff --git a/src/plib/bin/classes b/src/plib/bin/classes index cbeb76bc4..cd0ab756f 100644 Binary files a/src/plib/bin/classes and b/src/plib/bin/classes differ diff --git a/src/plib/sys/Makefile b/src/plib/sys/Makefile index ff9a550bf..020ff9e93 100755 --- a/src/plib/sys/Makefile +++ b/src/plib/sys/Makefile @@ -74,8 +74,9 @@ $(BINDIR)/phantom.os.pc: phantom.os.time.pc phantom.osimpl.pc internal.world.pc $(BINDIR)/phantom.util.hashmap.pc: phantom.util.hashpair.pc $(BINDIR)/ru.dz.demo.chart.pc: phantom.os.pc internal.io.tty.pc internal.window.pc internal.connection.pc ru.dz.phantom.system.runnable.pc -$(BINDIR)/ru.dz.demo.start.pc: ru.dz.demo.weather.pc ru.dz.demo.chart.pc +$(BINDIR)/ru.dz.demo.start.pc: ru.dz.demo.weather.pc ru.dz.demo.chart.pc ru.dz.demo.wasm.pc ru.dz.phantom.persistence_test.pc ru.dz.phantom.performance_test.pc ru.dz.demo.garbage.pc $(BINDIR)/ru.dz.demo.weather.pc: ru.dz.demo.chart.pc internal.bitmap.pc +$(BINDIR)/ru.dz.demo.garbage.pc: ru.dz.demo.garbage_unit.pc $(BINDIR)/ru.dz.phantom.backgrounds.pc: ../resources/backgrounds/phantom_dz_new_1024_768.ppm diff --git a/src/plib/sys/classes b/src/plib/sys/classes index cbeb76bc4..cd0ab756f 100644 Binary files a/src/plib/sys/classes and b/src/plib/sys/classes differ diff --git a/src/plib/sys/classes.ar 
b/src/plib/sys/classes.ar index 6305bac78..a125481a8 100644 Binary files a/src/plib/sys/classes.ar and b/src/plib/sys/classes.ar differ diff --git a/src/plib/sys/src/ru.dz/ru.dz.demo.garbage.ph b/src/plib/sys/src/ru.dz/ru.dz.demo.garbage.ph new file mode 100755 index 000000000..8638e8509 --- /dev/null +++ b/src/plib/sys/src/ru.dz/ru.dz.demo.garbage.ph @@ -0,0 +1,48 @@ +/** + * + * Phantom OS - Phantom language library + * +**/ + +package .ru.dz.demo; + +import .phantom.os; +import .internal.io.tty; +import .internal.string; +import .internal.connection; +import .ru.dz.demo.garbage_unit; + +class garbage +{ + void run(var console : .internal.io.tty) + { + console.putws("Started garbage generation demo scenario\n"); + + var sleep : .internal.connection; + sleep = new .internal.connection(); + sleep.connect("tmr:"); + + var garbage_created : .internal.int; + garbage_created = 0; + while (1) { + var A : garbage_unit; + var B : garbage_unit; + var C : garbage_unit; + A = new garbage_unit(); + B = new garbage_unit(); + C = new garbage_unit(); + + A.load_payload(); + A.set_next(B); + B.set_next(C); + C.set_next(A); + + garbage_created = garbage_created + 3; + console.putws("Iteration complete, total garbage created: "); + console.putws(garbage_created.toString()); + console.putws("\n"); + sleep.block(null, 25); + } + } +}; + diff --git a/src/plib/sys/src/ru.dz/ru.dz.demo.garbage_unit.ph b/src/plib/sys/src/ru.dz/ru.dz.demo.garbage_unit.ph new file mode 100755 index 000000000..1823a17ed --- /dev/null +++ b/src/plib/sys/src/ru.dz/ru.dz.demo.garbage_unit.ph @@ -0,0 +1,28 @@ +/** + * + * Phantom OS - Phantom language library + * +**/ + +package .ru.dz.demo; + +import .internal.string; + +class garbage_unit +{ + var next : garbage_unit; + var payload : .internal.string; + + void load_payload() { + payload = load_existing_payload(); + } + + void set_next(var n : garbage_unit) { + next = n; + } + + .internal.string load_existing_payload() { + return import 
"../resources/test_images/cat.jpg" ; + } +}; + diff --git a/src/plib/sys/src/ru.dz/ru.dz.demo.start.ph b/src/plib/sys/src/ru.dz/ru.dz.demo.start.ph index c5216f4e3..f633d884d 100755 --- a/src/plib/sys/src/ru.dz/ru.dz.demo.start.ph +++ b/src/plib/sys/src/ru.dz/ru.dz.demo.start.ph @@ -14,26 +14,26 @@ package .ru.dz.demo; import .phantom.os; import .internal.io.tty; +import .ru.dz.phantom.performance_test; +import .ru.dz.phantom.persistence_test; import .ru.dz.demo.weather; import .ru.dz.demo.chart; +import .ru.dz.demo.wasm; +import .ru.dz.demo.garbage; attribute const * ->!; class start { - var wv : .ru.dz.demo.weather; - // var cv : .ru.dz.demo.chart; + var demo : .ru.dz.demo.garbage; + // var demo : .ru.dz.demo.weather; + // var demo : .ru.dz.demo.wasm; void run(var console : .internal.io.tty) { - //cv = new .ru.dz.demo.chart(); - //cv.start(console); - - wv = new .ru.dz.demo.weather(); - wv.run(console); - + // demo = new .ru.dz.demo.weather(); + demo = new .ru.dz.demo.garbage(); + demo.run(console); } - }; - diff --git a/src/plib/sys/src/ru.dz/ru.dz.phantom.system.shell.ph b/src/plib/sys/src/ru.dz/ru.dz.phantom.system.shell.ph index 27ca1739e..920d30248 100755 --- a/src/plib/sys/src/ru.dz/ru.dz.phantom.system.shell.ph +++ b/src/plib/sys/src/ru.dz/ru.dz.phantom.system.shell.ph @@ -21,10 +21,7 @@ import .ru.dz.phantom.system.runnable; import .ru.dz.phantom.system.shell_callback; //import .test.suite; -import .ru.dz.demo.wasm; import .ru.dz.demo.start; -import .ru.dz.phantom.persistence_test; -import .ru.dz.phantom.performance_test; attribute const * ->!; @@ -54,9 +51,7 @@ class shell extends runnable //var mtx : .internal.mutex; - var demo : .ru.dz.demo.wasm; - // var demo : .ru.dz.demo.start; - // var demo : .ru.dz.phantom.persistence_test; + var demo : .ru.dz.demo.start; /* void init() { @@ -114,9 +109,7 @@ class shell extends runnable stat_conn = new .internal.connection(); stat_conn.connect("stt:"); - // demo = new .ru.dz.demo.start(); - demo = new 
.ru.dz.demo.wasm(); - // demo = new .ru.dz.phantom.persistence_test(); + demo = new .ru.dz.demo.start(); demo.run(console); while(1) diff --git a/src/run/img/phantom.superblock b/src/run/img/phantom.superblock index 52433ea14..ae51bd357 100644 Binary files a/src/run/img/phantom.superblock and b/src/run/img/phantom.superblock differ