diff -dur gnumach.orig/include/mach/mach.defs gnumach.tmp/include/mach/mach.defs --- gnumach.orig/include/mach/mach.defs 2010-05-04 11:56:55.000000000 +0200 +++ gnumach.tmp/include/mach/mach.defs 2010-05-12 16:16:48.000000000 +0200 @@ -409,17 +409,18 @@ reply_to : mach_port_t = MACH_MSG_TYPE_MAKE_SEND_ONCE|polymorphic); -/* obsolete */ -routine xxx_task_get_emulation_vector( - task : task_t; - out vector_start : int; - out emulation_vector: xxx_emulation_vector_t, IsLong); -/* obsolete */ -routine xxx_task_set_emulation_vector( - task : task_t; - vector_start : int; - emulation_vector: xxx_emulation_vector_t, IsLong); +routine memory_object_sync_request( + memory_control : memory_object_control_t; + offset : vm_offset_t; + size : vm_size_t; + should_return : memory_object_return_t; + should_flush : boolean_t; + out dirty : boolean_t; + out start_offset : vm_offset_t; + out end_offset : vm_offset_t); + +skip; /* * Returns information about the host on which the diff -dur gnumach.orig/include/mach/memory_object.defs gnumach.tmp/include/mach/memory_object.defs --- gnumach.orig/include/mach/memory_object.defs 2010-05-04 11:56:55.000000000 +0200 +++ gnumach.tmp/include/mach/memory_object.defs 2010-05-12 16:16:48.000000000 +0200 @@ -85,25 +85,10 @@ simpleroutine memory_object_terminate( memory_object : memory_object_t = MACH_MSG_TYPE_MOVE_SEND - ctype: mach_port_t; + ctype: mach_port_t #if SEQNOS - msgseqno seqno : mach_port_seqno_t; +; msgseqno seqno : mach_port_seqno_t; #endif /* SEQNOS */ - memory_control : memory_object_control_t = - MACH_MSG_TYPE_MOVE_RECEIVE - ctype: mach_port_t -#if KERNEL_USER - /* for compatibility with Mach 2.5 kernels */ - , dealloc -#endif /* KERNEL_USER */ - ; - memory_object_name : memory_object_name_t = - MACH_MSG_TYPE_MOVE_RECEIVE - ctype: mach_port_t -#if KERNEL_USER - /* for compatibility with Mach 2.5 kernels */ - , dealloc -#endif /* KERNEL_USER */ ); /* diff -dur gnumach.orig/vm/memory_object.c gnumach.tmp/vm/memory_object.c --- 
gnumach.orig/vm/memory_object.c 2010-05-04 11:56:55.000000000 +0200 +++ gnumach.tmp/vm/memory_object.c 2010-05-12 16:16:46.000000000 +0200 @@ -892,6 +892,201 @@ } /* + * Routine: memory_object_sync_request [user interface] + * + * Description: + * This is a specialized version of m_o_lock_request for + * syncing memory objects. These are the differences between + * them: + * 1) if dirty pages are found, stops processing + * and sends them to the pager. Sets start_offset + * to the head of first dirty page, and end_offset + * to the end of last one. + * 2) doesn't change protection level. + * 3) as this is an RPC, there's no need for a reply + * port. + * The set of pages is defined by a starting offset + * ("offset") and size ("size"). Only pages with the + * same page alignment as the starting offset are + * considered. + */ +kern_return_t +memory_object_sync_request(object, offset, size, + should_return, should_flush, dirty, + start_offset, end_offset) + register vm_object_t object; + register vm_offset_t offset; + register vm_size_t size; + memory_object_return_t should_return; + boolean_t should_flush; + boolean_t *dirty; + vm_offset_t *start_offset; + vm_offset_t *end_offset; +{ + register vm_page_t m; + vm_offset_t original_offset = offset; + vm_size_t original_size = size; + vm_offset_t paging_offset = 0; + vm_object_t new_object = VM_OBJECT_NULL; + vm_offset_t new_offset = 0; + vm_offset_t first_offset = -1; + vm_offset_t last_offset = offset; + int page_lock_result; + int pageout_action = 0; /* '=0' to quiet lint */ + + /* + * We use DATA_WRITE_MAX and PAGEOUT definitions from + * m_o_lock_request. + */ + vm_page_t holding_pages[DATA_WRITE_MAX]; + + /* + * Check for bogus arguments. + */ + if (object == VM_OBJECT_NULL) + return (KERN_INVALID_ARGUMENT); + + size = round_page(size); + + /* + * Lock the object, and acquire a paging reference to + * prevent the memory_object and control ports from + * being destroyed. 
+ */ + + vm_object_lock(object); + vm_object_paging_begin(object); + offset -= object->paging_offset; + + /* + * Search for a block of continuous pages to return to + * the pager. + */ + for (; + size != 0; + size -= PAGE_SIZE, offset += PAGE_SIZE) + { + /* + * If we have DATA_WRITE_MAX pages, stop searching. + */ + if (new_object != VM_OBJECT_NULL && + new_offset >= PAGE_SIZE * DATA_WRITE_MAX) + break; + + if ((m = vm_page_lookup(object, offset)) == VM_PAGE_NULL) + break; + + page_lock_result = memory_object_lock_page(m, + should_return, + should_flush, + VM_PROT_NO_CHANGE); + + if (page_lock_result == MEMORY_OBJECT_LOCK_RESULT_DONE) { + if (new_object != VM_OBJECT_NULL) { + /* + * We have found a clean page, after a group of + * dirty ones. Stop searching and return them. + */ + break; + } + continue; + } + else if (page_lock_result == + MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK) { + /* + * We need to block to return this page. If + * we have other pages pending, stop searching + * and return them. + */ + if (new_object != VM_OBJECT_NULL) { + break; + } + + printf("mosr: must block, waiting\n"); + + PAGE_ASSERT_WAIT(m, FALSE); + vm_object_unlock(object); + thread_block((void (*)()) 0); + vm_object_lock(object); + + printf("mosr: must block, continuing\n"); + + /* + * Check the same page again. + */ + size += PAGE_SIZE; + offset -= PAGE_SIZE; + continue; + } + else { + /* + * Mark the page busy. + */ + m->busy = TRUE; + vm_object_unlock(object); + + /* + * If we have not already allocated an object + * for a range of pages to be written, do so + * now. + */ + if (new_object == VM_OBJECT_NULL) { + new_object = vm_object_allocate(DATA_WRITE_MAX); + new_offset = 0; + paging_offset = m->offset + + object->paging_offset; + pageout_action = page_lock_result; + } + + /* + * Move or copy the dirty page into the + * new object. 
+ */ + m = vm_pageout_setup(m, + m->offset + object->paging_offset, + new_object, + new_offset, + should_flush); + + /* + * Save the holding page if there is one. + */ + holding_pages[atop(new_offset)] = m; + + if (first_offset == -1) + first_offset = offset; + new_offset += PAGE_SIZE; + last_offset = offset + PAGE_SIZE; + + vm_object_lock(object); + } + } + + /* + * We have completed the scan for applicable pages. + * Clean any pages that have been saved, and set dirty, + * start_offset and end_offset properly. + */ + if (new_object != VM_OBJECT_NULL) { + PAGEOUT_PAGES; + *dirty = TRUE; + *start_offset = first_offset; + *end_offset = last_offset; + } + else { + *dirty = FALSE; + *start_offset = 0; + *end_offset = 0; + } + + vm_object_paging_end(object); + vm_object_unlock(object); + vm_object_deallocate(object); + + return (KERN_SUCCESS); +} + +/* * Old version of memory_object_lock_request. */ kern_return_t diff -dur gnumach.orig/vm/vm_object.c gnumach.tmp/vm/vm_object.c --- gnumach.orig/vm/vm_object.c 2010-05-04 11:56:55.000000000 +0200 +++ gnumach.tmp/vm/vm_object.c 2010-05-12 16:16:46.000000000 +0200 @@ -710,9 +710,13 @@ ip_reference(pager); /* - * Terminate the pager. + * Dealloc ports and terminate the pager. */ - (void) memory_object_terminate(pager, pager_request, pager_name); + + ipc_port_dealloc_kernel(pager_request); + ipc_port_dealloc_kernel(pager_name); + + (void) memory_object_terminate(pager); /* * Wakeup anyone waiting for this terminate