X-Git-Url: http://plrg.eecs.uci.edu/git/?p=model-checker.git;a=blobdiff_plain;f=snapshot.cc;h=e006189c44ba1741208183ea8cbddd222b381dfd;hp=eeeb58c995762ea9c01bf40f2515ef5f956b7d50;hb=b5000a06086de6ea8799168d463f018cab785830;hpb=06c46bd5c02bb68c1a75edc4423e5281e497c6c1

diff --git a/snapshot.cc b/snapshot.cc
index eeeb58c..e006189 100644
--- a/snapshot.cc
+++ b/snapshot.cc
@@ -3,7 +3,7 @@
 #include
 #include
 #include
-#include
+#include "hashtable.h"
 #include
 #include
 #include "snapshot.h"
@@ -17,6 +17,8 @@
 #include
 #include
 
+#include "common.h"
+
 #define FAILURE(mesg) { printf("failed in the API: %s with errno relative message: %s\n", mesg, strerror( errno ) ); exit(EXIT_FAILURE); }
 
 #ifdef CONFIG_SSDEBUG
@@ -26,17 +28,7 @@
 #endif
 
 /* extern declaration definition */
-#if USE_MPROTECT_SNAPSHOT
 struct SnapShot * snapshotrecord = NULL;
-#else
-struct Snapshot * sTheRecord = NULL;
-#endif
-
-#if !USE_MPROTECT_SNAPSHOT
-static ucontext_t savedSnapshotContext;
-static ucontext_t savedUserSnapshotContext;
-static snapshot_id snapshotid = 0;
-#endif
 
 /** PageAlignedAdressUpdate return a page aligned address for the
  * address being added as a side effect the numBytes are also changed.
@@ -45,7 +37,23 @@ static void * PageAlignAddressUpward(void * addr) {
 	return (void *)((((uintptr_t)addr)+PAGESIZE-1)&~(PAGESIZE-1));
 }
 
-#if USE_MPROTECT_SNAPSHOT
+#if !USE_MPROTECT_SNAPSHOT
+/** @statics
+* These variables are necessary because the stack is shared region and
+* there exists a race between all processes executing the same function.
+* To avoid the problem above, we require variables allocated in 'safe' regions.
+* The bug was actually observed with the forkID, these variables below are
+* used to indicate the various contexts to which to switch to.
+*
+* @savedSnapshotContext: contains the point to which takesnapshot() call should switch to.
+* @savedUserSnapshotContext: contains the point to which the process whose snapshotid is equal to the rollbackid should switch to
+* @snapshotid: it is a running counter for the various forked processes snapshotid. it is incremented and set in a persistently shared record
+*/
+static ucontext_t savedSnapshotContext;
+static ucontext_t savedUserSnapshotContext;
+static snapshot_id snapshotid = 0;
+
+#else /* USE_MPROTECT_SNAPSHOT */
 
 /** ReturnPageAlignedAddress returns a page aligned address for the
  * address being added as a side effect the numBytes are also changed.
@@ -55,16 +63,16 @@ static void * ReturnPageAlignedAddress(void * addr) {
 }
 
 /** The initSnapShotRecord method initialized the snapshotting data
- * structures for the mprotect based snapshot.
+ * structures for the mprotect based snapshot.
 */
 static void initSnapShotRecord(unsigned int numbackingpages, unsigned int numsnapshots, unsigned int nummemoryregions) {
-	snapshotrecord=( struct SnapShot * )MYMALLOC(sizeof(struct SnapShot));
-	snapshotrecord->regionsToSnapShot=( struct MemoryRegion * )MYMALLOC(sizeof(struct MemoryRegion)*nummemoryregions);
-	snapshotrecord->backingStoreBasePtr= ( struct SnapShotPage * )MYMALLOC( sizeof( struct SnapShotPage ) * (numbackingpages + 1) );
+	snapshotrecord=( struct SnapShot * )model_malloc(sizeof(struct SnapShot));
+	snapshotrecord->regionsToSnapShot=( struct MemoryRegion * )model_malloc(sizeof(struct MemoryRegion)*nummemoryregions);
+	snapshotrecord->backingStoreBasePtr= ( struct SnapShotPage * )model_malloc( sizeof( struct SnapShotPage ) * (numbackingpages + 1) );
 	//Page align the backingstorepages
 	snapshotrecord->backingStore=( struct SnapShotPage * )PageAlignAddressUpward(snapshotrecord->backingStoreBasePtr);
-	snapshotrecord->backingRecords=( struct BackingPageRecord * )MYMALLOC(sizeof(struct BackingPageRecord)*numbackingpages);
-	snapshotrecord->snapShots= ( struct SnapShotRecord * )MYMALLOC(sizeof(struct SnapShotRecord)*numsnapshots);
+	snapshotrecord->backingRecords=( struct BackingPageRecord * )model_malloc(sizeof(struct BackingPageRecord)*numbackingpages);
+	snapshotrecord->snapShots= ( struct SnapShotRecord * )model_malloc(sizeof(struct SnapShotRecord)*numsnapshots);
 	snapshotrecord->lastSnapShot=0;
 	snapshotrecord->lastBackingPage=0;
 	snapshotrecord->lastRegion=0;
@@ -79,6 +87,7 @@
 static void HandlePF( int sig, siginfo_t *si, void * unused){
 	if( si->si_code == SEGV_MAPERR ){
 		printf("Real Fault at %p\n", si->si_addr);
+		print_trace();
 		exit( EXIT_FAILURE );
 	}
 	void* addr = ReturnPageAlignedAddress(si->si_addr);
@@ -99,40 +108,37 @@ static void HandlePF( int sig, siginfo_t *si, void * unused){
 		// Handle error by quitting?
 	}
 }
-#endif //nothing to handle for non snapshotting case.
+#endif /* USE_MPROTECT_SNAPSHOT */
 
-void createSharedLibrary(){
 #if !USE_MPROTECT_SNAPSHOT
+void createSharedMemory(){
 	//step 1. create shared memory.
-	if ( sTheRecord )
-		return;
-	int fd = shm_open( "/ModelChecker-Snapshotter", O_RDWR | O_CREAT, 0777 ); //universal permissions.
-	if ( -1 == fd )
-		FAILURE("shm_open");
-	if ( -1 == ftruncate( fd, SHARED_MEMORY_DEFAULT + STACK_SIZE_DEFAULT ) )
-		FAILURE( "ftruncate" );
-	void * memMapBase = mmap( 0, SHARED_MEMORY_DEFAULT + STACK_SIZE_DEFAULT, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 );
+	void * memMapBase = mmap( 0, SHARED_MEMORY_DEFAULT + STACK_SIZE_DEFAULT, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_ANON, -1, 0 );
 	if( MAP_FAILED == memMapBase )
 		FAILURE("mmap");
-	sTheRecord = ( struct Snapshot * )memMapBase;
-	sTheRecord->mSharedMemoryBase = (void *)((uintptr_t)memMapBase + sizeof(struct Snapshot));
-	sTheRecord->mStackBase = (void *)((uintptr_t)memMapBase + SHARED_MEMORY_DEFAULT);
-	sTheRecord->mStackSize = STACK_SIZE_DEFAULT;
-	sTheRecord->mIDToRollback = -1;
-	sTheRecord->currSnapShotID = 0;
-#endif
+
+	//Setup snapshot record at top of free region
+	snapshotrecord = ( struct SnapShot * )memMapBase;
+	snapshotrecord->mSharedMemoryBase = (void *)((uintptr_t)memMapBase + sizeof(struct SnapShot));
+	snapshotrecord->mStackBase = (void *)((uintptr_t)memMapBase + SHARED_MEMORY_DEFAULT);
+	snapshotrecord->mStackSize = STACK_SIZE_DEFAULT;
+	snapshotrecord->mIDToRollback = -1;
+	snapshotrecord->currSnapShotID = 0;
 }
+#endif
+
 
-/** The initSnapShotLibrary function initializes the Snapshot library.
+/** The initSnapshotLibrary function initializes the snapshot library.
  * @param entryPoint the function that should run the program.
  */
-void initSnapShotLibrary(unsigned int numbackingpages,
+#if USE_MPROTECT_SNAPSHOT
+
+void initSnapshotLibrary(unsigned int numbackingpages,
 	unsigned int numsnapshots, unsigned int nummemoryregions,
 	unsigned int numheappages, VoidFuncPtr entryPoint) {
-#if USE_MPROTECT_SNAPSHOT
 	/* Setup a stack for our signal handler....  */
 	stack_t ss;
-	ss.ss_sp = MYMALLOC(SIGSTACKSIZE);
+	ss.ss_sp = PageAlignAddressUpward(model_malloc(SIGSTACKSIZE+PAGESIZE-1));
 	ss.ss_size = SIGSTACKSIZE;
 	ss.ss_flags = 0;
 	sigaltstack(&ss, NULL);
@@ -164,75 +170,80 @@ void initSnapShotLibrary(unsigned int numbackingpages,
 	HandlePF(SIGSEGV, &si, NULL);
 	snapshotrecord->lastBackingPage--; //remove the fake page we copied
 
-	basemySpace=MYMALLOC((numheappages+1)*PAGESIZE);
+	void *basemySpace = model_malloc((numheappages+1)*PAGESIZE);
 	void * pagealignedbase=PageAlignAddressUpward(basemySpace);
-	mySpace = create_mspace_with_base(pagealignedbase, numheappages*PAGESIZE, 1 );
+	user_snapshot_space = create_mspace_with_base(pagealignedbase, numheappages * PAGESIZE, 1);
 	addMemoryRegionToSnapShot(pagealignedbase, numheappages);
+
+	void *base_model_snapshot_space = model_malloc((numheappages + 1) * PAGESIZE);
+	pagealignedbase = PageAlignAddressUpward(base_model_snapshot_space);
+	model_snapshot_space = create_mspace_with_base(pagealignedbase, numheappages * PAGESIZE, 1);
+	addMemoryRegionToSnapShot(pagealignedbase, numheappages);
+
 	entryPoint();
+}
 #else
+void initSnapshotLibrary(unsigned int numbackingpages,
+	unsigned int numsnapshots, unsigned int nummemoryregions,
+	unsigned int numheappages, VoidFuncPtr entryPoint) {
+	if (!snapshotrecord)
+		createSharedMemory();
 
-	basemySpace=system_malloc((numheappages+1)*PAGESIZE);
-	void * pagealignedbase=PageAlignAddressUpward(basemySpace);
-	mySpace = create_mspace_with_base(pagealignedbase, numheappages*PAGESIZE, 1 );
-	createSharedLibrary();
+	void *base_model_snapshot_space = malloc((numheappages + 1) * PAGESIZE);
+	void *pagealignedbase = PageAlignAddressUpward(base_model_snapshot_space);
+	model_snapshot_space = create_mspace_with_base(pagealignedbase, numheappages * PAGESIZE, 1);
 
 	//step 2 setup the stack context.
-
-	int alreadySwapped = 0;
-	getcontext( &savedSnapshotContext );
-	if( !alreadySwapped ){
-		alreadySwapped = 1;
-		ucontext_t currentContext, swappedContext, newContext;
-		getcontext( &newContext );
-		newContext.uc_stack.ss_sp = sTheRecord->mStackBase;
-		newContext.uc_stack.ss_size = STACK_SIZE_DEFAULT;
-		newContext.uc_link = &currentContext;
-		makecontext( &newContext, entryPoint, 0 );
-		swapcontext( &swappedContext, &newContext );
-	}
-
-	//add the code to take a snapshot here...
-	//to return to user process, do a second swapcontext...
+	ucontext_t newContext;
+	getcontext( &newContext );
+	newContext.uc_stack.ss_sp = snapshotrecord->mStackBase;
+	newContext.uc_stack.ss_size = STACK_SIZE_DEFAULT;
+	makecontext( &newContext, entryPoint, 0 );
+	/* switch to a new entryPoint context, on a new stack */
+	swapcontext(&savedSnapshotContext, &newContext);
+
+	/* switch back here when takesnapshot is called */
 	pid_t forkedID = 0;
-	snapshotid = sTheRecord->currSnapShotID;
-	bool swapContext = false;
+	snapshotid = snapshotrecord->currSnapShotID;
+	/* This bool indicates that the current process's snapshotid is same
+		 as the id to which the rollback needs to occur */
+
+	bool rollback = false;
 	while( true ){
-		sTheRecord->currSnapShotID=snapshotid+1;
+		snapshotrecord->currSnapShotID=snapshotid+1;
 		forkedID = fork();
+
 		if( 0 == forkedID ){
-			ucontext_t currentContext;
-#if 0
-			int dbg = 0;
-			while( !dbg );
-#endif
-			if( swapContext )
-				swapcontext( &currentContext, &( sTheRecord->mContextToRollback ) );
-			else{
-				swapcontext( &currentContext, &savedUserSnapshotContext );
+			/* If the rollback bool is set, switch to the context we need to
+				 return to during a rollback. */
+			if( rollback) {
+				setcontext( &( snapshotrecord->mContextToRollback ) );
+			} else {
+				/*Child process which is forked as a result of takesnapshot
+					call should switch back to the takesnapshot context*/
+				setcontext( &savedUserSnapshotContext );
 			}
 		} else {
 			int status;
 			int retVal;
 
-			SSDEBUG("The process id of child is %d and the process id of this process is %d and snapshot id is %d",
+			SSDEBUG("The process id of child is %d and the process id of this process is %d and snapshot id is %d\n",
 				forkedID, getpid(), snapshotid );
 
 			do {
 				retVal=waitpid( forkedID, &status, 0 );
 			} while( -1 == retVal && errno == EINTR );
 
-			if( sTheRecord->mIDToRollback != snapshotid )
+			if( snapshotrecord->mIDToRollback != snapshotid ) {
 				exit(EXIT_SUCCESS);
-			else{
-				swapContext = true;
 			}
+			rollback = true;
 		}
 	}
-
-#endif
 }
+#endif
 
-/** The addMemoryRegionToSnapShot function assumes that addr is page aligned.
+/** The addMemoryRegionToSnapShot function assumes that addr is page aligned.
  */
 void addMemoryRegionToSnapShot( void * addr, unsigned int numPages) {
 #if USE_MPROTECT_SNAPSHOT
@@ -269,6 +280,7 @@ snapshot_id takeSnapshot( ){
 		return snapshot;
 #else
 	swapcontext( &savedUserSnapshotContext, &savedSnapshotContext );
+	SSDEBUG("TAKESNAPSHOT RETURN\n");
 	return snapshotid;
 #endif
 }
@@ -277,8 +289,17 @@ snapshot_id takeSnapshot( ){
  * @param theID is the snapshot identifier to rollback to.
 */
 void rollBack( snapshot_id theID ){
+#if USE_MPROTECT_SNAPSHOT==2
+	if (snapshotrecord->lastSnapShot==(theID+1)) {
+		for(unsigned int page=snapshotrecord->snapShots[theID].firstBackingPage; page<snapshotrecord->lastBackingPage; page++) {
+			memcpy(snapshotrecord->backingRecords[page].basePtrOfPage, &snapshotrecord->backingStore[page], sizeof(struct SnapShotPage));
+		}
+		return;
+	}
+#endif
+
 #if USE_MPROTECT_SNAPSHOT
-	std::map< void *, bool, std::less< void * >, MyAlloc< std::pair< const void *, bool > > > duplicateMap;
+	HashTable< void *, bool, uintptr_t, 4, model_malloc, model_calloc, model_free> duplicateMap;
 	for(unsigned int region=0; region<snapshotrecord->lastRegion;region++) {
 		if( mprotect(snapshotrecord->regionsToSnapShot[region].basePtr, snapshotrecord->regionsToSnapShot[region].sizeInPages*sizeof(struct SnapShotPage), PROT_READ | PROT_WRITE ) == -1 ){
 			perror("mprotect");
@@ -287,14 +308,8 @@ void rollBack( snapshot_id theID ){
 		}
 	}
 	for(unsigned int page=snapshotrecord->snapShots[theID].firstBackingPage; page<snapshotrecord->lastBackingPage; page++) {
-		bool oldVal = false;
-		if( duplicateMap.find( snapshotrecord->backingRecords[page].basePtrOfPage ) != duplicateMap.end() ){
-			oldVal = true;
-		}
-		else{
-			duplicateMap[ snapshotrecord->backingRecords[page].basePtrOfPage ] = true;
-		}
-		if( !oldVal ){
+		if( !duplicateMap.contains(snapshotrecord->backingRecords[page].basePtrOfPage )) {
+			duplicateMap.put(snapshotrecord->backingRecords[page].basePtrOfPage, true);
 			memcpy(snapshotrecord->backingRecords[page].basePtrOfPage, &snapshotrecord->backingStore[page], sizeof(struct SnapShotPage));
 		}
 	}
@@ -302,15 +317,25 @@ void rollBack( snapshot_id theID ){
 	snapshotrecord->lastBackingPage=snapshotrecord->snapShots[theID].firstBackingPage;
 	takeSnapshot(); //Make sure current snapshot is still good...All later ones are cleared
 #else
-	sTheRecord->mIDToRollback = theID;
-	int sTemp = 0;
-	getcontext( &sTheRecord->mContextToRollback );
+	snapshotrecord->mIDToRollback = theID;
+	volatile int sTemp = 0;
+	getcontext( &snapshotrecord->mContextToRollback );
+	/*
+	 * This is used to quit the process on rollback, so that the process
+	 * which needs to rollback can quit allowing the process whose
+	 * snapshotid matches the rollbackid to switch to this context and
+	 * continue....
+	 */
 	if( !sTemp ){
 		sTemp = 1;
-		SSDEBUG("Invoked rollback");
+		SSDEBUG("Invoked rollback\n");
 		exit(EXIT_SUCCESS);
 	}
-	sTheRecord->mIDToRollback = -1;
+	/*
+	 * This fix obviates the need for a finalize call. hence less dependences for model-checker....
+	 *
+	 */
+	snapshotrecord->mIDToRollback = -1;
 #endif
 }
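
Note (not part of the patch): the USE_MPROTECT_SNAPSHOT path above write-protects the snapshotted regions, lets HandlePF copy each page into the backing store the first time it is written, and has rollBack() copy those saved pages back. Below is a minimal, self-contained sketch of that copy-on-write idea for a single page. It assumes Linux/POSIX; the names (region, backing, handle_fault) are illustrative, error handling is trimmed, and, like the patch itself, it calls non-async-signal-safe functions from the SIGSEGV handler.

/* cow_sketch.c -- hypothetical example, not part of snapshot.cc */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static char *region;   /* the single page being "snapshotted" */
static char *backing;  /* copy of the page, taken at its first write */
static size_t pagesize;

static void handle_fault(int sig, siginfo_t *si, void *ctx)
{
	(void)sig; (void)ctx;
	/* A real handler (like HandlePF) first checks si_code/si_addr. */
	if ((char *)si->si_addr < region || (char *)si->si_addr >= region + pagesize)
		_exit(EXIT_FAILURE);                    /* genuine fault: bail out */
	memcpy(backing, region, pagesize);          /* save the pristine page... */
	mprotect(region, pagesize, PROT_READ | PROT_WRITE);  /* ...then allow the write */
}

int main(void)
{
	pagesize = (size_t)sysconf(_SC_PAGESIZE);
	region  = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
	               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	backing = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
	               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	strcpy(region, "snapshotted state");

	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_flags = SA_SIGINFO;
	sa.sa_sigaction = handle_fault;
	sigaction(SIGSEGV, &sa, NULL);

	mprotect(region, pagesize, PROT_READ);      /* "take a snapshot" */

	region[0] = 'X';                            /* first write faults; page gets saved */
	printf("after write   : %s\n", region);

	memcpy(region, backing, pagesize);          /* "rollback": restore the saved page */
	printf("after rollback: %s\n", region);
	return 0;
}

The expected output is "Xnapshotted state" followed by "snapshotted state"; the real code generalizes this to many regions, pages, and snapshot records.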
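Note (not part of the patch): with USE_MPROTECT_SNAPSHOT off, the code above implements snapshots with fork(): the forked child keeps executing, the parent waits, and a rollback is realized by letting children whose id does not match mIDToRollback exit, so the waiting parent can fork a fresh child from the saved point. The real code additionally switches to a dedicated stack with getcontext/makecontext/swapcontext and keeps its bookkeeping (snapshotrecord) in a MAP_SHARED|MAP_ANON mapping; the sketch below only mirrors that with a shared counter. The names here (take_snapshot, ROLLBACK_EXIT, shared_attempt) are illustrative.

/* fork_sketch.c -- hypothetical example, not part of snapshot.cc */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#define ROLLBACK_EXIT 42        /* child's way of requesting a rollback */

static int *shared_attempt;     /* lives in a MAP_SHARED page so every forked
                                   copy sees the same value, the way the patch
                                   shares snapshotrecord between processes */

/* Returns every time execution (re)starts from the snapshot point. */
static void take_snapshot(void)
{
	for (;;) {
		pid_t child = fork();
		if (child == 0)
			return;                         /* child: run on past the snapshot */
		int status;
		waitpid(child, &status, 0);             /* parent: wait for the child */
		if (!(WIFEXITED(status) && WEXITSTATUS(status) == ROLLBACK_EXIT))
			exit(EXIT_SUCCESS);             /* no rollback requested: unwind */
		/* rollback requested: loop and fork again from this same point */
	}
}

int main(void)
{
	shared_attempt = mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE,
	                      MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	*shared_attempt = 0;

	take_snapshot();
	printf("running from snapshot, attempt %d, pid %d\n",
	       *shared_attempt, (int)getpid());
	if ((*shared_attempt)++ == 0)
		exit(ROLLBACK_EXIT);                    /* pretend this run must be undone once */
	printf("finished without rolling back\n");
	return 0;
}

Here the first child asks for a rollback, the parent forks a second child from the same point, and that run completes; snapshot.cc layers the same idea per snapshot id, leaving one waiting parent behind each time takeSnapshot() switches back into the fork loop.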