#include <inttypes.h>
#include <unistd.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <ucontext.h>

#include "hashtable.h"
#include "snapshot.h"
#include "snapshotimp.h"
#include "mymemory.h"
#include "common.h"

#define FAILURE(mesg) { model_print("failed in the API: %s with errno relative message: %s\n", mesg, strerror(errno)); exit(EXIT_FAILURE); }
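/* Example use: createSharedMemory() below calls FAILURE("mmap") when mmap()
 * fails, which prints the errno message and exits. */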
#ifdef CONFIG_SSDEBUG
#define SSDEBUG		model_print
#else
#define SSDEBUG(...) do { } while (0)
#endif
/* extern declaration definition */
struct SnapShot *snapshotrecord = NULL;
/** PageAlignAddressUpward returns the smallest page-aligned address
 *  greater than or equal to addr.
 */
static void * PageAlignAddressUpward(void *addr)
{
	return (void *)((((uintptr_t)addr) + PAGESIZE - 1) & ~(PAGESIZE - 1));
}
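/* For example, assuming PAGESIZE == 4096: 0x1001 rounds up to 0x2000, while an
 * already-aligned 0x1000 is returned unchanged. */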
#if !USE_MPROTECT_SNAPSHOT
/**
 * These variables are necessary because the stack is a shared region and
 * there exists a race between all processes executing the same function.
 * To avoid that problem, these variables must be allocated in a 'safe' region.
 * The bug was actually observed with forkID; the variables below indicate
 * the various contexts to switch to.
 *
 * @savedSnapshotContext: the point to which a takesnapshot() call should switch.
 * @savedUserSnapshotContext: the point to which the process whose snapshotid equals the rollback id should switch.
 * @snapshotid: a running counter of forked-process snapshot ids; it is incremented and stored in a persistently shared record.
 */
static ucontext_t savedSnapshotContext;
static ucontext_t savedUserSnapshotContext;
static snapshot_id snapshotid = 0;
#else /* USE_MPROTECT_SNAPSHOT */
/** ReturnPageAlignedAddress returns the given address rounded down to
 *  the enclosing page boundary.
 */
static void * ReturnPageAlignedAddress(void *addr)
{
	return (void *)(((uintptr_t)addr) & ~(PAGESIZE - 1));
}
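/* For example, assuming PAGESIZE == 4096: any address in [0x1000, 0x1fff] is
 * rounded down to its page base 0x1000. */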
/** The initSnapShotRecord method initializes the snapshotting data
 *  structures for the mprotect-based snapshot.
 */
static void initSnapShotRecord(unsigned int numbackingpages, unsigned int numsnapshots, unsigned int nummemoryregions)
{
	snapshotrecord = (struct SnapShot *)model_malloc(sizeof(struct SnapShot));
	snapshotrecord->regionsToSnapShot = (struct MemoryRegion *)model_malloc(sizeof(struct MemoryRegion) * nummemoryregions);
	snapshotrecord->backingStoreBasePtr = (void *)model_malloc(sizeof(snapshot_page_t) * (numbackingpages + 1));
	//Page align the backingstorepages
	snapshotrecord->backingStore = (snapshot_page_t *)PageAlignAddressUpward(snapshotrecord->backingStoreBasePtr);
	snapshotrecord->backingRecords = (struct BackingPageRecord *)model_malloc(sizeof(struct BackingPageRecord) * numbackingpages);
	snapshotrecord->snapShots = (struct SnapShotRecord *)model_malloc(sizeof(struct SnapShotRecord) * numsnapshots);
	snapshotrecord->lastSnapShot = 0;
	snapshotrecord->lastBackingPage = 0;
	snapshotrecord->lastRegion = 0;
	snapshotrecord->maxRegions = nummemoryregions;
	snapshotrecord->maxBackingPages = numbackingpages;
	snapshotrecord->maxSnapShots = numsnapshots;
}
/** HandlePF is the page fault handler for mprotect-based snapshotting.
 */
static void HandlePF(int sig, siginfo_t *si, void *unused)
{
	if (si->si_code == SEGV_MAPERR) {
		model_print("Real Fault at %p\n", si->si_addr);
		model_print("For debugging, place breakpoint at: %s:%d\n",
				__FILE__, __LINE__);
		exit(EXIT_FAILURE);
	}
	void* addr = ReturnPageAlignedAddress(si->si_addr);

	unsigned int backingpage = snapshotrecord->lastBackingPage++; //Could run out of pages...
	if (backingpage == snapshotrecord->maxBackingPages) {
		model_print("Out of backing pages at %p\n", si->si_addr);
		exit(EXIT_FAILURE);
	}

	//copy a pristine version of the page into the backing store
	memcpy(&(snapshotrecord->backingStore[backingpage]), addr, sizeof(snapshot_page_t));
	//remember where to copy page back to
	snapshotrecord->backingRecords[backingpage].basePtrOfPage = addr;
	//set protection to read/write so subsequent writes do not fault again
	if (mprotect(addr, sizeof(snapshot_page_t), PROT_READ | PROT_WRITE)) {
		perror("mprotect");
		// Handle error by quitting?
	}
}
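/*
 * Note the copy-on-write cycle: takeSnapshot() write-protects every registered
 * region, so the first write to a page faults into HandlePF, which saves a
 * pristine copy and re-enables writes. Each page is therefore copied at most
 * once per snapshot interval.
 */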
#endif /* USE_MPROTECT_SNAPSHOT */

#if !USE_MPROTECT_SNAPSHOT
static void createSharedMemory()
{
	//step 1. create shared memory.
	void *memMapBase = mmap(0, SHARED_MEMORY_DEFAULT + STACK_SIZE_DEFAULT, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
	if (MAP_FAILED == memMapBase)
		FAILURE("mmap");

	//Setup snapshot record at top of free region
	snapshotrecord = (struct SnapShot *)memMapBase;
	snapshotrecord->mSharedMemoryBase = (void *)((uintptr_t)memMapBase + sizeof(struct SnapShot));
	snapshotrecord->mStackBase = (void *)((uintptr_t)memMapBase + SHARED_MEMORY_DEFAULT);
	snapshotrecord->mStackSize = STACK_SIZE_DEFAULT;
	snapshotrecord->mIDToRollback = -1;
	snapshotrecord->currSnapShotID = 0;
}
/**
 * Create a new mspace pointer for the non-snapshotting (i.e., inter-process
 * shared) memory region. Only for fork-based snapshotting.
 *
 * @return The shared memory mspace
 */
mspace create_shared_mspace()
{
	if (!snapshotrecord)
		createSharedMemory();
	return create_mspace_with_base((void *)(snapshotrecord->mSharedMemoryBase), SHARED_MEMORY_DEFAULT - sizeof(struct SnapShot), 1);
}
#endif
/** The initSnapshotLibrary function initializes the snapshot library.
 *  @param entryPoint the function that should run the program.
 */
#if USE_MPROTECT_SNAPSHOT
void initSnapshotLibrary(unsigned int numbackingpages,
		unsigned int numsnapshots, unsigned int nummemoryregions,
		unsigned int numheappages, VoidFuncPtr entryPoint)
{
	/* Setup a stack for our signal handler.... */
	stack_t ss;
	ss.ss_sp = PageAlignAddressUpward(model_malloc(SIGSTACKSIZE + PAGESIZE - 1));
	ss.ss_size = SIGSTACKSIZE;
	ss.ss_flags = 0;
	sigaltstack(&ss, NULL);

	struct sigaction sa;
	sa.sa_flags = SA_SIGINFO | SA_NODEFER | SA_RESTART | SA_ONSTACK;
	sigemptyset(&sa.sa_mask);
	sa.sa_sigaction = HandlePF;
	if (sigaction(SIGBUS, &sa, NULL) == -1) {
		model_print("SIGACTION CANNOT BE INSTALLED\n");
		exit(EXIT_FAILURE);
	}
	if (sigaction(SIGSEGV, &sa, NULL) == -1) {
		model_print("SIGACTION CANNOT BE INSTALLED\n");
		exit(EXIT_FAILURE);
	}

	initSnapShotRecord(numbackingpages, numsnapshots, nummemoryregions);

	// EVIL HACK: We need to make sure that calls into the HandlePF method don't cause dynamic links
	// The problem is that we end up protecting state in the dynamic linker...
	// Solution is to call our signal handler before we start protecting stuff...
	siginfo_t si;
	memset(&si, 0, sizeof(si));
	si.si_addr = ss.ss_sp;
	HandlePF(SIGSEGV, &si, NULL);
	snapshotrecord->lastBackingPage--; //remove the fake page we copied

	void *basemySpace = model_malloc((numheappages + 1) * PAGESIZE);
	void *pagealignedbase = PageAlignAddressUpward(basemySpace);
	user_snapshot_space = create_mspace_with_base(pagealignedbase, numheappages * PAGESIZE, 1);
	addMemoryRegionToSnapShot(pagealignedbase, numheappages);

	void *base_model_snapshot_space = model_malloc((numheappages + 1) * PAGESIZE);
	pagealignedbase = PageAlignAddressUpward(base_model_snapshot_space);
	model_snapshot_space = create_mspace_with_base(pagealignedbase, numheappages * PAGESIZE, 1);
	addMemoryRegionToSnapShot(pagealignedbase, numheappages);

	entryPoint();
}

#else /* !USE_MPROTECT_SNAPSHOT */
void initSnapshotLibrary(unsigned int numbackingpages,
		unsigned int numsnapshots, unsigned int nummemoryregions,
		unsigned int numheappages, VoidFuncPtr entryPoint)
{
	if (!snapshotrecord)
		createSharedMemory();

	void *base_model_snapshot_space = malloc((numheappages + 1) * PAGESIZE);
	void *pagealignedbase = PageAlignAddressUpward(base_model_snapshot_space);
	model_snapshot_space = create_mspace_with_base(pagealignedbase, numheappages * PAGESIZE, 1);

	//step 2. setup the stack context.
	ucontext_t newContext;
	getcontext(&newContext);
	newContext.uc_stack.ss_sp = snapshotrecord->mStackBase;
	newContext.uc_stack.ss_size = STACK_SIZE_DEFAULT;
	makecontext(&newContext, entryPoint, 0);
	/* switch to a new entryPoint context, on a new stack */
	swapcontext(&savedSnapshotContext, &newContext);

	/* switch back here when takesnapshot is called */
	pid_t forkedID = 0;
	snapshotid = snapshotrecord->currSnapShotID;
	/* This bool indicates that the current process's snapshotid is the
		 same as the id to which the rollback needs to occur */
	bool rollback = false;
	while (true) {
		snapshotrecord->currSnapShotID = snapshotid + 1;
		forkedID = fork();

		if (0 == forkedID) {
			/* If the rollback bool is set, switch to the context we need to
				 return to during a rollback. */
			if (rollback) {
				setcontext(&(snapshotrecord->mContextToRollback));
			} else {
				/* A child process forked by a takesnapshot call should
					 switch back to the takesnapshot context. */
				setcontext(&savedUserSnapshotContext);
			}
		} else {
			int status;
			int retVal;

			SSDEBUG("The process id of child is %d and the process id of this process is %d and snapshot id is %d\n",
					forkedID, getpid(), snapshotid);

			/* retry waitpid if interrupted by a signal */
			do {
				retVal = waitpid(forkedID, &status, 0);
			} while (-1 == retVal && errno == EINTR);

			if (snapshotrecord->mIDToRollback != snapshotid)
				exit(EXIT_SUCCESS);
			rollback = true;
		}
	}
}
#endif /* !USE_MPROTECT_SNAPSHOT */
/** The addMemoryRegionToSnapShot function assumes that addr is page aligned.
 */
void addMemoryRegionToSnapShot(void *addr, unsigned int numPages)
{
#if USE_MPROTECT_SNAPSHOT
	unsigned int memoryregion = snapshotrecord->lastRegion++;
	if (memoryregion == snapshotrecord->maxRegions) {
		model_print("Exceeded supported number of memory regions!\n");
		exit(EXIT_FAILURE);
	}

	snapshotrecord->regionsToSnapShot[memoryregion].basePtr = addr;
	snapshotrecord->regionsToSnapShot[memoryregion].sizeInPages = numPages;
#endif //NOT REQUIRED IN THE CASE OF FORK BASED SNAPSHOTS.
}
/** The takeSnapshot function takes a snapshot.
 *  @return The snapshot identifier.
 */
snapshot_id takeSnapshot()
{
#if USE_MPROTECT_SNAPSHOT
	/* Write-protect all snapshotted regions; the next write to any page will fault into HandlePF. */
	for (unsigned int region = 0; region < snapshotrecord->lastRegion; region++) {
		if (mprotect(snapshotrecord->regionsToSnapShot[region].basePtr, snapshotrecord->regionsToSnapShot[region].sizeInPages * sizeof(snapshot_page_t), PROT_READ) == -1) {
			model_print("Failed to mprotect inside of takeSnapshot\n");
			exit(EXIT_FAILURE);
		}
	}
	unsigned int snapshot = snapshotrecord->lastSnapShot++;
	if (snapshot == snapshotrecord->maxSnapShots) {
		model_print("Out of snapshots\n");
		exit(EXIT_FAILURE);
	}
	snapshotrecord->snapShots[snapshot].firstBackingPage = snapshotrecord->lastBackingPage;
	return snapshot;
#else
	swapcontext(&savedUserSnapshotContext, &savedSnapshotContext);
	SSDEBUG("TAKESNAPSHOT RETURN\n");
	return snapshotid;
#endif
}
/** The rollBack function rolls back to the given snapshot identifier.
 *  @param theID is the snapshot identifier to roll back to.
 */
void rollBack(snapshot_id theID)
{
#if USE_MPROTECT_SNAPSHOT == 2
	/* Fast path: when rolling back the most recent snapshot, just copy the
		 backing pages straight back. */
	if (snapshotrecord->lastSnapShot == (theID + 1)) {
		for (unsigned int page = snapshotrecord->snapShots[theID].firstBackingPage; page < snapshotrecord->lastBackingPage; page++) {
			memcpy(snapshotrecord->backingRecords[page].basePtrOfPage, &snapshotrecord->backingStore[page], sizeof(snapshot_page_t));
		}
		return;
	}
#endif

#if USE_MPROTECT_SNAPSHOT
	HashTable< void *, bool, uintptr_t, 4, model_malloc, model_calloc, model_free> duplicateMap;
	for (unsigned int region = 0; region < snapshotrecord->lastRegion; region++) {
		if (mprotect(snapshotrecord->regionsToSnapShot[region].basePtr, snapshotrecord->regionsToSnapShot[region].sizeInPages * sizeof(snapshot_page_t), PROT_READ | PROT_WRITE) == -1) {
			model_print("Failed to mprotect inside of rollBack\n");
			exit(EXIT_FAILURE);
		}
	}
	/* Restore the oldest backed-up copy of each page; later duplicates of the
		 same page are skipped via duplicateMap. */
	for (unsigned int page = snapshotrecord->snapShots[theID].firstBackingPage; page < snapshotrecord->lastBackingPage; page++) {
		if (!duplicateMap.contains(snapshotrecord->backingRecords[page].basePtrOfPage)) {
			duplicateMap.put(snapshotrecord->backingRecords[page].basePtrOfPage, true);
			memcpy(snapshotrecord->backingRecords[page].basePtrOfPage, &snapshotrecord->backingStore[page], sizeof(snapshot_page_t));
		}
	}
	snapshotrecord->lastSnapShot = theID;
	snapshotrecord->lastBackingPage = snapshotrecord->snapShots[theID].firstBackingPage;
	takeSnapshot(); //Make sure current snapshot is still good...All later ones are cleared
#else
	snapshotrecord->mIDToRollback = theID;
	volatile int sTemp = 0;
	getcontext(&snapshotrecord->mContextToRollback);
	/*
	 * This is used to quit the process on rollback, so that the process
	 * which needs to rollback can quit, allowing the process whose
	 * snapshotid matches the rollbackid to switch to this context and continue....
	 */
	if (!sTemp) {
		sTemp = 1;
		SSDEBUG("Invoked rollback\n");
		exit(EXIT_SUCCESS);
	}
	/* This fix obviates the need for a finalize call, hence fewer dependences for the model-checker.... */
	snapshotrecord->mIDToRollback = -1;
#endif
}
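
/*
 * Illustrative client lifecycle (a sketch, not the model-checker's actual
 * driver; run_program is a hypothetical entry point):
 *
 *   initSnapshotLibrary(pages, snaps, regions, heappages, run_program);
 *   snapshot_id id = takeSnapshot();  // mark a point to return to
 *   ... speculative execution ...
 *   rollBack(id);                     // restore the snapshotted heap (and,
 *                                     // in the fork-based build, control flow)
 */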