fix git conflict
author weiyu <weiyuluo1232@gmail.com>
Mon, 1 Jul 2019 18:47:38 +0000 (11:47 -0700)
committer weiyu <weiyuluo1232@gmail.com>
Mon, 1 Jul 2019 18:47:38 +0000 (11:47 -0700)
34 files changed:
Makefile
action.h
cmodelint.cc
common.h
execution.cc
execution.h
futex.cc
include/atomic2
include/cmodelint.h
include/condition_variable
include/impatomic.h
include/librace.h
include/memoryorder.h
include/model-assert.h
include/modeltypes.h
include/mutex.h
include/mypthread.h
include/stdatomic2.h
include/threads.h
include/wildcard.h
main.cc
model.cc
model.h
mutex.cc
mymemory.cc
nodestack.h
params.h
printf.c [new file with mode: 0644]
printf.h [new file with mode: 0644]
pthread.cc
snapshot-interface.h
snapshot.cc
test/condvar.cc
threads-model.h

index fcebbf36c21ebcfd7988238d8ee3799fb00e5f27..214873ac0e0a7edacade3df693bbdc94193675c1 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -5,7 +5,7 @@ OBJECTS := libthreads.o schedule.o model.o threads.o librace.o action.o \
           datarace.o impatomic.o cmodelint.o \
           snapshot.o malloc.o mymemory.o common.o mutex.o conditionvariable.o \
           context.o execution.o libannotate.o plugins.o pthread.o futex.o fuzzer.o \
-          sleeps.o history.o funcnode.o
+          sleeps.o history.o funcnode.o printf.o
 
 CPPFLAGS += -Iinclude -I.
 LDFLAGS := -ldl -lrt -rdynamic
@@ -34,7 +34,7 @@ README.html: README.md
        $(MARKDOWN) $< > $@
 
 malloc.o: malloc.c
-       $(CC) -fPIC -c malloc.c -DMSPACES -DONLY_MSPACES -DHAVE_MMAP=0 $(CPPFLAGS) -Wno-unused-variable
+       $(CC) -fPIC -c malloc.c -DMSPACES -DONLY_MSPACES -DHAVE_MMAP=1 $(CPPFLAGS) -Wno-unused-variable
 
 #futex.o: futex.cc
 #      $(CXX) -fPIC -c futex.cc -std=c++11 $(CPPFLAGS)
@@ -92,3 +92,5 @@ pdfs: $(patsubst %.dot,%.pdf,$(wildcard *.dot))
 tabbing:
        uncrustify -c C.cfg --no-backup --replace *.cc
        uncrustify -c C.cfg --no-backup --replace *.h
+       uncrustify -c C.cfg --no-backup --replace include/*
+
index a2a947bde582b0c925a1ee713483a9b961d4c432..c7703094d99ffe12213ec59a44b8c9ef0fdcc7a3 100644 (file)
--- a/action.h
+++ b/action.h
@@ -177,7 +177,7 @@ public:
        /* to accomodate pthread create and join */
        Thread * thread_operand;
        void set_thread_operand(Thread *th) { thread_operand = th; }
-       MEMALLOC
+       SNAPSHOTALLOC
 private:
        const char * get_type_str() const;
        const char * get_mo_str() const;
index 5be511c86f0574b5a707a9ddabaa9e90156348eb..b1a8eb43d3258add2ff3d8af5f821ab566669105 100644 (file)
@@ -6,6 +6,7 @@
 #include "action.h"
 #include "history.h"
 #include "cmodelint.h"
+#include "snapshot-interface.h"
 #include "threads-model.h"
 
 memory_order orders[6] = {
@@ -13,6 +14,14 @@ memory_order orders[6] = {
        memory_order_release, memory_order_acq_rel, memory_order_seq_cst
 };
 
+static void ensureModel() {
+       if (!model) {
+               snapshot_system_init(10000, 1024, 1024, 40000);
+               model = new ModelChecker();
+               model->startChecker();
+       }
+}
+
 /** Performs a read action.*/
 uint64_t model_read_action(void * obj, memory_order ord) {
        return model->switch_to_master(new ModelAction(ATOMIC_READ, ord, obj));
@@ -64,94 +73,83 @@ void model_fence_action(memory_order ord) {
 
 /* ---  helper functions --- */
 uint64_t model_rmwrcas_action_helper(void *obj, int atomic_index, uint64_t oldval, int size, const char *position) {
-       return model->switch_to_master(
-               new ModelAction(ATOMIC_RMWRCAS, position, orders[atomic_index], obj, oldval, size)
-               );
+       ensureModel();
+       return model->switch_to_master(new ModelAction(ATOMIC_RMWRCAS, position, orders[atomic_index], obj, oldval, size));
 }
 
 uint64_t model_rmwr_action_helper(void *obj, int atomic_index, const char *position) {
-       return model->switch_to_master(
-               new ModelAction(ATOMIC_RMWR, position, orders[atomic_index], obj)
-               );
+       ensureModel();
+       return model->switch_to_master(new ModelAction(ATOMIC_RMWR, position, orders[atomic_index], obj));
 }
 
 void model_rmw_action_helper(void *obj, uint64_t val, int atomic_index, const char * position) {
-       model->switch_to_master(
-               new ModelAction(ATOMIC_RMW, position, orders[atomic_index], obj, val)
-               );
+       ensureModel();
+       model->switch_to_master(new ModelAction(ATOMIC_RMW, position, orders[atomic_index], obj, val));
 }
 
 void model_rmwc_action_helper(void *obj, int atomic_index, const char *position) {
-       model->switch_to_master(
-               new ModelAction(ATOMIC_RMWC, position, orders[atomic_index], obj)
-               );
+       ensureModel();
+       model->switch_to_master(new ModelAction(ATOMIC_RMWC, position, orders[atomic_index], obj));
 }
 
 // cds atomic inits
 void cds_atomic_init8(void * obj, uint8_t val, const char * position) {
-       model->switch_to_master(
-               new ModelAction(ATOMIC_INIT, position, memory_order_relaxed, obj, (uint64_t) val)
-               );
+       ensureModel();
+       model->switch_to_master(new ModelAction(ATOMIC_INIT, position, memory_order_relaxed, obj, (uint64_t) val));
 }
 void cds_atomic_init16(void * obj, uint16_t val, const char * position) {
-       model->switch_to_master(
-               new ModelAction(ATOMIC_INIT, position, memory_order_relaxed, obj, (uint64_t) val)
-               );
+       ensureModel();
+       model->switch_to_master(new ModelAction(ATOMIC_INIT, position, memory_order_relaxed, obj, (uint64_t) val));
 }
 void cds_atomic_init32(void * obj, uint32_t val, const char * position) {
-       model->switch_to_master(
-               new ModelAction(ATOMIC_INIT, position, memory_order_relaxed, obj, (uint64_t) val)
-               );
+       ensureModel();
+       model->switch_to_master(new ModelAction(ATOMIC_INIT, position, memory_order_relaxed, obj, (uint64_t) val));
 }
 void cds_atomic_init64(void * obj, uint64_t val, const char * position) {
-       model->switch_to_master(
-               new ModelAction(ATOMIC_INIT, position, memory_order_relaxed, obj, val)
-               );
+       ensureModel();
+       model->switch_to_master(new ModelAction(ATOMIC_INIT, position, memory_order_relaxed, obj, val));
 }
 
 
 // cds atomic loads
 uint8_t cds_atomic_load8(void * obj, int atomic_index, const char * position) {
-       return (uint8_t) ( model->switch_to_master(
-                                                                                        new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj))
-                                                                                );
+       ensureModel();
+       return (uint8_t) model->switch_to_master(
+               new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj));
 }
 uint16_t cds_atomic_load16(void * obj, int atomic_index, const char * position) {
-       return (uint16_t) ( model->switch_to_master(
-                                                                                               new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj))
-                                                                                       );
+       ensureModel();
+       return (uint16_t) model->switch_to_master(
+               new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj));
 }
 uint32_t cds_atomic_load32(void * obj, int atomic_index, const char * position) {
-       return (uint32_t) ( model->switch_to_master(
-                                                                                               new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj))
-                                                                                       );
+       ensureModel();
+       return (uint32_t) model->switch_to_master(
+               new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj)
+               );
 }
 uint64_t cds_atomic_load64(void * obj, int atomic_index, const char * position) {
+       ensureModel();
        return model->switch_to_master(
-               new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj)
-               );
+               new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj));
 }
 
 // cds atomic stores
 void cds_atomic_store8(void * obj, uint8_t val, int atomic_index, const char * position) {
-       model->switch_to_master(
-               new ModelAction(ATOMIC_WRITE, position, orders[atomic_index], obj, (uint64_t) val)
-               );
+       ensureModel();
+       model->switch_to_master(new ModelAction(ATOMIC_WRITE, position, orders[atomic_index], obj, (uint64_t) val));
 }
 void cds_atomic_store16(void * obj, uint16_t val, int atomic_index, const char * position) {
-       model->switch_to_master(
-               new ModelAction(ATOMIC_WRITE, position, orders[atomic_index], obj, (uint64_t) val)
-               );
+       ensureModel();
+       model->switch_to_master(new ModelAction(ATOMIC_WRITE, position, orders[atomic_index], obj, (uint64_t) val));
 }
 void cds_atomic_store32(void * obj, uint32_t val, int atomic_index, const char * position) {
-       model->switch_to_master(
-               new ModelAction(ATOMIC_WRITE, position, orders[atomic_index], obj, (uint64_t) val)
-               );
+       ensureModel();
+       model->switch_to_master(new ModelAction(ATOMIC_WRITE, position, orders[atomic_index], obj, (uint64_t) val));
 }
 void cds_atomic_store64(void * obj, uint64_t val, int atomic_index, const char * position) {
-       model->switch_to_master(
-               new ModelAction(ATOMIC_WRITE, position, orders[atomic_index], obj, val)
-               );
+       ensureModel();
+       model->switch_to_master(new ModelAction(ATOMIC_WRITE, position, orders[atomic_index], obj, (uint64_t) val));
 }
 
 #define _ATOMIC_RMW_(__op__, size, addr, val, atomic_index, position)            \
@@ -366,7 +364,7 @@ void cds_atomic_thread_fence(int atomic_index, const char * position) {
 void cds_func_entry(const char * funcName) {
        if (!model) return;
 
-        Thread * th = thread_current();
+       Thread * th = thread_current();
        uint32_t func_id;
 
        ModelHistory *history = model->get_history();
@@ -385,7 +383,7 @@ void cds_func_entry(const char * funcName) {
 void cds_func_exit(const char * funcName) {
        if (!model) return;
 
-        Thread * th = thread_current();
+       Thread * th = thread_current();
        uint32_t func_id;
 
        ModelHistory *history = model->get_history();
index 81af78644ac0401a92c61ba20d82fa3711e0f401..67ef78634923118c9055961b5fc73c32470ce57b 100644 (file)
--- a/common.h
+++ b/common.h
@@ -6,12 +6,26 @@
 #define __COMMON_H__
 
 #include <stdio.h>
+#include <unistd.h>
 #include "config.h"
+#include "printf.h"
 
 extern int model_out;
 extern int switch_alloc;
 
-#define model_print(fmt, ...) do { switch_alloc = 1; dprintf(model_out, fmt, ## __VA_ARGS__); switch_alloc = 0; } while (0)
+#define model_print(fmt, ...) do { \
+    switch_alloc = 1;             \
+    char mprintbuf[256];                                               \
+    int printbuflen=snprintf_(mprintbuf, 256, fmt, ## __VA_ARGS__);    \
+    int lenleft = printbuflen < 256 ?printbuflen:256;                  \
+    int totalwritten = 0;\
+    while(lenleft) {                                                   \
+      int byteswritten=write(model_out, &mprintbuf[totalwritten], lenleft);\
+      lenleft-=byteswritten;                                           \
+      totalwritten+=byteswritten;                                      \
+    }                                                                  \
+    switch_alloc = 0;                                                  \
+  } while (0)
 
 #ifdef CONFIG_DEBUG
 #define DEBUG(fmt, ...) do { model_print("*** %15s:%-4d %25s() *** " fmt, __FILE__, __LINE__, __func__, ## __VA_ARGS__); } while (0)
@@ -32,7 +46,7 @@ void assert_hook(void);
                        fprintf(stderr, "Error: assertion failed in %s at line %d\n", __FILE__, __LINE__); \
                        /* print_trace(); // Trace printing may cause dynamic memory allocation */ \
                        assert_hook();                           \
-                       exit(EXIT_FAILURE); \
+                       _Exit(EXIT_FAILURE); \
                } \
        } while (0)
 #else
index af98c2f32619364df8bf27520ad2de2244ce7bad..a72cb675bb5dd20df4c56bceecb3f840a21b30d3 100644 (file)
@@ -56,7 +56,7 @@ ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler, NodeStack
        action_trace(),
        thread_map(2),  /* We'll always need at least 2 threads */
        pthread_map(0),
-       pthread_counter(0),
+       pthread_counter(1),
        obj_map(),
        condvar_waiters_map(),
        obj_thrd_map(),
@@ -776,9 +776,6 @@ bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *
 
        /* Last SC fence in the current thread */
        ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
-       ModelAction *last_sc_write = NULL;
-       if (curr->is_seqcst())
-               last_sc_write = get_last_seq_cst_write(curr);
 
        int tid = curr->get_tid();
        ModelAction *prev_same_thread = NULL;
@@ -1301,7 +1298,8 @@ ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
  */
 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
 {
-       return get_parent_action(tid)->get_cv();
+       ModelAction *firstaction=get_parent_action(tid);
+       return firstaction != NULL ? firstaction->get_cv() : NULL;
 }
 
 bool valequals(uint64_t val1, uint64_t val2, int size) {
@@ -1592,18 +1590,6 @@ Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) cons
        /* Do not split atomic RMW */
        if (curr->is_rmwr())
                return get_thread(curr);
-       if (curr->is_write()) {
-               std::memory_order order = curr->get_mo();
-               switch(order) {
-               case std::memory_order_relaxed:
-                       return get_thread(curr);
-               case std::memory_order_release:
-                       return get_thread(curr);
-               default:
-                       return NULL;
-               }
-       }
-
        /* Follow CREATE with the created thread */
        /* which is not needed, because model.cc takes care of this */
        if (curr->get_type() == THREAD_CREATE)
index 9487fd6e95b8dbd015244aab2e111ecc135e11f1..0a36d31cab50f5532f8171c9e3f83935ab39b840 100644 (file)
@@ -103,8 +103,9 @@ public:
        action_list_t * get_action_trace() { return &action_trace; }
        Fuzzer * getFuzzer();
        CycleGraph * const get_mo_graph() { return mo_graph; }
-       HashTable<pthread_cond_t *, cdsc::condition_variable *, uintptr_t, 4> * getCondMap() {return &cond_map;}
-       HashTable<pthread_mutex_t *, cdsc::mutex *, uintptr_t, 4> * getMutexMap() {return &mutex_map;}
+       HashTable<pthread_cond_t *, cdsc::snapcondition_variable *, uintptr_t, 4> * getCondMap() {return &cond_map;}
+       HashTable<pthread_mutex_t *, cdsc::snapmutex *, uintptr_t, 4> * getMutexMap() {return &mutex_map;}
+       ModelAction * check_current_action(ModelAction *curr);
 
        SnapVector<func_id_list_t *> * get_thrd_func_list() { return &thrd_func_list; }
 
@@ -126,7 +127,6 @@ private:
        modelclock_t get_next_seq_num();
 
        bool next_execution();
-       ModelAction * check_current_action(ModelAction *curr);
        bool initialize_curr_action(ModelAction **curr);
        void process_read(ModelAction *curr, SnapVector<const ModelAction *> * rf_set);
        void process_write(ModelAction *curr);
@@ -166,8 +166,8 @@ private:
 
        HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> obj_thrd_map;
 
-       HashTable<pthread_mutex_t *, cdsc::mutex *, uintptr_t, 4> mutex_map;
-       HashTable<pthread_cond_t *, cdsc::condition_variable *, uintptr_t, 4> cond_map;
+       HashTable<pthread_mutex_t *, cdsc::snapmutex *, uintptr_t, 4> mutex_map;
+       HashTable<pthread_cond_t *, cdsc::snapcondition_variable *, uintptr_t, 4> cond_map;
 
        /**
         * List of pending release sequences. Release sequences might be
index b92981119ec5132a00da44348ac5695f104cc31a..0647337bc725b61d0110953e8670bc01784b6a7f 100644 (file)
--- a/futex.cc
+++ b/futex.cc
@@ -45,8 +45,8 @@ namespace std _GLIBCXX_VISIBILITY(default)
 
                ModelExecution *execution = model->get_execution();
 
-               cdsc::condition_variable *v = new cdsc::condition_variable();
-               cdsc::mutex *m = new cdsc::mutex();
+               cdsc::snapcondition_variable *v = new cdsc::snapcondition_variable();
+               cdsc::snapmutex *m = new cdsc::snapmutex();
 
                execution->getCondMap()->put( (pthread_cond_t *) __addr, v);
                execution->getMutexMap()->put( (pthread_mutex_t *) __addr, m);
index 5984e722b3f27fe008e6504137a26ac1b1a2d048..17ab8f28e66dd26b3d63d68825636d9c3f830da4 100644 (file)
@@ -8,4 +8,4 @@
 
 #include "impatomic.h"
 
-#endif /* __CXX_ATOMIC__ */
+#endif /* __CXX_ATOMIC__ */
index 9e82c030900c4ab2e89a8a8c4b2575e908d03ce1..6b180a9f94a30cfdcf535fb8b53e0f422c376ca9 100644 (file)
 #if __cplusplus
 using std::memory_order;
 extern "C" {
+#else
+typedef int bool;
 #endif
 
+
 uint64_t model_read_action(void * obj, memory_order ord);
 void model_write_action(void * obj, memory_order ord, uint64_t val);
 void model_init_action(void * obj, uint64_t val);
@@ -22,7 +25,7 @@ void model_rmwc_action(void *obj, memory_order ord);
 void model_fence_action(memory_order ord);
 
 uint64_t model_rmwr_action_helper(void *obj, int atomic_index, const char *position);
-  uint64_t model_rmwrcas_action_helper(void *obj, int atomic_index, uint64_t oval, int size, const char *position);
+uint64_t model_rmwrcas_action_helper(void *obj, int atomic_index, uint64_t oval, int size, const char *position);
 void model_rmw_action_helper(void *obj, uint64_t val, int atomic_index, const char *position);
 void model_rmwc_action_helper(void *obj, int atomic_index, const char *position);
 // void model_fence_action_helper(int atomic_index);
@@ -77,23 +80,23 @@ uint32_t cds_atomic_fetch_xor32(void* addr, uint32_t val, int atomic_index, cons
 uint64_t cds_atomic_fetch_xor64(void* addr, uint64_t val, int atomic_index, const char * position);
 
 // cds atomic compare and exchange (strong)
-uint8_t cds_atomic_compare_exchange8_v1(void* addr, uint8_t expected, uint8_t desire, 
-               int atomic_index_succ, int atomic_index_fail, const char *position);
-uint16_t cds_atomic_compare_exchange16_v1(void* addr, uint16_t expected, uint16_t desire, 
-               int atomic_index_succ, int atomic_index_fail, const char *position);
-uint32_t cds_atomic_compare_exchange32_v1(void* addr, uint32_t expected, uint32_t desire, 
-               int atomic_index_succ, int atomic_index_fail, const char *position);
-uint64_t cds_atomic_compare_exchange64_v1(void* addr, uint64_t expected, uint64_t desire, 
-               int atomic_index_succ, int atomic_index_fail, const char *position);
-
-bool cds_atomic_compare_exchange8_v2(void* addr, uint8_t* expected, uint8_t desired, 
-               int atomic_index_succ, int atomic_index_fail, const char *position);
-bool cds_atomic_compare_exchange16_v2(void* addr, uint16_t* expected, uint16_t desired, 
-               int atomic_index_succ, int atomic_index_fail, const char *position);
-bool cds_atomic_compare_exchange32_v2(void* addr, uint32_t* expected, uint32_t desired, 
-               int atomic_index_succ, int atomic_index_fail, const char *position);
-bool cds_atomic_compare_exchange64_v2(void* addr, uint64_t* expected, uint64_t desired, 
-               int atomic_index_succ, int atomic_index_fail, const char *position);
+uint8_t cds_atomic_compare_exchange8_v1(void* addr, uint8_t expected, uint8_t desire,
+                                                                                                                                                               int atomic_index_succ, int atomic_index_fail, const char *position);
+uint16_t cds_atomic_compare_exchange16_v1(void* addr, uint16_t expected, uint16_t desire,
+                                                                                                                                                                       int atomic_index_succ, int atomic_index_fail, const char *position);
+uint32_t cds_atomic_compare_exchange32_v1(void* addr, uint32_t expected, uint32_t desire,
+                                                                                                                                                                       int atomic_index_succ, int atomic_index_fail, const char *position);
+uint64_t cds_atomic_compare_exchange64_v1(void* addr, uint64_t expected, uint64_t desire,
+                                                                                                                                                                       int atomic_index_succ, int atomic_index_fail, const char *position);
+
+bool cds_atomic_compare_exchange8_v2(void* addr, uint8_t* expected, uint8_t desired,
+                                                                                                                                                int atomic_index_succ, int atomic_index_fail, const char *position);
+bool cds_atomic_compare_exchange16_v2(void* addr, uint16_t* expected, uint16_t desired,
+                                                                                                                                                       int atomic_index_succ, int atomic_index_fail, const char *position);
+bool cds_atomic_compare_exchange32_v2(void* addr, uint32_t* expected, uint32_t desired,
+                                                                                                                                                       int atomic_index_succ, int atomic_index_fail, const char *position);
+bool cds_atomic_compare_exchange64_v2(void* addr, uint64_t* expected, uint64_t desired,
+                                                                                                                                                       int atomic_index_succ, int atomic_index_fail, const char *position);
 
 // cds atomic thread fence
 void cds_atomic_thread_fence(int atomic_index, const char * position);
index d6a70d47828880bedb84dfcb45e9769d9684fa18..f992dcb1d08a88f016b16219902f49bb5d2cad46 100644 (file)
@@ -9,16 +9,24 @@ namespace cdsc {
        };
 
        class condition_variable {
-       public:
+public:
                condition_variable();
                ~condition_variable();
                void notify_one();
                void notify_all();
                void wait(mutex& lock);
-               
-       private:
+
+private:
                struct condition_variable_state state;
        };
+
+       class snapcondition_variable: public condition_variable {
+public:
+               snapcondition_variable() : condition_variable() {
+               }
+
+               SNAPSHOTALLOC
+       };
 }
 
-#endif /* __CXX_CONDITION_VARIABLE__ */
+#endif /* __CXX_CONDITION_VARIABLE__ */
index 70b77de2ddc28cf2c3a5c356f718666eb0ea5a65..02239d5f9f1b5afb5e66313d3c11c607443fef3b 100644 (file)
@@ -24,16 +24,16 @@ namespace std {
 typedef struct atomic_flag
 {
 #ifdef __cplusplus
-    bool test_and_set( memory_order = memory_order_seq_cst ) volatile;
-    void clear( memory_order = memory_order_seq_cst ) volatile;
+       bool test_and_set( memory_order = memory_order_seq_cst ) volatile;
+       void clear( memory_order = memory_order_seq_cst ) volatile;
 
-    CPP0X( atomic_flag() = default; )
-    CPP0X( atomic_flag( const atomic_flag& ) = delete; )
-    atomic_flag& operator =( const atomic_flag& ) CPP0X(=delete);
+       CPP0X( atomic_flag() = default; )
+       CPP0X( atomic_flag( const atomic_flag& ) = delete; )
+       atomic_flag& operator =( const atomic_flag& ) CPP0X(=delete);
 
-CPP0X(private:)
+       CPP0X(private:)
 #endif
-    bool __f__;
+       bool __f__;
 } atomic_flag;
 
 #define ATOMIC_FLAG_INIT { false }
@@ -44,14 +44,14 @@ extern "C" {
 
 extern bool atomic_flag_test_and_set( volatile atomic_flag* );
 extern bool atomic_flag_test_and_set_explicit
-( volatile atomic_flag*, memory_order );
+       ( volatile atomic_flag*, memory_order );
 extern void atomic_flag_clear( volatile atomic_flag* );
 extern void atomic_flag_clear_explicit
-( volatile atomic_flag*, memory_order );
+       ( volatile atomic_flag*, memory_order );
 extern void __atomic_flag_wait__
-( volatile atomic_flag* );
+       ( volatile atomic_flag* );
 extern void __atomic_flag_wait_explicit__
-( volatile atomic_flag*, memory_order );
+       ( volatile atomic_flag*, memory_order );
 
 #ifdef __cplusplus
 }
@@ -78,55 +78,55 @@ inline void atomic_flag::clear( memory_order __x__ ) volatile
         __g__=flag, __m__=modified, __o__=operation, __r__=result,
         __p__=pointer to field, __v__=value (for single evaluation),
         __x__=memory-ordering, and __y__=memory-ordering.
-*/
+ */
 
 #define _ATOMIC_LOAD_( __a__, __x__ )                                         \
-        ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);   \
-                __typeof__((__a__)->__f__) __r__ = (__typeof__((__a__)->__f__))model_read_action((void *)__p__, __x__);  \
-                __r__; })
+       ({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);   \
+                __typeof__((__a__)->__f__)__r__ = (__typeof__((__a__)->__f__))model_read_action((void *)__p__, __x__);  \
+                __r__; })
 
 #define _ATOMIC_STORE_( __a__, __m__, __x__ )                                 \
-        ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);   \
-                __typeof__(__m__) __v__ = (__m__);                            \
-                model_write_action((void *) __p__,  __x__, (uint64_t) __v__); \
-                __v__ = __v__; /* Silence clang (-Wunused-value) */           \
-         })
+       ({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);   \
+                __typeof__(__m__)__v__ = (__m__);                            \
+                model_write_action((void *) __p__,  __x__, (uint64_t) __v__); \
+                __v__ = __v__; /* Silence clang (-Wunused-value) */           \
+        })
 
 
 #define _ATOMIC_INIT_( __a__, __m__ )                                         \
-        ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);   \
-                __typeof__(__m__) __v__ = (__m__);                            \
-                model_init_action((void *) __p__,  (uint64_t) __v__);         \
-                __v__ = __v__; /* Silence clang (-Wunused-value) */           \
-         })
+       ({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);   \
+                __typeof__(__m__)__v__ = (__m__);                            \
+                model_init_action((void *) __p__,  (uint64_t) __v__);         \
+                __v__ = __v__; /* Silence clang (-Wunused-value) */           \
+        })
 
 #define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ )                         \
-        ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);   \
-        __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
-        __typeof__(__m__) __v__ = (__m__);                                    \
-        __typeof__((__a__)->__f__) __copy__= __old__;                         \
-        __copy__ __o__ __v__;                                                 \
-        model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);          \
-        __old__ = __old__; /* Silence clang (-Wunused-value) */               \
-         })
+       ({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);   \
+                __typeof__((__a__)->__f__)__old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__); \
+                __typeof__(__m__)__v__ = (__m__);                                    \
+                __typeof__((__a__)->__f__)__copy__= __old__;                         \
+                __copy__ __o__ __v__;                                                 \
+                model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);          \
+                __old__ = __old__;     /* Silence clang (-Wunused-value) */               \
+        })
 
 /* No spurious failure for now */
 #define _ATOMIC_CMPSWP_WEAK_ _ATOMIC_CMPSWP_
 
 #define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ )                         \
-        ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);   \
-                __typeof__(__e__) __q__ = (__e__);                            \
-                __typeof__(__m__) __v__ = (__m__);                            \
-                bool __r__;                                                   \
-                __typeof__((__a__)->__f__) __t__=(__typeof__((__a__)->__f__)) model_rmwrcas_action((void *)__p__, __x__, (uint64_t) * __q__, sizeof((__a__)->__f__)); \
-                if (__t__ == * __q__ ) {;                                     \
-                        model_rmw_action((void *)__p__, __x__, (uint64_t) __v__); __r__ = true; } \
-                else {  model_rmwc_action((void *)__p__, __x__); *__q__ = __t__;  __r__ = false;} \
-                __r__; })
+       ({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);   \
+                __typeof__(__e__)__q__ = (__e__);                            \
+                __typeof__(__m__)__v__ = (__m__);                            \
+                bool __r__;                                                   \
+                __typeof__((__a__)->__f__)__t__=(__typeof__((__a__)->__f__))model_rmwrcas_action((void *)__p__, __x__, (uint64_t) *__q__, sizeof((__a__)->__f__)); \
+                if (__t__ == *__q__ ) {;                                     \
+                                                                                                               model_rmw_action((void *)__p__, __x__, (uint64_t) __v__); __r__ = true; } \
+                else {  model_rmwc_action((void *)__p__, __x__); *__q__ = __t__;  __r__ = false;} \
+                __r__; })
 
 #define _ATOMIC_FENCE_( __x__ ) \
        ({ model_fence_action(__x__);})
+
 
 #define ATOMIC_CHAR_LOCK_FREE 1
 #define ATOMIC_CHAR16_T_LOCK_FREE 1
@@ -141,1078 +141,1091 @@ inline void atomic_flag::clear( memory_order __x__ ) volatile
 typedef struct atomic_bool
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( bool, memory_order = memory_order_seq_cst ) volatile;
-    bool load( memory_order = memory_order_seq_cst ) volatile;
-    bool exchange( bool, memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak ( bool&, bool, memory_order, memory_order ) volatile;
-    bool compare_exchange_strong ( bool&, bool, memory_order, memory_order ) volatile;
-    bool compare_exchange_weak ( bool&, bool,
-                        memory_order = memory_order_seq_cst) volatile;
-    bool compare_exchange_strong ( bool&, bool,
-                        memory_order = memory_order_seq_cst) volatile;
-
-    CPP0X( atomic_bool() = delete; )
-    CPP0X( constexpr explicit atomic_bool( bool __v__ ) : __f__( __v__ ) { } )
-    CPP0X( atomic_bool( const atomic_bool& ) = delete; )
-    atomic_bool& operator =( const atomic_bool& ) CPP0X(=delete);
-
-    bool operator =( bool __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_bool*, bool,
-                                       memory_order );
-    friend bool atomic_load_explicit( volatile atomic_bool*, memory_order );
-    friend bool atomic_exchange_explicit( volatile atomic_bool*, bool,
-                                      memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_bool*, bool*, bool,
-                                              memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_bool*, bool*, bool,
-                                              memory_order, memory_order );
-
-CPP0X(private:)
+       bool is_lock_free() const volatile;
+       void store( bool, memory_order = memory_order_seq_cst ) volatile;
+       bool load( memory_order = memory_order_seq_cst ) volatile;
+       bool exchange( bool, memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_weak ( bool&, bool, memory_order, memory_order ) volatile;
+       bool compare_exchange_strong ( bool&, bool, memory_order, memory_order ) volatile;
+       bool compare_exchange_weak ( bool&, bool,
+                                                                                                                        memory_order = memory_order_seq_cst) volatile;
+       bool compare_exchange_strong ( bool&, bool,
+                                                                                                                                memory_order = memory_order_seq_cst) volatile;
+
+       CPP0X( atomic_bool() = delete; )
+       CPP0X( constexpr explicit atomic_bool( bool __v__ ) : __f__( __v__ ) {
+               } )
+       CPP0X( atomic_bool( const atomic_bool& ) = delete; )
+       atomic_bool& operator =( const atomic_bool& ) CPP0X(=delete);
+
+       bool operator =( bool __v__ ) volatile
+       { store( __v__ ); return __v__; }
+
+       friend void atomic_store_explicit( volatile atomic_bool*, bool,
+                                                                                                                                                memory_order );
+       friend bool atomic_load_explicit( volatile atomic_bool*, memory_order );
+       friend bool atomic_exchange_explicit( volatile atomic_bool*, bool,
+                                                                                                                                                               memory_order );
+       friend bool atomic_compare_exchange_weak_explicit( volatile atomic_bool*, bool*, bool,
+                                                                                                                                                                                                                memory_order, memory_order );
+       friend bool atomic_compare_exchange_strong_explicit( volatile atomic_bool*, bool*, bool,
+                                                                                                                                                                                                                        memory_order, memory_order );
+
+       CPP0X(private:)
 #endif
-    bool __f__;
+       bool __f__;
 } atomic_bool;
 
 
 typedef struct atomic_address
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( void*, memory_order = memory_order_seq_cst ) volatile;
-    void* load( memory_order = memory_order_seq_cst ) volatile;
-    void* exchange( void*, memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( void*&, void*, memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( void*&, void*, memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( void*&, void*,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( void*&, void*,
-                       memory_order = memory_order_seq_cst ) volatile;
-    void* fetch_add( ptrdiff_t, memory_order = memory_order_seq_cst ) volatile;
-    void* fetch_sub( ptrdiff_t, memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_address() = default; )
-    CPP0X( constexpr explicit atomic_address( void* __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_address( const atomic_address& ) = delete; )
-    atomic_address& operator =( const atomic_address & ) CPP0X(=delete);
-
-    void* operator =( void* __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    void* operator +=( ptrdiff_t __v__ ) volatile
-    { return fetch_add( __v__ ); }
-
-    void* operator -=( ptrdiff_t __v__ ) volatile
-    { return fetch_sub( __v__ ); }
-
-    friend void atomic_store_explicit( volatile atomic_address*, void*,
-                                       memory_order );
-    friend void* atomic_load_explicit( volatile atomic_address*, memory_order );
-    friend void* atomic_exchange_explicit( volatile atomic_address*, void*,
-                                       memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_address*,
-                              void**, void*, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_address*,
-                              void**, void*, memory_order, memory_order );
-    friend void* atomic_fetch_add_explicit( volatile atomic_address*, ptrdiff_t,
-                                            memory_order );
-    friend void* atomic_fetch_sub_explicit( volatile atomic_address*, ptrdiff_t,
-                                            memory_order );
-
-CPP0X(private:)
+       bool is_lock_free() const volatile;
+       void store( void*, memory_order = memory_order_seq_cst ) volatile;
+       void* load( memory_order = memory_order_seq_cst ) volatile;
+       void* exchange( void*, memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_weak( void*&, void*, memory_order, memory_order ) volatile;
+       bool compare_exchange_strong( void*&, void*, memory_order, memory_order ) volatile;
+       bool compare_exchange_weak( void*&, void*,
+                                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_strong( void*&, void*,
+                                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       void* fetch_add( ptrdiff_t, memory_order = memory_order_seq_cst ) volatile;
+       void* fetch_sub( ptrdiff_t, memory_order = memory_order_seq_cst ) volatile;
+
+       CPP0X( atomic_address() = default; )
+       CPP0X( constexpr explicit atomic_address( void* __v__ ) : __f__( __v__) {
+               } )
+       CPP0X( atomic_address( const atomic_address& ) = delete; )
+       atomic_address& operator =( const atomic_address & ) CPP0X(=delete);
+
+       void* operator =( void* __v__ ) volatile
+       { store( __v__ ); return __v__; }
+
+       void* operator +=( ptrdiff_t __v__ ) volatile
+       { return fetch_add( __v__ ); }
+
+       void* operator -=( ptrdiff_t __v__ ) volatile
+       { return fetch_sub( __v__ ); }
+
+       friend void atomic_store_explicit( volatile atomic_address*, void*,
+                                                                                                                                                memory_order );
+       friend void* atomic_load_explicit( volatile atomic_address*, memory_order );
+       friend void* atomic_exchange_explicit( volatile atomic_address*, void*,
+                                                                                                                                                                memory_order );
+       friend bool atomic_compare_exchange_weak_explicit( volatile atomic_address*,
+                                                                                                                                                                                                                void**, void*, memory_order, memory_order );
+       friend bool atomic_compare_exchange_strong_explicit( volatile atomic_address*,
+                                                                                                                                                                                                                        void**, void*, memory_order, memory_order );
+       friend void* atomic_fetch_add_explicit( volatile atomic_address*, ptrdiff_t,
+                                                                                                                                                                       memory_order );
+       friend void* atomic_fetch_sub_explicit( volatile atomic_address*, ptrdiff_t,
+                                                                                                                                                                       memory_order );
+
+       CPP0X(private:)
 #endif
-    void* __f__;
+       void* __f__;
 } atomic_address;
 
 
 typedef struct atomic_char
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( char,
-                memory_order = memory_order_seq_cst ) volatile;
-    char load( memory_order = memory_order_seq_cst ) volatile;
-    char exchange( char,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( char&, char,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( char&, char,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( char&, char,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( char&, char,
-                       memory_order = memory_order_seq_cst ) volatile;
-    char fetch_add( char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    char fetch_sub( char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    char fetch_and( char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    char fetch_or( char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    char fetch_xor( char,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_char() = default; )
-    CPP0X( constexpr atomic_char( char __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_char( const atomic_char& ) = delete; )
-    atomic_char& operator =( const atomic_char& ) CPP0X(=delete);
-
-    char operator =( char __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    char operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    char operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    char operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    char operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    char operator +=( char __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    char operator -=( char __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    char operator &=( char __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    char operator |=( char __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    char operator ^=( char __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_char*, char,
-                                       memory_order );
-    friend char atomic_load_explicit( volatile atomic_char*,
-                                             memory_order );
-    friend char atomic_exchange_explicit( volatile atomic_char*,
-                                             char, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_char*,
-                      char*, char, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_char*,
-                      char*, char, memory_order, memory_order );
-    friend char atomic_fetch_add_explicit( volatile atomic_char*,
-                                                  char, memory_order );
-    friend char atomic_fetch_sub_explicit( volatile atomic_char*,
-                                                  char, memory_order );
-    friend char atomic_fetch_and_explicit( volatile atomic_char*,
-                                                  char, memory_order );
-    friend char atomic_fetch_or_explicit(  volatile atomic_char*,
-                                                  char, memory_order );
-    friend char atomic_fetch_xor_explicit( volatile atomic_char*,
-                                                  char, memory_order );
-
-CPP0X(private:)
+       bool is_lock_free() const volatile;
+       void store( char,
+                                                       memory_order = memory_order_seq_cst ) volatile;
+       char load( memory_order = memory_order_seq_cst ) volatile;
+       char exchange( char,
+                                                                memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_weak( char&, char,
+                                                                                                                       memory_order, memory_order ) volatile;
+       bool compare_exchange_strong( char&, char,
+                                                                                                                               memory_order, memory_order ) volatile;
+       bool compare_exchange_weak( char&, char,
+                                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_strong( char&, char,
+                                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       char fetch_add( char,
+                                                                       memory_order = memory_order_seq_cst ) volatile;
+       char fetch_sub( char,
+                                                                       memory_order = memory_order_seq_cst ) volatile;
+       char fetch_and( char,
+                                                                       memory_order = memory_order_seq_cst ) volatile;
+       char fetch_or( char,
+                                                                memory_order = memory_order_seq_cst ) volatile;
+       char fetch_xor( char,
+                                                                       memory_order = memory_order_seq_cst ) volatile;
+
+       CPP0X( atomic_char() = default; )
+       CPP0X( constexpr atomic_char( char __v__ ) : __f__( __v__) {
+               } )
+       CPP0X( atomic_char( const atomic_char& ) = delete; )
+       atomic_char& operator =( const atomic_char& ) CPP0X(=delete);
+
+       char operator =( char __v__ ) volatile
+       { store( __v__ ); return __v__; }
+
+       char operator ++( int ) volatile
+       { return fetch_add( 1 ); }
+
+       char operator --( int ) volatile
+       { return fetch_sub( 1 ); }
+
+       char operator ++() volatile
+       { return fetch_add( 1 ) + 1; }
+
+       char operator --() volatile
+       { return fetch_sub( 1 ) - 1; }
+
+       char operator +=( char __v__ ) volatile
+       { return fetch_add( __v__ ) + __v__; }
+
+       char operator -=( char __v__ ) volatile
+       { return fetch_sub( __v__ ) - __v__; }
+
+       char operator &=( char __v__ ) volatile
+       { return fetch_and( __v__ ) & __v__; }
+
+       char operator |=( char __v__ ) volatile
+       { return fetch_or( __v__ ) | __v__; }
+
+       char operator ^=( char __v__ ) volatile
+       { return fetch_xor( __v__ ) ^ __v__; }
+
+       friend void atomic_store_explicit( volatile atomic_char*, char,
+                                                                                                                                                memory_order );
+       friend char atomic_load_explicit( volatile atomic_char*,
+                                                                                                                                               memory_order );
+       friend char atomic_exchange_explicit( volatile atomic_char*,
+                                                                                                                                                               char, memory_order );
+       friend bool atomic_compare_exchange_weak_explicit( volatile atomic_char*,
+                                                                                                                                                                                                                char*, char, memory_order, memory_order );
+       friend bool atomic_compare_exchange_strong_explicit( volatile atomic_char*,
+                                                                                                                                                                                                                        char*, char, memory_order, memory_order );
+       friend char atomic_fetch_add_explicit( volatile atomic_char*,
+                                                                                                                                                                char, memory_order );
+       friend char atomic_fetch_sub_explicit( volatile atomic_char*,
+                                                                                                                                                                char, memory_order );
+       friend char atomic_fetch_and_explicit( volatile atomic_char*,
+                                                                                                                                                                char, memory_order );
+       friend char atomic_fetch_or_explicit(  volatile atomic_char*,
+                                                                                                                                                                char, memory_order );
+       friend char atomic_fetch_xor_explicit( volatile atomic_char*,
+                                                                                                                                                                char, memory_order );
+
+       CPP0X(private:)
 #endif
-    char __f__;
+       char __f__;
 } atomic_char;
 
 
 typedef struct atomic_schar
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( signed char,
-                memory_order = memory_order_seq_cst ) volatile;
-    signed char load( memory_order = memory_order_seq_cst ) volatile;
-    signed char exchange( signed char,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( signed char&, signed char,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( signed char&, signed char,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( signed char&, signed char,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( signed char&, signed char,
-                       memory_order = memory_order_seq_cst ) volatile;
-    signed char fetch_add( signed char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    signed char fetch_sub( signed char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    signed char fetch_and( signed char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    signed char fetch_or( signed char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    signed char fetch_xor( signed char,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_schar() = default; )
-    CPP0X( constexpr atomic_schar( signed char __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_schar( const atomic_schar& ) = delete; )
-    atomic_schar& operator =( const atomic_schar& ) CPP0X(=delete);
-
-    signed char operator =( signed char __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    signed char operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    signed char operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    signed char operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    signed char operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    signed char operator +=( signed char __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    signed char operator -=( signed char __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    signed char operator &=( signed char __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    signed char operator |=( signed char __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    signed char operator ^=( signed char __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_schar*, signed char,
-                                       memory_order );
-    friend signed char atomic_load_explicit( volatile atomic_schar*,
-                                             memory_order );
-    friend signed char atomic_exchange_explicit( volatile atomic_schar*,
-                                             signed char, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_schar*,
-                      signed char*, signed char, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_schar*,
-                      signed char*, signed char, memory_order, memory_order );
-    friend signed char atomic_fetch_add_explicit( volatile atomic_schar*,
-                                                  signed char, memory_order );
-    friend signed char atomic_fetch_sub_explicit( volatile atomic_schar*,
-                                                  signed char, memory_order );
-    friend signed char atomic_fetch_and_explicit( volatile atomic_schar*,
-                                                  signed char, memory_order );
-    friend signed char atomic_fetch_or_explicit(  volatile atomic_schar*,
-                                                  signed char, memory_order );
-    friend signed char atomic_fetch_xor_explicit( volatile atomic_schar*,
-                                                  signed char, memory_order );
-
-CPP0X(private:)
+       bool is_lock_free() const volatile;
+       void store( signed char,
+                                                       memory_order = memory_order_seq_cst ) volatile;
+       signed char load( memory_order = memory_order_seq_cst ) volatile;
+       signed char exchange( signed char,
+                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_weak( signed char&, signed char,
+                                                                                                                       memory_order, memory_order ) volatile;
+       bool compare_exchange_strong( signed char&, signed char,
+                                                                                                                               memory_order, memory_order ) volatile;
+       bool compare_exchange_weak( signed char&, signed char,
+                                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_strong( signed char&, signed char,
+                                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       signed char fetch_add( signed char,
+                                                                                                memory_order = memory_order_seq_cst ) volatile;
+       signed char fetch_sub( signed char,
+                                                                                                memory_order = memory_order_seq_cst ) volatile;
+       signed char fetch_and( signed char,
+                                                                                                memory_order = memory_order_seq_cst ) volatile;
+       signed char fetch_or( signed char,
+                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       signed char fetch_xor( signed char,
+                                                                                                memory_order = memory_order_seq_cst ) volatile;
+
+       CPP0X( atomic_schar() = default; )
+       CPP0X( constexpr atomic_schar( signed char __v__ ) : __f__( __v__) {
+               } )
+       CPP0X( atomic_schar( const atomic_schar& ) = delete; )
+       atomic_schar& operator =( const atomic_schar& ) CPP0X(=delete);
+
+       signed char operator =( signed char __v__ ) volatile
+       { store( __v__ ); return __v__; }
+
+       signed char operator ++( int ) volatile
+       { return fetch_add( 1 ); }
+
+       signed char operator --( int ) volatile
+       { return fetch_sub( 1 ); }
+
+       signed char operator ++() volatile
+       { return fetch_add( 1 ) + 1; }
+
+       signed char operator --() volatile
+       { return fetch_sub( 1 ) - 1; }
+
+       signed char operator +=( signed char __v__ ) volatile
+       { return fetch_add( __v__ ) + __v__; }
+
+       signed char operator -=( signed char __v__ ) volatile
+       { return fetch_sub( __v__ ) - __v__; }
+
+       signed char operator &=( signed char __v__ ) volatile
+       { return fetch_and( __v__ ) & __v__; }
+
+       signed char operator |=( signed char __v__ ) volatile
+       { return fetch_or( __v__ ) | __v__; }
+
+       signed char operator ^=( signed char __v__ ) volatile
+       { return fetch_xor( __v__ ) ^ __v__; }
+
+       friend void atomic_store_explicit( volatile atomic_schar*, signed char,
+                                                                                                                                                memory_order );
+       friend signed char atomic_load_explicit( volatile atomic_schar*,
+                                                                                                                                                                        memory_order );
+       friend signed char atomic_exchange_explicit( volatile atomic_schar*,
+                                                                                                                                                                                        signed char, memory_order );
+       friend bool atomic_compare_exchange_weak_explicit( volatile atomic_schar*,
+                                                                                                                                                                                                                signed char*, signed char, memory_order, memory_order );
+       friend bool atomic_compare_exchange_strong_explicit( volatile atomic_schar*,
+                                                                                                                                                                                                                        signed char*, signed char, memory_order, memory_order );
+       friend signed char atomic_fetch_add_explicit( volatile atomic_schar*,
+                                                                                                                                                                                               signed char, memory_order );
+       friend signed char atomic_fetch_sub_explicit( volatile atomic_schar*,
+                                                                                                                                                                                               signed char, memory_order );
+       friend signed char atomic_fetch_and_explicit( volatile atomic_schar*,
+                                                                                                                                                                                               signed char, memory_order );
+       friend signed char atomic_fetch_or_explicit(  volatile atomic_schar*,
+                                                                                                                                                                                               signed char, memory_order );
+       friend signed char atomic_fetch_xor_explicit( volatile atomic_schar*,
+                                                                                                                                                                                               signed char, memory_order );
+
+       CPP0X(private:)
 #endif
-    signed char __f__;
+       signed char __f__;
 } atomic_schar;
 
 
 typedef struct atomic_uchar
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( unsigned char,
-                memory_order = memory_order_seq_cst ) volatile;
-    unsigned char load( memory_order = memory_order_seq_cst ) volatile;
-    unsigned char exchange( unsigned char,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( unsigned char&, unsigned char,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( unsigned char&, unsigned char,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( unsigned char&, unsigned char,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( unsigned char&, unsigned char,
-                       memory_order = memory_order_seq_cst ) volatile;
-    unsigned char fetch_add( unsigned char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned char fetch_sub( unsigned char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned char fetch_and( unsigned char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned char fetch_or( unsigned char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned char fetch_xor( unsigned char,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_uchar() = default; )
-    CPP0X( constexpr atomic_uchar( unsigned char __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_uchar( const atomic_uchar& ) = delete; )
-    atomic_uchar& operator =( const atomic_uchar& ) CPP0X(=delete);
-
-    unsigned char operator =( unsigned char __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    unsigned char operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    unsigned char operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    unsigned char operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    unsigned char operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    unsigned char operator +=( unsigned char __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    unsigned char operator -=( unsigned char __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    unsigned char operator &=( unsigned char __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    unsigned char operator |=( unsigned char __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    unsigned char operator ^=( unsigned char __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_uchar*, unsigned char,
-                                       memory_order );
-    friend unsigned char atomic_load_explicit( volatile atomic_uchar*,
-                                             memory_order );
-    friend unsigned char atomic_exchange_explicit( volatile atomic_uchar*,
-                                             unsigned char, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_uchar*,
-                      unsigned char*, unsigned char, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_uchar*,
-                      unsigned char*, unsigned char, memory_order, memory_order );
-    friend unsigned char atomic_fetch_add_explicit( volatile atomic_uchar*,
-                                                  unsigned char, memory_order );
-    friend unsigned char atomic_fetch_sub_explicit( volatile atomic_uchar*,
-                                                  unsigned char, memory_order );
-    friend unsigned char atomic_fetch_and_explicit( volatile atomic_uchar*,
-                                                  unsigned char, memory_order );
-    friend unsigned char atomic_fetch_or_explicit(  volatile atomic_uchar*,
-                                                  unsigned char, memory_order );
-    friend unsigned char atomic_fetch_xor_explicit( volatile atomic_uchar*,
-                                                  unsigned char, memory_order );
-
-CPP0X(private:)
+       bool is_lock_free() const volatile;
+       void store( unsigned char,
+                                                       memory_order = memory_order_seq_cst ) volatile;
+       unsigned char load( memory_order = memory_order_seq_cst ) volatile;
+       unsigned char exchange( unsigned char,
+                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_weak( unsigned char&, unsigned char,
+                                                                                                                       memory_order, memory_order ) volatile;
+       bool compare_exchange_strong( unsigned char&, unsigned char,
+                                                                                                                               memory_order, memory_order ) volatile;
+       bool compare_exchange_weak( unsigned char&, unsigned char,
+                                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_strong( unsigned char&, unsigned char,
+                                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       unsigned char fetch_add( unsigned char,
+                                                                                                        memory_order = memory_order_seq_cst ) volatile;
+       unsigned char fetch_sub( unsigned char,
+                                                                                                        memory_order = memory_order_seq_cst ) volatile;
+       unsigned char fetch_and( unsigned char,
+                                                                                                        memory_order = memory_order_seq_cst ) volatile;
+       unsigned char fetch_or( unsigned char,
+                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       unsigned char fetch_xor( unsigned char,
+                                                                                                        memory_order = memory_order_seq_cst ) volatile;
+
+       CPP0X( atomic_uchar() = default; )
+       CPP0X( constexpr atomic_uchar( unsigned char __v__ ) : __f__( __v__) {
+               } )
+       CPP0X( atomic_uchar( const atomic_uchar& ) = delete; )
+       atomic_uchar& operator =( const atomic_uchar& ) CPP0X(=delete);
+
+       unsigned char operator =( unsigned char __v__ ) volatile
+       { store( __v__ ); return __v__; }
+
+       unsigned char operator ++( int ) volatile
+       { return fetch_add( 1 ); }
+
+       unsigned char operator --( int ) volatile
+       { return fetch_sub( 1 ); }
+
+       unsigned char operator ++() volatile
+       { return fetch_add( 1 ) + 1; }
+
+       unsigned char operator --() volatile
+       { return fetch_sub( 1 ) - 1; }
+
+       unsigned char operator +=( unsigned char __v__ ) volatile
+       { return fetch_add( __v__ ) + __v__; }
+
+       unsigned char operator -=( unsigned char __v__ ) volatile
+       { return fetch_sub( __v__ ) - __v__; }
+
+       unsigned char operator &=( unsigned char __v__ ) volatile
+       { return fetch_and( __v__ ) & __v__; }
+
+       unsigned char operator |=( unsigned char __v__ ) volatile
+       { return fetch_or( __v__ ) | __v__; }
+
+       unsigned char operator ^=( unsigned char __v__ ) volatile
+       { return fetch_xor( __v__ ) ^ __v__; }
+
+       friend void atomic_store_explicit( volatile atomic_uchar*, unsigned char,
+                                                                                                                                                memory_order );
+       friend unsigned char atomic_load_explicit( volatile atomic_uchar*,
+                                                                                                                                                                                memory_order );
+       friend unsigned char atomic_exchange_explicit( volatile atomic_uchar*,
+                                                                                                                                                                                                unsigned char, memory_order );
+       friend bool atomic_compare_exchange_weak_explicit( volatile atomic_uchar*,
+                                                                                                                                                                                                                unsigned char*, unsigned char, memory_order, memory_order );
+       friend bool atomic_compare_exchange_strong_explicit( volatile atomic_uchar*,
+                                                                                                                                                                                                                        unsigned char*, unsigned char, memory_order, memory_order );
+       friend unsigned char atomic_fetch_add_explicit( volatile atomic_uchar*,
+                                                                                                                                                                                                       unsigned char, memory_order );
+       friend unsigned char atomic_fetch_sub_explicit( volatile atomic_uchar*,
+                                                                                                                                                                                                       unsigned char, memory_order );
+       friend unsigned char atomic_fetch_and_explicit( volatile atomic_uchar*,
+                                                                                                                                                                                                       unsigned char, memory_order );
+       friend unsigned char atomic_fetch_or_explicit(  volatile atomic_uchar*,
+                                                                                                                                                                                                       unsigned char, memory_order );
+       friend unsigned char atomic_fetch_xor_explicit( volatile atomic_uchar*,
+                                                                                                                                                                                                       unsigned char, memory_order );
+
+       CPP0X(private:)
 #endif
-    unsigned char __f__;
+       unsigned char __f__;
 } atomic_uchar;
 
 
 typedef struct atomic_short
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( short,
-                memory_order = memory_order_seq_cst ) volatile;
-    short load( memory_order = memory_order_seq_cst ) volatile;
-    short exchange( short,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( short&, short,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( short&, short,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( short&, short,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( short&, short,
-                       memory_order = memory_order_seq_cst ) volatile;
-    short fetch_add( short,
-                           memory_order = memory_order_seq_cst ) volatile;
-    short fetch_sub( short,
-                           memory_order = memory_order_seq_cst ) volatile;
-    short fetch_and( short,
-                           memory_order = memory_order_seq_cst ) volatile;
-    short fetch_or( short,
-                           memory_order = memory_order_seq_cst ) volatile;
-    short fetch_xor( short,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_short() = default; )
-    CPP0X( constexpr atomic_short( short __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_short( const atomic_short& ) = delete; )
-    atomic_short& operator =( const atomic_short& ) CPP0X(=delete);
-
-    short operator =( short __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    short operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    short operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    short operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    short operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    short operator +=( short __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    short operator -=( short __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    short operator &=( short __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    short operator |=( short __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    short operator ^=( short __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_short*, short,
-                                       memory_order );
-    friend short atomic_load_explicit( volatile atomic_short*,
-                                             memory_order );
-    friend short atomic_exchange_explicit( volatile atomic_short*,
-                                             short, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_short*,
-                      short*, short, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_short*,
-                      short*, short, memory_order, memory_order );
-    friend short atomic_fetch_add_explicit( volatile atomic_short*,
-                                                  short, memory_order );
-    friend short atomic_fetch_sub_explicit( volatile atomic_short*,
-                                                  short, memory_order );
-    friend short atomic_fetch_and_explicit( volatile atomic_short*,
-                                                  short, memory_order );
-    friend short atomic_fetch_or_explicit(  volatile atomic_short*,
-                                                  short, memory_order );
-    friend short atomic_fetch_xor_explicit( volatile atomic_short*,
-                                                  short, memory_order );
-
-CPP0X(private:)
+       bool is_lock_free() const volatile;
+       void store( short,
+                                                       memory_order = memory_order_seq_cst ) volatile;
+       short load( memory_order = memory_order_seq_cst ) volatile;
+       short exchange( short,
+                                                                       memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_weak( short&, short,
+                                                                                                                       memory_order, memory_order ) volatile;
+       bool compare_exchange_strong( short&, short,
+                                                                                                                               memory_order, memory_order ) volatile;
+       bool compare_exchange_weak( short&, short,
+                                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_strong( short&, short,
+                                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       short fetch_add( short,
+                                                                        memory_order = memory_order_seq_cst ) volatile;
+       short fetch_sub( short,
+                                                                        memory_order = memory_order_seq_cst ) volatile;
+       short fetch_and( short,
+                                                                        memory_order = memory_order_seq_cst ) volatile;
+       short fetch_or( short,
+                                                                       memory_order = memory_order_seq_cst ) volatile;
+       short fetch_xor( short,
+                                                                        memory_order = memory_order_seq_cst ) volatile;
+
+       CPP0X( atomic_short() = default; )
+       CPP0X( constexpr atomic_short( short __v__ ) : __f__( __v__) {
+               } )
+       CPP0X( atomic_short( const atomic_short& ) = delete; )
+       atomic_short& operator =( const atomic_short& ) CPP0X(=delete);
+
+       short operator =( short __v__ ) volatile
+       { store( __v__ ); return __v__; }
+
+       short operator ++( int ) volatile
+       { return fetch_add( 1 ); }
+
+       short operator --( int ) volatile
+       { return fetch_sub( 1 ); }
+
+       short operator ++() volatile
+       { return fetch_add( 1 ) + 1; }
+
+       short operator --() volatile
+       { return fetch_sub( 1 ) - 1; }
+
+       short operator +=( short __v__ ) volatile
+       { return fetch_add( __v__ ) + __v__; }
+
+       short operator -=( short __v__ ) volatile
+       { return fetch_sub( __v__ ) - __v__; }
+
+       short operator &=( short __v__ ) volatile
+       { return fetch_and( __v__ ) & __v__; }
+
+       short operator |=( short __v__ ) volatile
+       { return fetch_or( __v__ ) | __v__; }
+
+       short operator ^=( short __v__ ) volatile
+       { return fetch_xor( __v__ ) ^ __v__; }
+
+       friend void atomic_store_explicit( volatile atomic_short*, short,
+                                                                                                                                                memory_order );
+       friend short atomic_load_explicit( volatile atomic_short*,
+                                                                                                                                                memory_order );
+       friend short atomic_exchange_explicit( volatile atomic_short*,
+                                                                                                                                                                short, memory_order );
+       friend bool atomic_compare_exchange_weak_explicit( volatile atomic_short*,
+                                                                                                                                                                                                                short*, short, memory_order, memory_order );
+       friend bool atomic_compare_exchange_strong_explicit( volatile atomic_short*,
+                                                                                                                                                                                                                        short*, short, memory_order, memory_order );
+       friend short atomic_fetch_add_explicit( volatile atomic_short*,
+                                                                                                                                                                       short, memory_order );
+       friend short atomic_fetch_sub_explicit( volatile atomic_short*,
+                                                                                                                                                                       short, memory_order );
+       friend short atomic_fetch_and_explicit( volatile atomic_short*,
+                                                                                                                                                                       short, memory_order );
+       friend short atomic_fetch_or_explicit(  volatile atomic_short*,
+                                                                                                                                                                       short, memory_order );
+       friend short atomic_fetch_xor_explicit( volatile atomic_short*,
+                                                                                                                                                                       short, memory_order );
+
+       CPP0X(private:)
 #endif
-    short __f__;
+       short __f__;
 } atomic_short;
 
 
 typedef struct atomic_ushort
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( unsigned short,
-                memory_order = memory_order_seq_cst ) volatile;
-    unsigned short load( memory_order = memory_order_seq_cst ) volatile;
-    unsigned short exchange( unsigned short,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( unsigned short&, unsigned short,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( unsigned short&, unsigned short,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( unsigned short&, unsigned short,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( unsigned short&, unsigned short,
-                       memory_order = memory_order_seq_cst ) volatile;
-    unsigned short fetch_add( unsigned short,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned short fetch_sub( unsigned short,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned short fetch_and( unsigned short,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned short fetch_or( unsigned short,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned short fetch_xor( unsigned short,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_ushort() = default; )
-    CPP0X( constexpr atomic_ushort( unsigned short __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_ushort( const atomic_ushort& ) = delete; )
-    atomic_ushort& operator =( const atomic_ushort& ) CPP0X(=delete);
-
-    unsigned short operator =( unsigned short __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    unsigned short operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    unsigned short operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    unsigned short operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    unsigned short operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    unsigned short operator +=( unsigned short __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    unsigned short operator -=( unsigned short __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    unsigned short operator &=( unsigned short __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    unsigned short operator |=( unsigned short __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    unsigned short operator ^=( unsigned short __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_ushort*, unsigned short,
-                                       memory_order );
-    friend unsigned short atomic_load_explicit( volatile atomic_ushort*,
-                                             memory_order );
-    friend unsigned short atomic_exchange_explicit( volatile atomic_ushort*,
-                                             unsigned short, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_ushort*,
-                      unsigned short*, unsigned short, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_ushort*,
-                      unsigned short*, unsigned short, memory_order, memory_order );
-    friend unsigned short atomic_fetch_add_explicit( volatile atomic_ushort*,
-                                                  unsigned short, memory_order );
-    friend unsigned short atomic_fetch_sub_explicit( volatile atomic_ushort*,
-                                                  unsigned short, memory_order );
-    friend unsigned short atomic_fetch_and_explicit( volatile atomic_ushort*,
-                                                  unsigned short, memory_order );
-    friend unsigned short atomic_fetch_or_explicit(  volatile atomic_ushort*,
-                                                  unsigned short, memory_order );
-    friend unsigned short atomic_fetch_xor_explicit( volatile atomic_ushort*,
-                                                  unsigned short, memory_order );
-
-CPP0X(private:)
+       bool is_lock_free() const volatile;
+       void store( unsigned short,
+                                                       memory_order = memory_order_seq_cst ) volatile;
+       unsigned short load( memory_order = memory_order_seq_cst ) volatile;
+       unsigned short exchange( unsigned short,
+                                                                                                        memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_weak( unsigned short&, unsigned short,
+                                                                                                                       memory_order, memory_order ) volatile;
+       bool compare_exchange_strong( unsigned short&, unsigned short,
+                                                                                                                               memory_order, memory_order ) volatile;
+       bool compare_exchange_weak( unsigned short&, unsigned short,
+                                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_strong( unsigned short&, unsigned short,
+                                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       unsigned short fetch_add( unsigned short,
+                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       unsigned short fetch_sub( unsigned short,
+                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       unsigned short fetch_and( unsigned short,
+                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       unsigned short fetch_or( unsigned short,
+                                                                                                        memory_order = memory_order_seq_cst ) volatile;
+       unsigned short fetch_xor( unsigned short,
+                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+
+       CPP0X( atomic_ushort() = default; )
+       CPP0X( constexpr atomic_ushort( unsigned short __v__ ) : __f__( __v__) {
+               } )
+       CPP0X( atomic_ushort( const atomic_ushort& ) = delete; )
+       atomic_ushort& operator =( const atomic_ushort& ) CPP0X(=delete);
+
+       unsigned short operator =( unsigned short __v__ ) volatile
+       { store( __v__ ); return __v__; }
+
+       unsigned short operator ++( int ) volatile
+       { return fetch_add( 1 ); }
+
+       unsigned short operator --( int ) volatile
+       { return fetch_sub( 1 ); }
+
+       unsigned short operator ++() volatile
+       { return fetch_add( 1 ) + 1; }
+
+       unsigned short operator --() volatile
+       { return fetch_sub( 1 ) - 1; }
+
+       unsigned short operator +=( unsigned short __v__ ) volatile
+       { return fetch_add( __v__ ) + __v__; }
+
+       unsigned short operator -=( unsigned short __v__ ) volatile
+       { return fetch_sub( __v__ ) - __v__; }
+
+       unsigned short operator &=( unsigned short __v__ ) volatile
+       { return fetch_and( __v__ ) & __v__; }
+
+       unsigned short operator |=( unsigned short __v__ ) volatile
+       { return fetch_or( __v__ ) | __v__; }
+
+       unsigned short operator ^=( unsigned short __v__ ) volatile
+       { return fetch_xor( __v__ ) ^ __v__; }
+
+       friend void atomic_store_explicit( volatile atomic_ushort*, unsigned short,
+                                                                                                                                                memory_order );
+       friend unsigned short atomic_load_explicit( volatile atomic_ushort*,
+                                                                                                                                                                                       memory_order );
+       friend unsigned short atomic_exchange_explicit( volatile atomic_ushort*,
+                                                                                                                                                                                                       unsigned short, memory_order );
+       friend bool atomic_compare_exchange_weak_explicit( volatile atomic_ushort*,
+                                                                                                                                                                                                                unsigned short*, unsigned short, memory_order, memory_order );
+       friend bool atomic_compare_exchange_strong_explicit( volatile atomic_ushort*,
+                                                                                                                                                                                                                        unsigned short*, unsigned short, memory_order, memory_order );
+       friend unsigned short atomic_fetch_add_explicit( volatile atomic_ushort*,
+                                                                                                                                                                                                        unsigned short, memory_order );
+       friend unsigned short atomic_fetch_sub_explicit( volatile atomic_ushort*,
+                                                                                                                                                                                                        unsigned short, memory_order );
+       friend unsigned short atomic_fetch_and_explicit( volatile atomic_ushort*,
+                                                                                                                                                                                                        unsigned short, memory_order );
+       friend unsigned short atomic_fetch_or_explicit(  volatile atomic_ushort*,
+                                                                                                                                                                                                        unsigned short, memory_order );
+       friend unsigned short atomic_fetch_xor_explicit( volatile atomic_ushort*,
+                                                                                                                                                                                                        unsigned short, memory_order );
+
+       CPP0X(private:)
 #endif
-    unsigned short __f__;
+       unsigned short __f__;
 } atomic_ushort;
 
 
 typedef struct atomic_int
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( int,
-                memory_order = memory_order_seq_cst ) volatile;
-    int load( memory_order = memory_order_seq_cst ) volatile;
-    int exchange( int,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( int&, int,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( int&, int,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( int&, int,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( int&, int,
-                       memory_order = memory_order_seq_cst ) volatile;
-    int fetch_add( int,
-                           memory_order = memory_order_seq_cst ) volatile;
-    int fetch_sub( int,
-                           memory_order = memory_order_seq_cst ) volatile;
-    int fetch_and( int,
-                           memory_order = memory_order_seq_cst ) volatile;
-    int fetch_or( int,
-                           memory_order = memory_order_seq_cst ) volatile;
-    int fetch_xor( int,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_int() = default; )
-    CPP0X( constexpr atomic_int( int __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_int( const atomic_int& ) = delete; )
-    atomic_int& operator =( const atomic_int& ) CPP0X(=delete);
-
-    int operator =( int __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    int operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    int operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    int operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    int operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    int operator +=( int __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    int operator -=( int __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    int operator &=( int __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    int operator |=( int __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    int operator ^=( int __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_int*, int,
-                                       memory_order );
-    friend int atomic_load_explicit( volatile atomic_int*,
-                                             memory_order );
-    friend int atomic_exchange_explicit( volatile atomic_int*,
-                                             int, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_int*,
-                      int*, int, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_int*,
-                      int*, int, memory_order, memory_order );
-    friend int atomic_fetch_add_explicit( volatile atomic_int*,
-                                                  int, memory_order );
-    friend int atomic_fetch_sub_explicit( volatile atomic_int*,
-                                                  int, memory_order );
-    friend int atomic_fetch_and_explicit( volatile atomic_int*,
-                                                  int, memory_order );
-    friend int atomic_fetch_or_explicit(  volatile atomic_int*,
-                                                  int, memory_order );
-    friend int atomic_fetch_xor_explicit( volatile atomic_int*,
-                                                  int, memory_order );
-
-CPP0X(private:)
+       bool is_lock_free() const volatile;
+       void store( int,
+                                                       memory_order = memory_order_seq_cst ) volatile;
+       int load( memory_order = memory_order_seq_cst ) volatile;
+       int exchange( int,
+                                                               memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_weak( int&, int,
+                                                                                                                       memory_order, memory_order ) volatile;
+       bool compare_exchange_strong( int&, int,
+                                                                                                                               memory_order, memory_order ) volatile;
+       bool compare_exchange_weak( int&, int,
+                                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_strong( int&, int,
+                                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       int fetch_add( int,
+                                                                memory_order = memory_order_seq_cst ) volatile;
+       int fetch_sub( int,
+                                                                memory_order = memory_order_seq_cst ) volatile;
+       int fetch_and( int,
+                                                                memory_order = memory_order_seq_cst ) volatile;
+       int fetch_or( int,
+                                                               memory_order = memory_order_seq_cst ) volatile;
+       int fetch_xor( int,
+                                                                memory_order = memory_order_seq_cst ) volatile;
+
+       CPP0X( atomic_int() = default; )
+       CPP0X( constexpr atomic_int( int __v__ ) : __f__( __v__) {
+               } )
+       CPP0X( atomic_int( const atomic_int& ) = delete; )
+       atomic_int& operator =( const atomic_int& ) CPP0X(=delete);
+
+       int operator =( int __v__ ) volatile
+       { store( __v__ ); return __v__; }
+
+       int operator ++( int ) volatile
+       { return fetch_add( 1 ); }
+
+       int operator --( int ) volatile
+       { return fetch_sub( 1 ); }
+
+       int operator ++() volatile
+       { return fetch_add( 1 ) + 1; }
+
+       int operator --() volatile
+       { return fetch_sub( 1 ) - 1; }
+
+       int operator +=( int __v__ ) volatile
+       { return fetch_add( __v__ ) + __v__; }
+
+       int operator -=( int __v__ ) volatile
+       { return fetch_sub( __v__ ) - __v__; }
+
+       int operator &=( int __v__ ) volatile
+       { return fetch_and( __v__ ) & __v__; }
+
+       int operator |=( int __v__ ) volatile
+       { return fetch_or( __v__ ) | __v__; }
+
+       int operator ^=( int __v__ ) volatile
+       { return fetch_xor( __v__ ) ^ __v__; }
+
+       friend void atomic_store_explicit( volatile atomic_int*, int,
+                                                                                                                                                memory_order );
+       friend int atomic_load_explicit( volatile atomic_int*,
+                                                                                                                                        memory_order );
+       friend int atomic_exchange_explicit( volatile atomic_int*,
+                                                                                                                                                        int, memory_order );
+       friend bool atomic_compare_exchange_weak_explicit( volatile atomic_int*,
+                                                                                                                                                                                                                int*, int, memory_order, memory_order );
+       friend bool atomic_compare_exchange_strong_explicit( volatile atomic_int*,
+                                                                                                                                                                                                                        int*, int, memory_order, memory_order );
+       friend int atomic_fetch_add_explicit( volatile atomic_int*,
+                                                                                                                                                               int, memory_order );
+       friend int atomic_fetch_sub_explicit( volatile atomic_int*,
+                                                                                                                                                               int, memory_order );
+       friend int atomic_fetch_and_explicit( volatile atomic_int*,
+                                                                                                                                                               int, memory_order );
+       friend int atomic_fetch_or_explicit(  volatile atomic_int*,
+                                                                                                                                                               int, memory_order );
+       friend int atomic_fetch_xor_explicit( volatile atomic_int*,
+                                                                                                                                                               int, memory_order );
+
+       CPP0X(private:)
 #endif
-    int __f__;
+       int __f__;
 } atomic_int;
 
 
 typedef struct atomic_uint
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( unsigned int,
-                memory_order = memory_order_seq_cst ) volatile;
-    unsigned int load( memory_order = memory_order_seq_cst ) volatile;
-    unsigned int exchange( unsigned int,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( unsigned int&, unsigned int,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( unsigned int&, unsigned int,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( unsigned int&, unsigned int,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( unsigned int&, unsigned int,
-                       memory_order = memory_order_seq_cst ) volatile;
-    unsigned int fetch_add( unsigned int,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned int fetch_sub( unsigned int,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned int fetch_and( unsigned int,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned int fetch_or( unsigned int,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned int fetch_xor( unsigned int,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_uint() = default; )
-    CPP0X( constexpr atomic_uint( unsigned int __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_uint( const atomic_uint& ) = delete; )
-    atomic_uint& operator =( const atomic_uint& ) CPP0X(=delete);
-
-    unsigned int operator =( unsigned int __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    unsigned int operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    unsigned int operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    unsigned int operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    unsigned int operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    unsigned int operator +=( unsigned int __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    unsigned int operator -=( unsigned int __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    unsigned int operator &=( unsigned int __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    unsigned int operator |=( unsigned int __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    unsigned int operator ^=( unsigned int __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_uint*, unsigned int,
-                                       memory_order );
-    friend unsigned int atomic_load_explicit( volatile atomic_uint*,
-                                             memory_order );
-    friend unsigned int atomic_exchange_explicit( volatile atomic_uint*,
-                                             unsigned int, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_uint*,
-                      unsigned int*, unsigned int, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_uint*,
-                      unsigned int*, unsigned int, memory_order, memory_order );
-    friend unsigned int atomic_fetch_add_explicit( volatile atomic_uint*,
-                                                  unsigned int, memory_order );
-    friend unsigned int atomic_fetch_sub_explicit( volatile atomic_uint*,
-                                                  unsigned int, memory_order );
-    friend unsigned int atomic_fetch_and_explicit( volatile atomic_uint*,
-                                                  unsigned int, memory_order );
-    friend unsigned int atomic_fetch_or_explicit(  volatile atomic_uint*,
-                                                  unsigned int, memory_order );
-    friend unsigned int atomic_fetch_xor_explicit( volatile atomic_uint*,
-                                                  unsigned int, memory_order );
-
-CPP0X(private:)
+       bool is_lock_free() const volatile;
+       void store( unsigned int,
+                                                       memory_order = memory_order_seq_cst ) volatile;
+       unsigned int load( memory_order = memory_order_seq_cst ) volatile;
+       unsigned int exchange( unsigned int,
+                                                                                                memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_weak( unsigned int&, unsigned int,
+                                                                                                                       memory_order, memory_order ) volatile;
+       bool compare_exchange_strong( unsigned int&, unsigned int,
+                                                                                                                               memory_order, memory_order ) volatile;
+       bool compare_exchange_weak( unsigned int&, unsigned int,
+                                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_strong( unsigned int&, unsigned int,
+                                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       unsigned int fetch_add( unsigned int,
+                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       unsigned int fetch_sub( unsigned int,
+                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       unsigned int fetch_and( unsigned int,
+                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       unsigned int fetch_or( unsigned int,
+                                                                                                memory_order = memory_order_seq_cst ) volatile;
+       unsigned int fetch_xor( unsigned int,
+                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+
+       CPP0X( atomic_uint() = default; )
+       CPP0X( constexpr atomic_uint( unsigned int __v__ ) : __f__( __v__) {
+               } )
+       CPP0X( atomic_uint( const atomic_uint& ) = delete; )
+       atomic_uint& operator =( const atomic_uint& ) CPP0X(=delete);
+
+       unsigned int operator =( unsigned int __v__ ) volatile
+       { store( __v__ ); return __v__; }
+
+       unsigned int operator ++( int ) volatile
+       { return fetch_add( 1 ); }
+
+       unsigned int operator --( int ) volatile
+       { return fetch_sub( 1 ); }
+
+       unsigned int operator ++() volatile
+       { return fetch_add( 1 ) + 1; }
+
+       unsigned int operator --() volatile
+       { return fetch_sub( 1 ) - 1; }
+
+       unsigned int operator +=( unsigned int __v__ ) volatile
+       { return fetch_add( __v__ ) + __v__; }
+
+       unsigned int operator -=( unsigned int __v__ ) volatile
+       { return fetch_sub( __v__ ) - __v__; }
+
+       unsigned int operator &=( unsigned int __v__ ) volatile
+       { return fetch_and( __v__ ) & __v__; }
+
+       unsigned int operator |=( unsigned int __v__ ) volatile
+       { return fetch_or( __v__ ) | __v__; }
+
+       unsigned int operator ^=( unsigned int __v__ ) volatile
+       { return fetch_xor( __v__ ) ^ __v__; }
+
+       friend void atomic_store_explicit( volatile atomic_uint*, unsigned int,
+                                                                                                                                                memory_order );
+       friend unsigned int atomic_load_explicit( volatile atomic_uint*,
+                                                                                                                                                                               memory_order );
+       friend unsigned int atomic_exchange_explicit( volatile atomic_uint*,
+                                                                                                                                                                                               unsigned int, memory_order );
+       friend bool atomic_compare_exchange_weak_explicit( volatile atomic_uint*,
+                                                                                                                                                                                                                unsigned int*, unsigned int, memory_order, memory_order );
+       friend bool atomic_compare_exchange_strong_explicit( volatile atomic_uint*,
+                                                                                                                                                                                                                        unsigned int*, unsigned int, memory_order, memory_order );
+       friend unsigned int atomic_fetch_add_explicit( volatile atomic_uint*,
+                                                                                                                                                                                                unsigned int, memory_order );
+       friend unsigned int atomic_fetch_sub_explicit( volatile atomic_uint*,
+                                                                                                                                                                                                unsigned int, memory_order );
+       friend unsigned int atomic_fetch_and_explicit( volatile atomic_uint*,
+                                                                                                                                                                                                unsigned int, memory_order );
+       friend unsigned int atomic_fetch_or_explicit(  volatile atomic_uint*,
+                                                                                                                                                                                                unsigned int, memory_order );
+       friend unsigned int atomic_fetch_xor_explicit( volatile atomic_uint*,
+                                                                                                                                                                                                unsigned int, memory_order );
+
+       CPP0X(private:)
 #endif
-    unsigned int __f__;
+       unsigned int __f__;
 } atomic_uint;
 
 
 typedef struct atomic_long
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( long,
-                memory_order = memory_order_seq_cst ) volatile;
-    long load( memory_order = memory_order_seq_cst ) volatile;
-    long exchange( long,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( long&, long,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( long&, long,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( long&, long,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( long&, long,
-                       memory_order = memory_order_seq_cst ) volatile;
-    long fetch_add( long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    long fetch_sub( long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    long fetch_and( long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    long fetch_or( long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    long fetch_xor( long,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_long() = default; )
-    CPP0X( constexpr atomic_long( long __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_long( const atomic_long& ) = delete; )
-    atomic_long& operator =( const atomic_long& ) CPP0X(=delete);
-
-    long operator =( long __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    long operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    long operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    long operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    long operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    long operator +=( long __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    long operator -=( long __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    long operator &=( long __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    long operator |=( long __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    long operator ^=( long __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_long*, long,
-                                       memory_order );
-    friend long atomic_load_explicit( volatile atomic_long*,
-                                             memory_order );
-    friend long atomic_exchange_explicit( volatile atomic_long*,
-                                             long, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_long*,
-                      long*, long, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_long*,
-                      long*, long, memory_order, memory_order );
-    friend long atomic_fetch_add_explicit( volatile atomic_long*,
-                                                  long, memory_order );
-    friend long atomic_fetch_sub_explicit( volatile atomic_long*,
-                                                  long, memory_order );
-    friend long atomic_fetch_and_explicit( volatile atomic_long*,
-                                                  long, memory_order );
-    friend long atomic_fetch_or_explicit(  volatile atomic_long*,
-                                                  long, memory_order );
-    friend long atomic_fetch_xor_explicit( volatile atomic_long*,
-                                                  long, memory_order );
-
-CPP0X(private:)
+       bool is_lock_free() const volatile;
+       void store( long,
+                                                       memory_order = memory_order_seq_cst ) volatile;
+       long load( memory_order = memory_order_seq_cst ) volatile;
+       long exchange( long,
+                                                                memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_weak( long&, long,
+                                                                                                                       memory_order, memory_order ) volatile;
+       bool compare_exchange_strong( long&, long,
+                                                                                                                               memory_order, memory_order ) volatile;
+       bool compare_exchange_weak( long&, long,
+                                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_strong( long&, long,
+                                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       long fetch_add( long,
+                                                                       memory_order = memory_order_seq_cst ) volatile;
+       long fetch_sub( long,
+                                                                       memory_order = memory_order_seq_cst ) volatile;
+       long fetch_and( long,
+                                                                       memory_order = memory_order_seq_cst ) volatile;
+       long fetch_or( long,
+                                                                memory_order = memory_order_seq_cst ) volatile;
+       long fetch_xor( long,
+                                                                       memory_order = memory_order_seq_cst ) volatile;
+
+       CPP0X( atomic_long() = default; )
+       CPP0X( constexpr atomic_long( long __v__ ) : __f__( __v__) {
+               } )
+       CPP0X( atomic_long( const atomic_long& ) = delete; )
+       atomic_long& operator =( const atomic_long& ) CPP0X(=delete);
+
+       long operator =( long __v__ ) volatile
+       { store( __v__ ); return __v__; }
+
+       long operator ++( int ) volatile
+       { return fetch_add( 1 ); }
+
+       long operator --( int ) volatile
+       { return fetch_sub( 1 ); }
+
+       long operator ++() volatile
+       { return fetch_add( 1 ) + 1; }
+
+       long operator --() volatile
+       { return fetch_sub( 1 ) - 1; }
+
+       long operator +=( long __v__ ) volatile
+       { return fetch_add( __v__ ) + __v__; }
+
+       long operator -=( long __v__ ) volatile
+       { return fetch_sub( __v__ ) - __v__; }
+
+       long operator &=( long __v__ ) volatile
+       { return fetch_and( __v__ ) & __v__; }
+
+       long operator |=( long __v__ ) volatile
+       { return fetch_or( __v__ ) | __v__; }
+
+       long operator ^=( long __v__ ) volatile
+       { return fetch_xor( __v__ ) ^ __v__; }
+
+       friend void atomic_store_explicit( volatile atomic_long*, long,
+                                                                                                                                                memory_order );
+       friend long atomic_load_explicit( volatile atomic_long*,
+                                                                                                                                               memory_order );
+       friend long atomic_exchange_explicit( volatile atomic_long*,
+                                                                                                                                                               long, memory_order );
+       friend bool atomic_compare_exchange_weak_explicit( volatile atomic_long*,
+                                                                                                                                                                                                                long*, long, memory_order, memory_order );
+       friend bool atomic_compare_exchange_strong_explicit( volatile atomic_long*,
+                                                                                                                                                                                                                        long*, long, memory_order, memory_order );
+       friend long atomic_fetch_add_explicit( volatile atomic_long*,
+                                                                                                                                                                long, memory_order );
+       friend long atomic_fetch_sub_explicit( volatile atomic_long*,
+                                                                                                                                                                long, memory_order );
+       friend long atomic_fetch_and_explicit( volatile atomic_long*,
+                                                                                                                                                                long, memory_order );
+       friend long atomic_fetch_or_explicit(  volatile atomic_long*,
+                                                                                                                                                                long, memory_order );
+       friend long atomic_fetch_xor_explicit( volatile atomic_long*,
+                                                                                                                                                                long, memory_order );
+
+       CPP0X(private:)
 #endif
-    long __f__;
+       long __f__;
 } atomic_long;
 
 
 typedef struct atomic_ulong
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( unsigned long,
-                memory_order = memory_order_seq_cst ) volatile;
-    unsigned long load( memory_order = memory_order_seq_cst ) volatile;
-    unsigned long exchange( unsigned long,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( unsigned long&, unsigned long,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( unsigned long&, unsigned long,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( unsigned long&, unsigned long,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( unsigned long&, unsigned long,
-                       memory_order = memory_order_seq_cst ) volatile;
-    unsigned long fetch_add( unsigned long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned long fetch_sub( unsigned long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned long fetch_and( unsigned long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned long fetch_or( unsigned long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned long fetch_xor( unsigned long,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_ulong() = default; )
-    CPP0X( constexpr atomic_ulong( unsigned long __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_ulong( const atomic_ulong& ) = delete; )
-    atomic_ulong& operator =( const atomic_ulong& ) CPP0X(=delete);
-
-    unsigned long operator =( unsigned long __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    unsigned long operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    unsigned long operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    unsigned long operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    unsigned long operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    unsigned long operator +=( unsigned long __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    unsigned long operator -=( unsigned long __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    unsigned long operator &=( unsigned long __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    unsigned long operator |=( unsigned long __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    unsigned long operator ^=( unsigned long __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_ulong*, unsigned long,
-                                       memory_order );
-    friend unsigned long atomic_load_explicit( volatile atomic_ulong*,
-                                             memory_order );
-    friend unsigned long atomic_exchange_explicit( volatile atomic_ulong*,
-                                             unsigned long, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_ulong*,
-                      unsigned long*, unsigned long, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_ulong*,
-                      unsigned long*, unsigned long, memory_order, memory_order );
-    friend unsigned long atomic_fetch_add_explicit( volatile atomic_ulong*,
-                                                  unsigned long, memory_order );
-    friend unsigned long atomic_fetch_sub_explicit( volatile atomic_ulong*,
-                                                  unsigned long, memory_order );
-    friend unsigned long atomic_fetch_and_explicit( volatile atomic_ulong*,
-                                                  unsigned long, memory_order );
-    friend unsigned long atomic_fetch_or_explicit(  volatile atomic_ulong*,
-                                                  unsigned long, memory_order );
-    friend unsigned long atomic_fetch_xor_explicit( volatile atomic_ulong*,
-                                                  unsigned long, memory_order );
-
-CPP0X(private:)
+       bool is_lock_free() const volatile;
+       void store( unsigned long,
+                                                       memory_order = memory_order_seq_cst ) volatile;
+       unsigned long load( memory_order = memory_order_seq_cst ) volatile;
+       unsigned long exchange( unsigned long,
+                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_weak( unsigned long&, unsigned long,
+                                                                                                                       memory_order, memory_order ) volatile;
+       bool compare_exchange_strong( unsigned long&, unsigned long,
+                                                                                                                               memory_order, memory_order ) volatile;
+       bool compare_exchange_weak( unsigned long&, unsigned long,
+                                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_strong( unsigned long&, unsigned long,
+                                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       unsigned long fetch_add( unsigned long,
+                                                                                                        memory_order = memory_order_seq_cst ) volatile;
+       unsigned long fetch_sub( unsigned long,
+                                                                                                        memory_order = memory_order_seq_cst ) volatile;
+       unsigned long fetch_and( unsigned long,
+                                                                                                        memory_order = memory_order_seq_cst ) volatile;
+       unsigned long fetch_or( unsigned long,
+                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       unsigned long fetch_xor( unsigned long,
+                                                                                                        memory_order = memory_order_seq_cst ) volatile;
+
+       CPP0X( atomic_ulong() = default; )
+       CPP0X( constexpr atomic_ulong( unsigned long __v__ ) : __f__( __v__) {
+               } )
+       CPP0X( atomic_ulong( const atomic_ulong& ) = delete; )
+       atomic_ulong& operator =( const atomic_ulong& ) CPP0X(=delete);
+
+       unsigned long operator =( unsigned long __v__ ) volatile
+       { store( __v__ ); return __v__; }
+
+       unsigned long operator ++( int ) volatile
+       { return fetch_add( 1 ); }
+
+       unsigned long operator --( int ) volatile
+       { return fetch_sub( 1 ); }
+
+       unsigned long operator ++() volatile
+       { return fetch_add( 1 ) + 1; }
+
+       unsigned long operator --() volatile
+       { return fetch_sub( 1 ) - 1; }
+
+       unsigned long operator +=( unsigned long __v__ ) volatile
+       { return fetch_add( __v__ ) + __v__; }
+
+       unsigned long operator -=( unsigned long __v__ ) volatile
+       { return fetch_sub( __v__ ) - __v__; }
+
+       unsigned long operator &=( unsigned long __v__ ) volatile
+       { return fetch_and( __v__ ) & __v__; }
+
+       unsigned long operator |=( unsigned long __v__ ) volatile
+       { return fetch_or( __v__ ) | __v__; }
+
+       unsigned long operator ^=( unsigned long __v__ ) volatile
+       { return fetch_xor( __v__ ) ^ __v__; }
+
+       friend void atomic_store_explicit( volatile atomic_ulong*, unsigned long,
+                                                                                                                                                memory_order );
+       friend unsigned long atomic_load_explicit( volatile atomic_ulong*,
+                                                                                                                                                                                memory_order );
+       friend unsigned long atomic_exchange_explicit( volatile atomic_ulong*,
+                                                                                                                                                                                                unsigned long, memory_order );
+       friend bool atomic_compare_exchange_weak_explicit( volatile atomic_ulong*,
+                                                                                                                                                                                                                unsigned long*, unsigned long, memory_order, memory_order );
+       friend bool atomic_compare_exchange_strong_explicit( volatile atomic_ulong*,
+                                                                                                                                                                                                                        unsigned long*, unsigned long, memory_order, memory_order );
+       friend unsigned long atomic_fetch_add_explicit( volatile atomic_ulong*,
+                                                                                                                                                                                                       unsigned long, memory_order );
+       friend unsigned long atomic_fetch_sub_explicit( volatile atomic_ulong*,
+                                                                                                                                                                                                       unsigned long, memory_order );
+       friend unsigned long atomic_fetch_and_explicit( volatile atomic_ulong*,
+                                                                                                                                                                                                       unsigned long, memory_order );
+       friend unsigned long atomic_fetch_or_explicit(  volatile atomic_ulong*,
+                                                                                                                                                                                                       unsigned long, memory_order );
+       friend unsigned long atomic_fetch_xor_explicit( volatile atomic_ulong*,
+                                                                                                                                                                                                       unsigned long, memory_order );
+
+       CPP0X(private:)
 #endif
-    unsigned long __f__;
+       unsigned long __f__;
 } atomic_ulong;
 
 
 typedef struct atomic_llong
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( long long,
-                memory_order = memory_order_seq_cst ) volatile;
-    long long load( memory_order = memory_order_seq_cst ) volatile;
-    long long exchange( long long,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( long long&, long long,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( long long&, long long,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( long long&, long long,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( long long&, long long,
-                       memory_order = memory_order_seq_cst ) volatile;
-    long long fetch_add( long long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    long long fetch_sub( long long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    long long fetch_and( long long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    long long fetch_or( long long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    long long fetch_xor( long long,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_llong() = default; )
-    CPP0X( constexpr atomic_llong( long long __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_llong( const atomic_llong& ) = delete; )
-    atomic_llong& operator =( const atomic_llong& ) CPP0X(=delete);
-
-    long long operator =( long long __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    long long operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    long long operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    long long operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    long long operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    long long operator +=( long long __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    long long operator -=( long long __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    long long operator &=( long long __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    long long operator |=( long long __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    long long operator ^=( long long __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_llong*, long long,
-                                       memory_order );
-    friend long long atomic_load_explicit( volatile atomic_llong*,
-                                             memory_order );
-    friend long long atomic_exchange_explicit( volatile atomic_llong*,
-                                             long long, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_llong*,
-                      long long*, long long, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_llong*,
-                      long long*, long long, memory_order, memory_order );
-    friend long long atomic_fetch_add_explicit( volatile atomic_llong*,
-                                                  long long, memory_order );
-    friend long long atomic_fetch_sub_explicit( volatile atomic_llong*,
-                                                  long long, memory_order );
-    friend long long atomic_fetch_and_explicit( volatile atomic_llong*,
-                                                  long long, memory_order );
-    friend long long atomic_fetch_or_explicit(  volatile atomic_llong*,
-                                                  long long, memory_order );
-    friend long long atomic_fetch_xor_explicit( volatile atomic_llong*,
-                                                  long long, memory_order );
-
-CPP0X(private:)
+       bool is_lock_free() const volatile;
+       void store( long long,
+                                                       memory_order = memory_order_seq_cst ) volatile;
+       long long load( memory_order = memory_order_seq_cst ) volatile;
+       long long exchange( long long,
+                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_weak( long long&, long long,
+                                                                                                                       memory_order, memory_order ) volatile;
+       bool compare_exchange_strong( long long&, long long,
+                                                                                                                               memory_order, memory_order ) volatile;
+       bool compare_exchange_weak( long long&, long long,
+                                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_strong( long long&, long long,
+                                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       long long fetch_add( long long,
+                                                                                        memory_order = memory_order_seq_cst ) volatile;
+       long long fetch_sub( long long,
+                                                                                        memory_order = memory_order_seq_cst ) volatile;
+       long long fetch_and( long long,
+                                                                                        memory_order = memory_order_seq_cst ) volatile;
+       long long fetch_or( long long,
+                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       long long fetch_xor( long long,
+                                                                                        memory_order = memory_order_seq_cst ) volatile;
+
+       CPP0X( atomic_llong() = default; )
+       CPP0X( constexpr atomic_llong( long long __v__ ) : __f__( __v__) {
+               } )
+       CPP0X( atomic_llong( const atomic_llong& ) = delete; )
+       atomic_llong& operator =( const atomic_llong& ) CPP0X(=delete);
+
+       long long operator =( long long __v__ ) volatile
+       { store( __v__ ); return __v__; }
+
+       long long operator ++( int ) volatile
+       { return fetch_add( 1 ); }
+
+       long long operator --( int ) volatile
+       { return fetch_sub( 1 ); }
+
+       long long operator ++() volatile
+       { return fetch_add( 1 ) + 1; }
+
+       long long operator --() volatile
+       { return fetch_sub( 1 ) - 1; }
+
+       long long operator +=( long long __v__ ) volatile
+       { return fetch_add( __v__ ) + __v__; }
+
+       long long operator -=( long long __v__ ) volatile
+       { return fetch_sub( __v__ ) - __v__; }
+
+       long long operator &=( long long __v__ ) volatile
+       { return fetch_and( __v__ ) & __v__; }
+
+       long long operator |=( long long __v__ ) volatile
+       { return fetch_or( __v__ ) | __v__; }
+
+       long long operator ^=( long long __v__ ) volatile
+       { return fetch_xor( __v__ ) ^ __v__; }
+
+       friend void atomic_store_explicit( volatile atomic_llong*, long long,
+                                                                                                                                                memory_order );
+       friend long long atomic_load_explicit( volatile atomic_llong*,
+                                                                                                                                                                memory_order );
+       friend long long atomic_exchange_explicit( volatile atomic_llong*,
+                                                                                                                                                                                long long, memory_order );
+       friend bool atomic_compare_exchange_weak_explicit( volatile atomic_llong*,
+                                                                                                                                                                                                                long long*, long long, memory_order, memory_order );
+       friend bool atomic_compare_exchange_strong_explicit( volatile atomic_llong*,
+                                                                                                                                                                                                                        long long*, long long, memory_order, memory_order );
+       friend long long atomic_fetch_add_explicit( volatile atomic_llong*,
+                                                                                                                                                                                       long long, memory_order );
+       friend long long atomic_fetch_sub_explicit( volatile atomic_llong*,
+                                                                                                                                                                                       long long, memory_order );
+       friend long long atomic_fetch_and_explicit( volatile atomic_llong*,
+                                                                                                                                                                                       long long, memory_order );
+       friend long long atomic_fetch_or_explicit(  volatile atomic_llong*,
+                                                                                                                                                                                       long long, memory_order );
+       friend long long atomic_fetch_xor_explicit( volatile atomic_llong*,
+                                                                                                                                                                                       long long, memory_order );
+
+       CPP0X(private:)
 #endif
-    long long __f__;
+       long long __f__;
 } atomic_llong;
 
 
 typedef struct atomic_ullong
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( unsigned long long,
-                memory_order = memory_order_seq_cst ) volatile;
-    unsigned long long load( memory_order = memory_order_seq_cst ) volatile;
-    unsigned long long exchange( unsigned long long,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( unsigned long long&, unsigned long long,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( unsigned long long&, unsigned long long,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( unsigned long long&, unsigned long long,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( unsigned long long&, unsigned long long,
-                       memory_order = memory_order_seq_cst ) volatile;
-    unsigned long long fetch_add( unsigned long long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned long long fetch_sub( unsigned long long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned long long fetch_and( unsigned long long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned long long fetch_or( unsigned long long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned long long fetch_xor( unsigned long long,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_ullong() = default; )
-    CPP0X( constexpr atomic_ullong( unsigned long long __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_ullong( const atomic_ullong& ) = delete; )
-    atomic_ullong& operator =( const atomic_ullong& ) CPP0X(=delete);
-
-    unsigned long long operator =( unsigned long long __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    unsigned long long operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    unsigned long long operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    unsigned long long operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    unsigned long long operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    unsigned long long operator +=( unsigned long long __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    unsigned long long operator -=( unsigned long long __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    unsigned long long operator &=( unsigned long long __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    unsigned long long operator |=( unsigned long long __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    unsigned long long operator ^=( unsigned long long __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_ullong*, unsigned long long,
-                                       memory_order );
-    friend unsigned long long atomic_load_explicit( volatile atomic_ullong*,
-                                             memory_order );
-    friend unsigned long long atomic_exchange_explicit( volatile atomic_ullong*,
-                                             unsigned long long, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_ullong*,
-                      unsigned long long*, unsigned long long, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_ullong*,
-                      unsigned long long*, unsigned long long, memory_order, memory_order );
-    friend unsigned long long atomic_fetch_add_explicit( volatile atomic_ullong*,
-                                                  unsigned long long, memory_order );
-    friend unsigned long long atomic_fetch_sub_explicit( volatile atomic_ullong*,
-                                                  unsigned long long, memory_order );
-    friend unsigned long long atomic_fetch_and_explicit( volatile atomic_ullong*,
-                                                  unsigned long long, memory_order );
-    friend unsigned long long atomic_fetch_or_explicit(  volatile atomic_ullong*,
-                                                  unsigned long long, memory_order );
-    friend unsigned long long atomic_fetch_xor_explicit( volatile atomic_ullong*,
-                                                  unsigned long long, memory_order );
-
-CPP0X(private:)
+       bool is_lock_free() const volatile;
+       void store( unsigned long long,
+                                                       memory_order = memory_order_seq_cst ) volatile;
+       unsigned long long load( memory_order = memory_order_seq_cst ) volatile;
+       unsigned long long exchange( unsigned long long,
+                                                                                                                        memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_weak( unsigned long long&, unsigned long long,
+                                                                                                                       memory_order, memory_order ) volatile;
+       bool compare_exchange_strong( unsigned long long&, unsigned long long,
+                                                                                                                               memory_order, memory_order ) volatile;
+       bool compare_exchange_weak( unsigned long long&, unsigned long long,
+                                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_strong( unsigned long long&, unsigned long long,
+                                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       unsigned long long fetch_add( unsigned long long,
+                                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       unsigned long long fetch_sub( unsigned long long,
+                                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       unsigned long long fetch_and( unsigned long long,
+                                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       unsigned long long fetch_or( unsigned long long,
+                                                                                                                        memory_order = memory_order_seq_cst ) volatile;
+       unsigned long long fetch_xor( unsigned long long,
+                                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+
+       CPP0X( atomic_ullong() = default; )
+       CPP0X( constexpr atomic_ullong( unsigned long long __v__ ) : __f__( __v__) {
+               } )
+       CPP0X( atomic_ullong( const atomic_ullong& ) = delete; )
+       atomic_ullong& operator =( const atomic_ullong& ) CPP0X(=delete);
+
+       unsigned long long operator =( unsigned long long __v__ ) volatile
+       { store( __v__ ); return __v__; }
+
+       unsigned long long operator ++( int ) volatile
+       { return fetch_add( 1 ); }
+
+       unsigned long long operator --( int ) volatile
+       { return fetch_sub( 1 ); }
+
+       unsigned long long operator ++() volatile
+       { return fetch_add( 1 ) + 1; }
+
+       unsigned long long operator --() volatile
+       { return fetch_sub( 1 ) - 1; }
+
+       unsigned long long operator +=( unsigned long long __v__ ) volatile
+       { return fetch_add( __v__ ) + __v__; }
+
+       unsigned long long operator -=( unsigned long long __v__ ) volatile
+       { return fetch_sub( __v__ ) - __v__; }
+
+       unsigned long long operator &=( unsigned long long __v__ ) volatile
+       { return fetch_and( __v__ ) & __v__; }
+
+       unsigned long long operator |=( unsigned long long __v__ ) volatile
+       { return fetch_or( __v__ ) | __v__; }
+
+       unsigned long long operator ^=( unsigned long long __v__ ) volatile
+       { return fetch_xor( __v__ ) ^ __v__; }
+
+       friend void atomic_store_explicit( volatile atomic_ullong*, unsigned long long,
+                                                                                                                                                memory_order );
+       friend unsigned long long atomic_load_explicit( volatile atomic_ullong*,
+                                                                                                                                                                                                       memory_order );
+       friend unsigned long long atomic_exchange_explicit( volatile atomic_ullong*,
+                                                                                                                                                                                                                       unsigned long long, memory_order );
+       friend bool atomic_compare_exchange_weak_explicit( volatile atomic_ullong*,
+                                                                                                                                                                                                                unsigned long long*, unsigned long long, memory_order, memory_order );
+       friend bool atomic_compare_exchange_strong_explicit( volatile atomic_ullong*,
+                                                                                                                                                                                                                        unsigned long long*, unsigned long long, memory_order, memory_order );
+       friend unsigned long long atomic_fetch_add_explicit( volatile atomic_ullong*,
+                                                                                                                                                                                                                        unsigned long long, memory_order );
+       friend unsigned long long atomic_fetch_sub_explicit( volatile atomic_ullong*,
+                                                                                                                                                                                                                        unsigned long long, memory_order );
+       friend unsigned long long atomic_fetch_and_explicit( volatile atomic_ullong*,
+                                                                                                                                                                                                                        unsigned long long, memory_order );
+       friend unsigned long long atomic_fetch_or_explicit(  volatile atomic_ullong*,
+                                                                                                                                                                                                                        unsigned long long, memory_order );
+       friend unsigned long long atomic_fetch_xor_explicit( volatile atomic_ullong*,
+                                                                                                                                                                                                                        unsigned long long, memory_order );
+
+       CPP0X(private:)
 #endif
-    unsigned long long __f__;
+       unsigned long long __f__;
 } atomic_ullong;
 
 
@@ -1252,89 +1265,90 @@ typedef atomic_ullong atomic_uintmax_t;
 typedef struct atomic_wchar_t
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( wchar_t, memory_order = memory_order_seq_cst ) volatile;
-    wchar_t load( memory_order = memory_order_seq_cst ) volatile;
-    wchar_t exchange( wchar_t,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( wchar_t&, wchar_t,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( wchar_t&, wchar_t,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( wchar_t&, wchar_t,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( wchar_t&, wchar_t,
-                       memory_order = memory_order_seq_cst ) volatile;
-    wchar_t fetch_add( wchar_t,
-                           memory_order = memory_order_seq_cst ) volatile;
-    wchar_t fetch_sub( wchar_t,
-                           memory_order = memory_order_seq_cst ) volatile;
-    wchar_t fetch_and( wchar_t,
-                           memory_order = memory_order_seq_cst ) volatile;
-    wchar_t fetch_or( wchar_t,
-                           memory_order = memory_order_seq_cst ) volatile;
-    wchar_t fetch_xor( wchar_t,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_wchar_t() = default; )
-    CPP0X( constexpr atomic_wchar_t( wchar_t __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_wchar_t( const atomic_wchar_t& ) = delete; )
-    atomic_wchar_t& operator =( const atomic_wchar_t& ) CPP0X(=delete);
-
-    wchar_t operator =( wchar_t __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    wchar_t operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    wchar_t operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    wchar_t operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    wchar_t operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    wchar_t operator +=( wchar_t __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    wchar_t operator -=( wchar_t __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    wchar_t operator &=( wchar_t __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    wchar_t operator |=( wchar_t __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    wchar_t operator ^=( wchar_t __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_wchar_t*, wchar_t,
-                                       memory_order );
-    friend wchar_t atomic_load_explicit( volatile atomic_wchar_t*,
-                                             memory_order );
-    friend wchar_t atomic_exchange_explicit( volatile atomic_wchar_t*,
-                                             wchar_t, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_wchar_t*,
-                    wchar_t*, wchar_t, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_wchar_t*,
-                    wchar_t*, wchar_t, memory_order, memory_order );
-    friend wchar_t atomic_fetch_add_explicit( volatile atomic_wchar_t*,
-                                                  wchar_t, memory_order );
-    friend wchar_t atomic_fetch_sub_explicit( volatile atomic_wchar_t*,
-                                                  wchar_t, memory_order );
-    friend wchar_t atomic_fetch_and_explicit( volatile atomic_wchar_t*,
-                                                  wchar_t, memory_order );
-    friend wchar_t atomic_fetch_or_explicit( volatile atomic_wchar_t*,
-                                                  wchar_t, memory_order );
-    friend wchar_t atomic_fetch_xor_explicit( volatile atomic_wchar_t*,
-                                                  wchar_t, memory_order );
-
-CPP0X(private:)
+       bool is_lock_free() const volatile;
+       void store( wchar_t, memory_order = memory_order_seq_cst ) volatile;
+       wchar_t load( memory_order = memory_order_seq_cst ) volatile;
+       wchar_t exchange( wchar_t,
+                                                                               memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_weak( wchar_t&, wchar_t,
+                                                                                                                       memory_order, memory_order ) volatile;
+       bool compare_exchange_strong( wchar_t&, wchar_t,
+                                                                                                                               memory_order, memory_order ) volatile;
+       bool compare_exchange_weak( wchar_t&, wchar_t,
+                                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_strong( wchar_t&, wchar_t,
+                                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       wchar_t fetch_add( wchar_t,
+                                                                                memory_order = memory_order_seq_cst ) volatile;
+       wchar_t fetch_sub( wchar_t,
+                                                                                memory_order = memory_order_seq_cst ) volatile;
+       wchar_t fetch_and( wchar_t,
+                                                                                memory_order = memory_order_seq_cst ) volatile;
+       wchar_t fetch_or( wchar_t,
+                                                                               memory_order = memory_order_seq_cst ) volatile;
+       wchar_t fetch_xor( wchar_t,
+                                                                                memory_order = memory_order_seq_cst ) volatile;
+
+       CPP0X( atomic_wchar_t() = default; )
+       CPP0X( constexpr atomic_wchar_t( wchar_t __v__ ) : __f__( __v__) {
+               } )
+       CPP0X( atomic_wchar_t( const atomic_wchar_t& ) = delete; )
+       atomic_wchar_t& operator =( const atomic_wchar_t& ) CPP0X(=delete);
+
+       wchar_t operator =( wchar_t __v__ ) volatile
+       { store( __v__ ); return __v__; }
+
+       wchar_t operator ++( int ) volatile
+       { return fetch_add( 1 ); }
+
+       wchar_t operator --( int ) volatile
+       { return fetch_sub( 1 ); }
+
+       wchar_t operator ++() volatile
+       { return fetch_add( 1 ) + 1; }
+
+       wchar_t operator --() volatile
+       { return fetch_sub( 1 ) - 1; }
+
+       wchar_t operator +=( wchar_t __v__ ) volatile
+       { return fetch_add( __v__ ) + __v__; }
+
+       wchar_t operator -=( wchar_t __v__ ) volatile
+       { return fetch_sub( __v__ ) - __v__; }
+
+       wchar_t operator &=( wchar_t __v__ ) volatile
+       { return fetch_and( __v__ ) & __v__; }
+
+       wchar_t operator |=( wchar_t __v__ ) volatile
+       { return fetch_or( __v__ ) | __v__; }
+
+       wchar_t operator ^=( wchar_t __v__ ) volatile
+       { return fetch_xor( __v__ ) ^ __v__; }
+
+       friend void atomic_store_explicit( volatile atomic_wchar_t*, wchar_t,
+                                                                                                                                                memory_order );
+       friend wchar_t atomic_load_explicit( volatile atomic_wchar_t*,
+                                                                                                                                                        memory_order );
+       friend wchar_t atomic_exchange_explicit( volatile atomic_wchar_t*,
+                                                                                                                                                                        wchar_t, memory_order );
+       friend bool atomic_compare_exchange_weak_explicit( volatile atomic_wchar_t*,
+                                                                                                                                                                                                                wchar_t*, wchar_t, memory_order, memory_order );
+       friend bool atomic_compare_exchange_strong_explicit( volatile atomic_wchar_t*,
+                                                                                                                                                                                                                        wchar_t*, wchar_t, memory_order, memory_order );
+       friend wchar_t atomic_fetch_add_explicit( volatile atomic_wchar_t*,
+                                                                                                                                                                               wchar_t, memory_order );
+       friend wchar_t atomic_fetch_sub_explicit( volatile atomic_wchar_t*,
+                                                                                                                                                                               wchar_t, memory_order );
+       friend wchar_t atomic_fetch_and_explicit( volatile atomic_wchar_t*,
+                                                                                                                                                                               wchar_t, memory_order );
+       friend wchar_t atomic_fetch_or_explicit( volatile atomic_wchar_t*,
+                                                                                                                                                                        wchar_t, memory_order );
+       friend wchar_t atomic_fetch_xor_explicit( volatile atomic_wchar_t*,
+                                                                                                                                                                               wchar_t, memory_order );
+
+       CPP0X(private:)
 #endif
-    wchar_t __f__;
+       wchar_t __f__;
 } atomic_wchar_t;
 
 
@@ -1354,26 +1368,27 @@ struct atomic
 {
 #ifdef __cplusplus
 
-    bool is_lock_free() const volatile;
-    void store( T, memory_order = memory_order_seq_cst ) volatile;
-    T load( memory_order = memory_order_seq_cst ) volatile;
-    T exchange( T __v__, memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( T&, T, memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( T&, T, memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( T&, T, memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( T&, T, memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( T __v__ ) : __f__( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    T operator =( T __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-CPP0X(private:)
+       bool is_lock_free() const volatile;
+       void store( T, memory_order = memory_order_seq_cst ) volatile;
+       T load( memory_order = memory_order_seq_cst ) volatile;
+       T exchange( T __v__, memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_weak( T&, T, memory_order, memory_order ) volatile;
+       bool compare_exchange_strong( T&, T, memory_order, memory_order ) volatile;
+       bool compare_exchange_weak( T&, T, memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_strong( T&, T, memory_order = memory_order_seq_cst ) volatile;
+
+       CPP0X( atomic() = default; )
+       CPP0X( constexpr explicit atomic( T __v__ ) : __f__( __v__ ) {
+               } )
+       CPP0X( atomic( const atomic& ) = delete; )
+       atomic& operator =( const atomic& ) CPP0X(=delete);
+
+       T operator =( T __v__ ) volatile
+       { store( __v__ ); return __v__; }
+
+       CPP0X(private:)
 #endif
-    T __f__;
+       T __f__;
 };
 
 #endif
@@ -1382,42 +1397,43 @@ CPP0X(private:)
 
 template<typename T> struct atomic< T* > : atomic_address
 {
-    T* load( memory_order = memory_order_seq_cst ) volatile;
-    T* exchange( T*, memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( T*&, T*, memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( T*&, T*, memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( T*&, T*,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( T*&, T*,
-                       memory_order = memory_order_seq_cst ) volatile;
-    T* fetch_add( ptrdiff_t, memory_order = memory_order_seq_cst ) volatile;
-    T* fetch_sub( ptrdiff_t, memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( T __v__ ) : atomic_address( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    T* operator =( T* __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    T* operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    T* operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    T* operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    T* operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    T* operator +=( T* __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    T* operator -=( T* __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
+       T* load( memory_order = memory_order_seq_cst ) volatile;
+       T* exchange( T*, memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_weak( T*&, T*, memory_order, memory_order ) volatile;
+       bool compare_exchange_strong( T*&, T*, memory_order, memory_order ) volatile;
+       bool compare_exchange_weak( T*&, T*,
+                                                                                                                       memory_order = memory_order_seq_cst ) volatile;
+       bool compare_exchange_strong( T*&, T*,
+                                                                                                                               memory_order = memory_order_seq_cst ) volatile;
+       T* fetch_add( ptrdiff_t, memory_order = memory_order_seq_cst ) volatile;
+       T* fetch_sub( ptrdiff_t, memory_order = memory_order_seq_cst ) volatile;
+
+       CPP0X( atomic() = default; )
+       CPP0X( constexpr explicit atomic( T __v__ ) : atomic_address( __v__ ) {
+               } )
+       CPP0X( atomic( const atomic& ) = delete; )
+       atomic& operator =( const atomic& ) CPP0X(=delete);
+
+       T* operator =( T* __v__ ) volatile
+       { store( __v__ ); return __v__; }
+
+       T* operator ++( int ) volatile
+       { return fetch_add( 1 ); }
+
+       T* operator --( int ) volatile
+       { return fetch_sub( 1 ); }
+
+       T* operator ++() volatile
+       { return fetch_add( 1 ) + 1; }
+
+       T* operator --() volatile
+       { return fetch_sub( 1 ) - 1; }
+
+       T* operator +=( T* __v__ ) volatile
+       { return fetch_add( __v__ ) + __v__; }
+
+       T* operator -=( T* __v__ ) volatile
+       { return fetch_sub( __v__ ) - __v__; }
 };
 
 #endif
@@ -1427,183 +1443,197 @@ template<typename T> struct atomic< T* > : atomic_address
 
 template<> struct atomic< bool > : atomic_bool
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( bool __v__ )
-    : atomic_bool( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    bool operator =( bool __v__ ) volatile
-    { store( __v__ ); return __v__; }
+       CPP0X( atomic() = default; )
+       CPP0X( constexpr explicit atomic( bool __v__ )
+                                        : atomic_bool( __v__ ) {
+               } )
+       CPP0X( atomic( const atomic& ) = delete; )
+       atomic& operator =( const atomic& ) CPP0X(=delete);
+
+       bool operator =( bool __v__ ) volatile
+       { store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< void* > : atomic_address
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( void* __v__ )
-    : atomic_address( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    void* operator =( void* __v__ ) volatile
-    { store( __v__ ); return __v__; }
+       CPP0X( atomic() = default; )
+       CPP0X( constexpr explicit atomic( void* __v__ )
+                                        : atomic_address( __v__ ) {
+               } )
+       CPP0X( atomic( const atomic& ) = delete; )
+       atomic& operator =( const atomic& ) CPP0X(=delete);
+
+       void* operator =( void* __v__ ) volatile
+       { store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< char > : atomic_char
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( char __v__ )
-    : atomic_char( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    char operator =( char __v__ ) volatile
-    { store( __v__ ); return __v__; }
+       CPP0X( atomic() = default; )
+       CPP0X( constexpr explicit atomic( char __v__ )
+                                        : atomic_char( __v__ ) {
+               } )
+       CPP0X( atomic( const atomic& ) = delete; )
+       atomic& operator =( const atomic& ) CPP0X(=delete);
+
+       char operator =( char __v__ ) volatile
+       { store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< signed char > : atomic_schar
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( signed char __v__ )
-    : atomic_schar( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    signed char operator =( signed char __v__ ) volatile
-    { store( __v__ ); return __v__; }
+       CPP0X( atomic() = default; )
+       CPP0X( constexpr explicit atomic( signed char __v__ )
+                                        : atomic_schar( __v__ ) {
+               } )
+       CPP0X( atomic( const atomic& ) = delete; )
+       atomic& operator =( const atomic& ) CPP0X(=delete);
+
+       signed char operator =( signed char __v__ ) volatile
+       { store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< unsigned char > : atomic_uchar
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( unsigned char __v__ )
-    : atomic_uchar( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    unsigned char operator =( unsigned char __v__ ) volatile
-    { store( __v__ ); return __v__; }
+       CPP0X( atomic() = default; )
+       CPP0X( constexpr explicit atomic( unsigned char __v__ )
+                                        : atomic_uchar( __v__ ) {
+               } )
+       CPP0X( atomic( const atomic& ) = delete; )
+       atomic& operator =( const atomic& ) CPP0X(=delete);
+
+       unsigned char operator =( unsigned char __v__ ) volatile
+       { store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< short > : atomic_short
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( short __v__ )
-    : atomic_short( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    short operator =( short __v__ ) volatile
-    { store( __v__ ); return __v__; }
+       CPP0X( atomic() = default; )
+       CPP0X( constexpr explicit atomic( short __v__ )
+                                        : atomic_short( __v__ ) {
+               } )
+       CPP0X( atomic( const atomic& ) = delete; )
+       atomic& operator =( const atomic& ) CPP0X(=delete);
+
+       short operator =( short __v__ ) volatile
+       { store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< unsigned short > : atomic_ushort
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( unsigned short __v__ )
-    : atomic_ushort( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    unsigned short operator =( unsigned short __v__ ) volatile
-    { store( __v__ ); return __v__; }
+       CPP0X( atomic() = default; )
+       CPP0X( constexpr explicit atomic( unsigned short __v__ )
+                                        : atomic_ushort( __v__ ) {
+               } )
+       CPP0X( atomic( const atomic& ) = delete; )
+       atomic& operator =( const atomic& ) CPP0X(=delete);
+
+       unsigned short operator =( unsigned short __v__ ) volatile
+       { store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< int > : atomic_int
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( int __v__ )
-    : atomic_int( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    int operator =( int __v__ ) volatile
-    { store( __v__ ); return __v__; }
+       CPP0X( atomic() = default; )
+       CPP0X( constexpr explicit atomic( int __v__ )
+                                        : atomic_int( __v__ ) {
+               } )
+       CPP0X( atomic( const atomic& ) = delete; )
+       atomic& operator =( const atomic& ) CPP0X(=delete);
+
+       int operator =( int __v__ ) volatile
+       { store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< unsigned int > : atomic_uint
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( unsigned int __v__ )
-    : atomic_uint( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    unsigned int operator =( unsigned int __v__ ) volatile
-    { store( __v__ ); return __v__; }
+       CPP0X( atomic() = default; )
+       CPP0X( constexpr explicit atomic( unsigned int __v__ )
+                                        : atomic_uint( __v__ ) {
+               } )
+       CPP0X( atomic( const atomic& ) = delete; )
+       atomic& operator =( const atomic& ) CPP0X(=delete);
+
+       unsigned int operator =( unsigned int __v__ ) volatile
+       { store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< long > : atomic_long
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( long __v__ )
-    : atomic_long( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    long operator =( long __v__ ) volatile
-    { store( __v__ ); return __v__; }
+       CPP0X( atomic() = default; )
+       CPP0X( constexpr explicit atomic( long __v__ )
+                                        : atomic_long( __v__ ) {
+               } )
+       CPP0X( atomic( const atomic& ) = delete; )
+       atomic& operator =( const atomic& ) CPP0X(=delete);
+
+       long operator =( long __v__ ) volatile
+       { store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< unsigned long > : atomic_ulong
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( unsigned long __v__ )
-    : atomic_ulong( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    unsigned long operator =( unsigned long __v__ ) volatile
-    { store( __v__ ); return __v__; }
+       CPP0X( atomic() = default; )
+       CPP0X( constexpr explicit atomic( unsigned long __v__ )
+                                        : atomic_ulong( __v__ ) {
+               } )
+       CPP0X( atomic( const atomic& ) = delete; )
+       atomic& operator =( const atomic& ) CPP0X(=delete);
+
+       unsigned long operator =( unsigned long __v__ ) volatile
+       { store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< long long > : atomic_llong
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( long long __v__ )
-    : atomic_llong( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    long long operator =( long long __v__ ) volatile
-    { store( __v__ ); return __v__; }
+       CPP0X( atomic() = default; )
+       CPP0X( constexpr explicit atomic( long long __v__ )
+                                        : atomic_llong( __v__ ) {
+               } )
+       CPP0X( atomic( const atomic& ) = delete; )
+       atomic& operator =( const atomic& ) CPP0X(=delete);
+
+       long long operator =( long long __v__ ) volatile
+       { store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< unsigned long long > : atomic_ullong
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( unsigned long long __v__ )
-    : atomic_ullong( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    unsigned long long operator =( unsigned long long __v__ ) volatile
-    { store( __v__ ); return __v__; }
+       CPP0X( atomic() = default; )
+       CPP0X( constexpr explicit atomic( unsigned long long __v__ )
+                                        : atomic_ullong( __v__ ) {
+               } )
+       CPP0X( atomic( const atomic& ) = delete; )
+       atomic& operator =( const atomic& ) CPP0X(=delete);
+
+       unsigned long long operator =( unsigned long long __v__ ) volatile
+       { store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< wchar_t > : atomic_wchar_t
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( wchar_t __v__ )
-    : atomic_wchar_t( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    wchar_t operator =( wchar_t __v__ ) volatile
-    { store( __v__ ); return __v__; }
+       CPP0X( atomic() = default; )
+       CPP0X( constexpr explicit atomic( wchar_t __v__ )
+                                        : atomic_wchar_t( __v__ ) {
+               } )
+       CPP0X( atomic( const atomic& ) = delete; )
+       atomic& operator =( const atomic& ) CPP0X(=delete);
+
+       wchar_t operator =( wchar_t __v__ ) volatile
+       { store( __v__ ); return __v__; }
 };
 
 
@@ -1614,1287 +1644,1344 @@ template<> struct atomic< wchar_t > : atomic_wchar_t
 
 
 inline bool atomic_is_lock_free
-( const volatile atomic_bool* __a__ )
+       ( const volatile atomic_bool* __a__ )
 { return false; }
 
 inline bool atomic_load_explicit
-( volatile atomic_bool* __a__, memory_order __x__ )
+       ( volatile atomic_bool* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline bool atomic_load
-( volatile atomic_bool* __a__ ) { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
+       ( volatile atomic_bool* __a__ ) { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_bool* __a__, bool __m__ )
+       ( volatile atomic_bool* __a__, bool __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_bool* __a__, bool __m__, memory_order __x__ )
+       ( volatile atomic_bool* __a__, bool __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_bool* __a__, bool __m__ )
+       ( volatile atomic_bool* __a__, bool __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_exchange_explicit
-( volatile atomic_bool* __a__, bool __m__, memory_order __x__ )
+       ( volatile atomic_bool* __a__, bool __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline bool atomic_exchange
-( volatile atomic_bool* __a__, bool __m__ )
+       ( volatile atomic_bool* __a__, bool __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_bool* __a__, bool* __e__, bool __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_bool* __a__, bool* __e__, bool __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_bool* __a__, bool* __e__, bool __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_bool* __a__, bool* __e__, bool __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_bool* __a__, bool* __e__, bool __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_bool* __a__, bool* __e__, bool __m__ )
+{
+       return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                               memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_bool* __a__, bool* __e__, bool __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_bool* __a__, bool* __e__, bool __m__ )
+{
+       return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                                       memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_address* __a__ )
 { return false; }
 
 inline void* atomic_load_explicit
-( volatile atomic_address* __a__, memory_order __x__ )
+       ( volatile atomic_address* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline void* atomic_load( volatile atomic_address* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_address* __a__, void* __m__ )
+       ( volatile atomic_address* __a__, void* __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_address* __a__, void* __m__, memory_order __x__ )
+       ( volatile atomic_address* __a__, void* __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_address* __a__, void* __m__ )
+       ( volatile atomic_address* __a__, void* __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline void* atomic_exchange_explicit
-( volatile atomic_address* __a__, void* __m__, memory_order __x__ )
+       ( volatile atomic_address* __a__, void* __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__,  __x__ ); }
 
 inline void* atomic_exchange
-( volatile atomic_address* __a__, void* __m__ )
+       ( volatile atomic_address* __a__, void* __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_address* __a__, void** __e__, void* __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_address* __a__, void** __e__, void* __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_address* __a__, void** __e__, void* __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_address* __a__, void** __e__, void* __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_address* __a__, void** __e__, void* __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_address* __a__, void** __e__, void* __m__ )
+{
+       return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                               memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_address* __a__, void** __e__, void* __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_address* __a__, void** __e__, void* __m__ )
+{
+       return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                                       memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_char* __a__ )
 { return false; }
 
 inline char atomic_load_explicit
-( volatile atomic_char* __a__, memory_order __x__ )
+       ( volatile atomic_char* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline char atomic_load( volatile atomic_char* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_char* __a__, char __m__ )
+       ( volatile atomic_char* __a__, char __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_char* __a__, char __m__, memory_order __x__ )
+       ( volatile atomic_char* __a__, char __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_char* __a__, char __m__ )
+       ( volatile atomic_char* __a__, char __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline char atomic_exchange_explicit
-( volatile atomic_char* __a__, char __m__, memory_order __x__ )
+       ( volatile atomic_char* __a__, char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline char atomic_exchange
-( volatile atomic_char* __a__, char __m__ )
+       ( volatile atomic_char* __a__, char __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_char* __a__, char* __e__, char __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_char* __a__, char* __e__, char __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_char* __a__, char* __e__, char __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_char* __a__, char* __e__, char __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_char* __a__, char* __e__, char __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_char* __a__, char* __e__, char __m__ )
+{
+       return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                               memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_char* __a__, char* __e__, char __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_char* __a__, char* __e__, char __m__ )
+{
+       return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                                       memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_schar* __a__ )
 { return false; }
 
 inline signed char atomic_load_explicit
-( volatile atomic_schar* __a__, memory_order __x__ )
+       ( volatile atomic_schar* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline signed char atomic_load( volatile atomic_schar* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_schar* __a__, signed char __m__ )
+       ( volatile atomic_schar* __a__, signed char __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
+       ( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_schar* __a__, signed char __m__ )
+       ( volatile atomic_schar* __a__, signed char __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline signed char atomic_exchange_explicit
-( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
+       ( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline signed char atomic_exchange
-( volatile atomic_schar* __a__, signed char __m__ )
+       ( volatile atomic_schar* __a__, signed char __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_schar* __a__, signed char* __e__, signed char __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_schar* __a__, signed char* __e__, signed char __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_schar* __a__, signed char* __e__, signed char __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_schar* __a__, signed char* __e__, signed char __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_schar* __a__, signed char* __e__, signed char __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_schar* __a__, signed char* __e__, signed char __m__ )
+{
+       return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                               memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_schar* __a__, signed char* __e__, signed char __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_schar* __a__, signed char* __e__, signed char __m__ )
+{
+       return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                                       memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_uchar* __a__ )
 { return false; }
 
 inline unsigned char atomic_load_explicit
-( volatile atomic_uchar* __a__, memory_order __x__ )
+       ( volatile atomic_uchar* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline unsigned char atomic_load( volatile atomic_uchar* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_uchar* __a__, unsigned char __m__ )
+       ( volatile atomic_uchar* __a__, unsigned char __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
+       ( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_uchar* __a__, unsigned char __m__ )
+       ( volatile atomic_uchar* __a__, unsigned char __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline unsigned char atomic_exchange_explicit
-( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
+       ( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline unsigned char atomic_exchange
-( volatile atomic_uchar* __a__, unsigned char __m__ )
+       ( volatile atomic_uchar* __a__, unsigned char __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_uchar* __a__, unsigned char* __e__, unsigned char __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_uchar* __a__, unsigned char* __e__, unsigned char __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_uchar* __a__, unsigned char* __e__, unsigned char __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_uchar* __a__, unsigned char* __e__, unsigned char __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_uchar* __a__, unsigned char* __e__, unsigned char __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_uchar* __a__, unsigned char* __e__, unsigned char __m__ )
+{
+       return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                               memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_uchar* __a__, unsigned char* __e__, unsigned char __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_uchar* __a__, unsigned char* __e__, unsigned char __m__ )
+{
+       return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                                       memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_short* __a__ )
 { return false; }
 
 inline short atomic_load_explicit
-( volatile atomic_short* __a__, memory_order __x__ )
+       ( volatile atomic_short* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline short atomic_load( volatile atomic_short* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_short* __a__, short __m__ )
+       ( volatile atomic_short* __a__, short __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_short* __a__, short __m__, memory_order __x__ )
+       ( volatile atomic_short* __a__, short __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_short* __a__, short __m__ )
+       ( volatile atomic_short* __a__, short __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline short atomic_exchange_explicit
-( volatile atomic_short* __a__, short __m__, memory_order __x__ )
+       ( volatile atomic_short* __a__, short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline short atomic_exchange
-( volatile atomic_short* __a__, short __m__ )
+       ( volatile atomic_short* __a__, short __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_short* __a__, short* __e__, short __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_short* __a__, short* __e__, short __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_short* __a__, short* __e__, short __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_short* __a__, short* __e__, short __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_short* __a__, short* __e__, short __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_short* __a__, short* __e__, short __m__ )
+{
+       return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                               memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_short* __a__, short* __e__, short __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_short* __a__, short* __e__, short __m__ )
+{
+       return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                                       memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_ushort* __a__ )
 { return false; }
 
 inline unsigned short atomic_load_explicit
-( volatile atomic_ushort* __a__, memory_order __x__ )
+       ( volatile atomic_ushort* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline unsigned short atomic_load( volatile atomic_ushort* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_ushort* __a__, unsigned short __m__ )
+       ( volatile atomic_ushort* __a__, unsigned short __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
+       ( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_ushort* __a__, unsigned short __m__ )
+       ( volatile atomic_ushort* __a__, unsigned short __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline unsigned short atomic_exchange_explicit
-( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
+       ( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline unsigned short atomic_exchange
-( volatile atomic_ushort* __a__, unsigned short __m__ )
+       ( volatile atomic_ushort* __a__, unsigned short __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_ushort* __a__, unsigned short* __e__, unsigned short __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_ushort* __a__, unsigned short* __e__, unsigned short __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_ushort* __a__, unsigned short* __e__, unsigned short __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_ushort* __a__, unsigned short* __e__, unsigned short __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_ushort* __a__, unsigned short* __e__, unsigned short __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_ushort* __a__, unsigned short* __e__, unsigned short __m__ )
+{
+       return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                               memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_ushort* __a__, unsigned short* __e__, unsigned short __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_ushort* __a__, unsigned short* __e__, unsigned short __m__ )
+{
+       return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                                       memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_int* __a__ )
 { return false; }
 
 inline int atomic_load_explicit
-( volatile atomic_int* __a__, memory_order __x__ )
+       ( volatile atomic_int* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline int atomic_load( volatile atomic_int* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_int* __a__, int __m__ )
+       ( volatile atomic_int* __a__, int __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_int* __a__, int __m__, memory_order __x__ )
+       ( volatile atomic_int* __a__, int __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_int* __a__, int __m__ )
+       ( volatile atomic_int* __a__, int __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline int atomic_exchange_explicit
-( volatile atomic_int* __a__, int __m__, memory_order __x__ )
+       ( volatile atomic_int* __a__, int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline int atomic_exchange
-( volatile atomic_int* __a__, int __m__ )
+       ( volatile atomic_int* __a__, int __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_int* __a__, int* __e__, int __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_int* __a__, int* __e__, int __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_int* __a__, int* __e__, int __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_int* __a__, int* __e__, int __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_int* __a__, int* __e__, int __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_int* __a__, int* __e__, int __m__ )
+{
+       return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                               memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_int* __a__, int* __e__, int __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_int* __a__, int* __e__, int __m__ )
+{
+       return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                                       memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_uint* __a__ )
 { return false; }
 
 inline unsigned int atomic_load_explicit
-( volatile atomic_uint* __a__, memory_order __x__ )
+       ( volatile atomic_uint* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline unsigned int atomic_load( volatile atomic_uint* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_uint* __a__, unsigned int __m__ )
+       ( volatile atomic_uint* __a__, unsigned int __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
+       ( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_uint* __a__, unsigned int __m__ )
+       ( volatile atomic_uint* __a__, unsigned int __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline unsigned int atomic_exchange_explicit
-( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
+       ( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline unsigned int atomic_exchange
-( volatile atomic_uint* __a__, unsigned int __m__ )
+       ( volatile atomic_uint* __a__, unsigned int __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_uint* __a__, unsigned int* __e__, unsigned int __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_uint* __a__, unsigned int* __e__, unsigned int __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_uint* __a__, unsigned int* __e__, unsigned int __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_uint* __a__, unsigned int* __e__, unsigned int __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_uint* __a__, unsigned int* __e__, unsigned int __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_uint* __a__, unsigned int* __e__, unsigned int __m__ )
+{
+       return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                               memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_uint* __a__, unsigned int* __e__, unsigned int __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_uint* __a__, unsigned int* __e__, unsigned int __m__ )
+{
+       return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                                       memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_long* __a__ )
 { return false; }
 
 inline long atomic_load_explicit
-( volatile atomic_long* __a__, memory_order __x__ )
+       ( volatile atomic_long* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline long atomic_load( volatile atomic_long* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_long* __a__, long __m__ )
+       ( volatile atomic_long* __a__, long __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_long* __a__, long __m__, memory_order __x__ )
+       ( volatile atomic_long* __a__, long __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_long* __a__, long __m__ )
+       ( volatile atomic_long* __a__, long __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline long atomic_exchange_explicit
-( volatile atomic_long* __a__, long __m__, memory_order __x__ )
+       ( volatile atomic_long* __a__, long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline long atomic_exchange
-( volatile atomic_long* __a__, long __m__ )
+       ( volatile atomic_long* __a__, long __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_long* __a__, long* __e__, long __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_long* __a__, long* __e__, long __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_long* __a__, long* __e__, long __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_long* __a__, long* __e__, long __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_long* __a__, long* __e__, long __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_long* __a__, long* __e__, long __m__ )
+{
+       return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                               memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_long* __a__, long* __e__, long __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_long* __a__, long* __e__, long __m__ )
+{
+       return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                                       memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_ulong* __a__ )
 { return false; }
 
 inline unsigned long atomic_load_explicit
-( volatile atomic_ulong* __a__, memory_order __x__ )
+       ( volatile atomic_ulong* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline unsigned long atomic_load( volatile atomic_ulong* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_ulong* __a__, unsigned long __m__ )
+       ( volatile atomic_ulong* __a__, unsigned long __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
+       ( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_ulong* __a__, unsigned long __m__ )
+       ( volatile atomic_ulong* __a__, unsigned long __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline unsigned long atomic_exchange_explicit
-( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
+       ( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline unsigned long atomic_exchange
-( volatile atomic_ulong* __a__, unsigned long __m__ )
+       ( volatile atomic_ulong* __a__, unsigned long __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_ulong* __a__, unsigned long* __e__, unsigned long __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_ulong* __a__, unsigned long* __e__, unsigned long __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_ulong* __a__, unsigned long* __e__, unsigned long __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_ulong* __a__, unsigned long* __e__, unsigned long __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_ulong* __a__, unsigned long* __e__, unsigned long __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_ulong* __a__, unsigned long* __e__, unsigned long __m__ )
+{
+       return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                               memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_ulong* __a__, unsigned long* __e__, unsigned long __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_ulong* __a__, unsigned long* __e__, unsigned long __m__ )
+{
+       return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                                       memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_llong* __a__ )
 { return false; }
 
 inline long long atomic_load_explicit
-( volatile atomic_llong* __a__, memory_order __x__ )
+       ( volatile atomic_llong* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline long long atomic_load( volatile atomic_llong* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_llong* __a__, long long __m__ )
+       ( volatile atomic_llong* __a__, long long __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
+       ( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_llong* __a__, long long __m__ )
+       ( volatile atomic_llong* __a__, long long __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline long long atomic_exchange_explicit
-( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
+       ( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline long long atomic_exchange
-( volatile atomic_llong* __a__, long long __m__ )
+       ( volatile atomic_llong* __a__, long long __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_llong* __a__, long long* __e__, long long __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_llong* __a__, long long* __e__, long long __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_llong* __a__, long long* __e__, long long __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_llong* __a__, long long* __e__, long long __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_llong* __a__, long long* __e__, long long __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_llong* __a__, long long* __e__, long long __m__ )
+{
+       return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                               memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_llong* __a__, long long* __e__, long long __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_llong* __a__, long long* __e__, long long __m__ )
+{
+       return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                                       memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_ullong* __a__ )
 { return false; }
 
 inline unsigned long long atomic_load_explicit
-( volatile atomic_ullong* __a__, memory_order __x__ )
+       ( volatile atomic_ullong* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline unsigned long long atomic_load( volatile atomic_ullong* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_ullong* __a__, unsigned long long __m__ )
+       ( volatile atomic_ullong* __a__, unsigned long long __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
+       ( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_ullong* __a__, unsigned long long __m__ )
+       ( volatile atomic_ullong* __a__, unsigned long long __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline unsigned long long atomic_exchange_explicit
-( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
+       ( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline unsigned long long atomic_exchange
-( volatile atomic_ullong* __a__, unsigned long long __m__ )
+       ( volatile atomic_ullong* __a__, unsigned long long __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_ullong* __a__, unsigned long long* __e__, unsigned long long __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_ullong* __a__, unsigned long long* __e__, unsigned long long __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_ullong* __a__, unsigned long long* __e__, unsigned long long __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_ullong* __a__, unsigned long long* __e__, unsigned long long __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_ullong* __a__, unsigned long long* __e__, unsigned long long __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_ullong* __a__, unsigned long long* __e__, unsigned long long __m__ )
+{
+       return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                               memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_ullong* __a__, unsigned long long* __e__, unsigned long long __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_ullong* __a__, unsigned long long* __e__, unsigned long long __m__ )
+{
+       return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                                       memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_wchar_t* __a__ )
 { return false; }
 
 inline wchar_t atomic_load_explicit
-( volatile atomic_wchar_t* __a__, memory_order __x__ )
+       ( volatile atomic_wchar_t* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline wchar_t atomic_load( volatile atomic_wchar_t* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_wchar_t* __a__, wchar_t __m__ )
+       ( volatile atomic_wchar_t* __a__, wchar_t __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
+       ( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_wchar_t* __a__, wchar_t __m__ )
+       ( volatile atomic_wchar_t* __a__, wchar_t __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline wchar_t atomic_exchange_explicit
-( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
+       ( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline wchar_t atomic_exchange
-( volatile atomic_wchar_t* __a__, wchar_t __m__ )
+       ( volatile atomic_wchar_t* __a__, wchar_t __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_wchar_t* __a__, wchar_t* __e__, wchar_t __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_wchar_t* __a__, wchar_t* __e__, wchar_t __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_wchar_t* __a__, wchar_t* __e__, wchar_t __m__,
-  memory_order __x__, memory_order __y__ )
+       ( volatile atomic_wchar_t* __a__, wchar_t* __e__, wchar_t __m__,
+       memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_wchar_t* __a__, wchar_t* __e__, wchar_t __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_wchar_t* __a__, wchar_t* __e__, wchar_t __m__ )
+{
+       return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                               memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_wchar_t* __a__, wchar_t* __e__, wchar_t __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+       ( volatile atomic_wchar_t* __a__, wchar_t* __e__, wchar_t __m__ )
+{
+       return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+                                                                                                                                                                                                       memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline void* atomic_fetch_add_explicit
-( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
+       ( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
 {
-       volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);
-       __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__);
-       __typeof__((__a__)->__f__) __copy__= __old__;
+       volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);
+       __typeof__((__a__)->__f__)__old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__);
+       __typeof__((__a__)->__f__)__copy__= __old__;
        __copy__ = (void *) (((char *)__copy__) + __m__);
        model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);
        return __old__;
 }
 
- inline void* atomic_fetch_add
-( volatile atomic_address* __a__, ptrdiff_t __m__ )
+inline void* atomic_fetch_add
+       ( volatile atomic_address* __a__, ptrdiff_t __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline void* atomic_fetch_sub_explicit
-( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
-{      volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);
-       __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__);
-       __typeof__((__a__)->__f__) __copy__= __old__;
+       ( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
+{
+       volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);
+       __typeof__((__a__)->__f__)__old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__);
+       __typeof__((__a__)->__f__)__copy__= __old__;
        __copy__ = (void *) (((char *)__copy__) - __m__);
        model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);
        return __old__;
 }
 
 inline void* atomic_fetch_sub
-( volatile atomic_address* __a__, ptrdiff_t __m__ )
+       ( volatile atomic_address* __a__, ptrdiff_t __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline char atomic_fetch_add_explicit
-( volatile atomic_char* __a__, char __m__, memory_order __x__ )
+       ( volatile atomic_char* __a__, char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline char atomic_fetch_add
-( volatile atomic_char* __a__, char __m__ )
+       ( volatile atomic_char* __a__, char __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline char atomic_fetch_sub_explicit
-( volatile atomic_char* __a__, char __m__, memory_order __x__ )
+       ( volatile atomic_char* __a__, char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline char atomic_fetch_sub
-( volatile atomic_char* __a__, char __m__ )
+       ( volatile atomic_char* __a__, char __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline char atomic_fetch_and_explicit
-( volatile atomic_char* __a__, char __m__, memory_order __x__ )
+       ( volatile atomic_char* __a__, char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline char atomic_fetch_and
-( volatile atomic_char* __a__, char __m__ )
+       ( volatile atomic_char* __a__, char __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline char atomic_fetch_or_explicit
-( volatile atomic_char* __a__, char __m__, memory_order __x__ )
+       ( volatile atomic_char* __a__, char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline char atomic_fetch_or
-( volatile atomic_char* __a__, char __m__ )
+       ( volatile atomic_char* __a__, char __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline char atomic_fetch_xor_explicit
-( volatile atomic_char* __a__, char __m__, memory_order __x__ )
+       ( volatile atomic_char* __a__, char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline char atomic_fetch_xor
-( volatile atomic_char* __a__, char __m__ )
+       ( volatile atomic_char* __a__, char __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline signed char atomic_fetch_add_explicit
-( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
+       ( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline signed char atomic_fetch_add
-( volatile atomic_schar* __a__, signed char __m__ )
+       ( volatile atomic_schar* __a__, signed char __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline signed char atomic_fetch_sub_explicit
-( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
+       ( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline signed char atomic_fetch_sub
-( volatile atomic_schar* __a__, signed char __m__ )
+       ( volatile atomic_schar* __a__, signed char __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline signed char atomic_fetch_and_explicit
-( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
+       ( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline signed char atomic_fetch_and
-( volatile atomic_schar* __a__, signed char __m__ )
+       ( volatile atomic_schar* __a__, signed char __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline signed char atomic_fetch_or_explicit
-( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
+       ( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline signed char atomic_fetch_or
-( volatile atomic_schar* __a__, signed char __m__ )
+       ( volatile atomic_schar* __a__, signed char __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline signed char atomic_fetch_xor_explicit
-( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
+       ( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline signed char atomic_fetch_xor
-( volatile atomic_schar* __a__, signed char __m__ )
+       ( volatile atomic_schar* __a__, signed char __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned char atomic_fetch_add_explicit
-( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
+       ( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline unsigned char atomic_fetch_add
-( volatile atomic_uchar* __a__, unsigned char __m__ )
+       ( volatile atomic_uchar* __a__, unsigned char __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned char atomic_fetch_sub_explicit
-( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
+       ( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline unsigned char atomic_fetch_sub
-( volatile atomic_uchar* __a__, unsigned char __m__ )
+       ( volatile atomic_uchar* __a__, unsigned char __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned char atomic_fetch_and_explicit
-( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
+       ( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline unsigned char atomic_fetch_and
-( volatile atomic_uchar* __a__, unsigned char __m__ )
+       ( volatile atomic_uchar* __a__, unsigned char __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned char atomic_fetch_or_explicit
-( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
+       ( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline unsigned char atomic_fetch_or
-( volatile atomic_uchar* __a__, unsigned char __m__ )
+       ( volatile atomic_uchar* __a__, unsigned char __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned char atomic_fetch_xor_explicit
-( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
+       ( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline unsigned char atomic_fetch_xor
-( volatile atomic_uchar* __a__, unsigned char __m__ )
+       ( volatile atomic_uchar* __a__, unsigned char __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline short atomic_fetch_add_explicit
-( volatile atomic_short* __a__, short __m__, memory_order __x__ )
+       ( volatile atomic_short* __a__, short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline short atomic_fetch_add
-( volatile atomic_short* __a__, short __m__ )
+       ( volatile atomic_short* __a__, short __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline short atomic_fetch_sub_explicit
-( volatile atomic_short* __a__, short __m__, memory_order __x__ )
+       ( volatile atomic_short* __a__, short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline short atomic_fetch_sub
-( volatile atomic_short* __a__, short __m__ )
+       ( volatile atomic_short* __a__, short __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline short atomic_fetch_and_explicit
-( volatile atomic_short* __a__, short __m__, memory_order __x__ )
+       ( volatile atomic_short* __a__, short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline short atomic_fetch_and
-( volatile atomic_short* __a__, short __m__ )
+       ( volatile atomic_short* __a__, short __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline short atomic_fetch_or_explicit
-( volatile atomic_short* __a__, short __m__, memory_order __x__ )
+       ( volatile atomic_short* __a__, short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline short atomic_fetch_or
-( volatile atomic_short* __a__, short __m__ )
+       ( volatile atomic_short* __a__, short __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline short atomic_fetch_xor_explicit
-( volatile atomic_short* __a__, short __m__, memory_order __x__ )
+       ( volatile atomic_short* __a__, short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline short atomic_fetch_xor
-( volatile atomic_short* __a__, short __m__ )
+       ( volatile atomic_short* __a__, short __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned short atomic_fetch_add_explicit
-( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
+       ( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline unsigned short atomic_fetch_add
-( volatile atomic_ushort* __a__, unsigned short __m__ )
+       ( volatile atomic_ushort* __a__, unsigned short __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned short atomic_fetch_sub_explicit
-( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
+       ( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline unsigned short atomic_fetch_sub
-( volatile atomic_ushort* __a__, unsigned short __m__ )
+       ( volatile atomic_ushort* __a__, unsigned short __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned short atomic_fetch_and_explicit
-( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
+       ( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline unsigned short atomic_fetch_and
-( volatile atomic_ushort* __a__, unsigned short __m__ )
+       ( volatile atomic_ushort* __a__, unsigned short __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned short atomic_fetch_or_explicit
-( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
+       ( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline unsigned short atomic_fetch_or
-( volatile atomic_ushort* __a__, unsigned short __m__ )
+       ( volatile atomic_ushort* __a__, unsigned short __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned short atomic_fetch_xor_explicit
-( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
+       ( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline unsigned short atomic_fetch_xor
-( volatile atomic_ushort* __a__, unsigned short __m__ )
+       ( volatile atomic_ushort* __a__, unsigned short __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline int atomic_fetch_add_explicit
-( volatile atomic_int* __a__, int __m__, memory_order __x__ )
+       ( volatile atomic_int* __a__, int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline int atomic_fetch_add
-( volatile atomic_int* __a__, int __m__ )
+       ( volatile atomic_int* __a__, int __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline int atomic_fetch_sub_explicit
-( volatile atomic_int* __a__, int __m__, memory_order __x__ )
+       ( volatile atomic_int* __a__, int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline int atomic_fetch_sub
-( volatile atomic_int* __a__, int __m__ )
+       ( volatile atomic_int* __a__, int __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline int atomic_fetch_and_explicit
-( volatile atomic_int* __a__, int __m__, memory_order __x__ )
+       ( volatile atomic_int* __a__, int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline int atomic_fetch_and
-( volatile atomic_int* __a__, int __m__ )
+       ( volatile atomic_int* __a__, int __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline int atomic_fetch_or_explicit
-( volatile atomic_int* __a__, int __m__, memory_order __x__ )
+       ( volatile atomic_int* __a__, int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline int atomic_fetch_or
-( volatile atomic_int* __a__, int __m__ )
+       ( volatile atomic_int* __a__, int __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline int atomic_fetch_xor_explicit
-( volatile atomic_int* __a__, int __m__, memory_order __x__ )
+       ( volatile atomic_int* __a__, int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline int atomic_fetch_xor
-( volatile atomic_int* __a__, int __m__ )
+       ( volatile atomic_int* __a__, int __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned int atomic_fetch_add_explicit
-( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
+       ( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline unsigned int atomic_fetch_add
-( volatile atomic_uint* __a__, unsigned int __m__ )
+       ( volatile atomic_uint* __a__, unsigned int __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned int atomic_fetch_sub_explicit
-( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
+       ( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline unsigned int atomic_fetch_sub
-( volatile atomic_uint* __a__, unsigned int __m__ )
+       ( volatile atomic_uint* __a__, unsigned int __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned int atomic_fetch_and_explicit
-( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
+       ( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline unsigned int atomic_fetch_and
-( volatile atomic_uint* __a__, unsigned int __m__ )
+       ( volatile atomic_uint* __a__, unsigned int __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned int atomic_fetch_or_explicit
-( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
+       ( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline unsigned int atomic_fetch_or
-( volatile atomic_uint* __a__, unsigned int __m__ )
+       ( volatile atomic_uint* __a__, unsigned int __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned int atomic_fetch_xor_explicit
-( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
+       ( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline unsigned int atomic_fetch_xor
-( volatile atomic_uint* __a__, unsigned int __m__ )
+       ( volatile atomic_uint* __a__, unsigned int __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline long atomic_fetch_add_explicit
-( volatile atomic_long* __a__, long __m__, memory_order __x__ )
+       ( volatile atomic_long* __a__, long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline long atomic_fetch_add
-( volatile atomic_long* __a__, long __m__ )
+       ( volatile atomic_long* __a__, long __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline long atomic_fetch_sub_explicit
-( volatile atomic_long* __a__, long __m__, memory_order __x__ )
+       ( volatile atomic_long* __a__, long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline long atomic_fetch_sub
-( volatile atomic_long* __a__, long __m__ )
+       ( volatile atomic_long* __a__, long __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline long atomic_fetch_and_explicit
-( volatile atomic_long* __a__, long __m__, memory_order __x__ )
+       ( volatile atomic_long* __a__, long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline long atomic_fetch_and
-( volatile atomic_long* __a__, long __m__ )
+       ( volatile atomic_long* __a__, long __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline long atomic_fetch_or_explicit
-( volatile atomic_long* __a__, long __m__, memory_order __x__ )
+       ( volatile atomic_long* __a__, long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline long atomic_fetch_or
-( volatile atomic_long* __a__, long __m__ )
+       ( volatile atomic_long* __a__, long __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline long atomic_fetch_xor_explicit
-( volatile atomic_long* __a__, long __m__, memory_order __x__ )
+       ( volatile atomic_long* __a__, long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline long atomic_fetch_xor
-( volatile atomic_long* __a__, long __m__ )
+       ( volatile atomic_long* __a__, long __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned long atomic_fetch_add_explicit
-( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
+       ( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline unsigned long atomic_fetch_add
-( volatile atomic_ulong* __a__, unsigned long __m__ )
+       ( volatile atomic_ulong* __a__, unsigned long __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned long atomic_fetch_sub_explicit
-( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
+       ( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline unsigned long atomic_fetch_sub
-( volatile atomic_ulong* __a__, unsigned long __m__ )
+       ( volatile atomic_ulong* __a__, unsigned long __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned long atomic_fetch_and_explicit
-( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
+       ( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline unsigned long atomic_fetch_and
-( volatile atomic_ulong* __a__, unsigned long __m__ )
+       ( volatile atomic_ulong* __a__, unsigned long __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned long atomic_fetch_or_explicit
-( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
+       ( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline unsigned long atomic_fetch_or
-( volatile atomic_ulong* __a__, unsigned long __m__ )
+       ( volatile atomic_ulong* __a__, unsigned long __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned long atomic_fetch_xor_explicit
-( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
+       ( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline unsigned long atomic_fetch_xor
-( volatile atomic_ulong* __a__, unsigned long __m__ )
+       ( volatile atomic_ulong* __a__, unsigned long __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline long long atomic_fetch_add_explicit
-( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
+       ( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline long long atomic_fetch_add
-( volatile atomic_llong* __a__, long long __m__ )
+       ( volatile atomic_llong* __a__, long long __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline long long atomic_fetch_sub_explicit
-( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
+       ( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline long long atomic_fetch_sub
-( volatile atomic_llong* __a__, long long __m__ )
+       ( volatile atomic_llong* __a__, long long __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline long long atomic_fetch_and_explicit
-( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
+       ( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline long long atomic_fetch_and
-( volatile atomic_llong* __a__, long long __m__ )
+       ( volatile atomic_llong* __a__, long long __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline long long atomic_fetch_or_explicit
-( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
+       ( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline long long atomic_fetch_or
-( volatile atomic_llong* __a__, long long __m__ )
+       ( volatile atomic_llong* __a__, long long __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline long long atomic_fetch_xor_explicit
-( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
+       ( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline long long atomic_fetch_xor
-( volatile atomic_llong* __a__, long long __m__ )
+       ( volatile atomic_llong* __a__, long long __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned long long atomic_fetch_add_explicit
-( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
+       ( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline unsigned long long atomic_fetch_add
-( volatile atomic_ullong* __a__, unsigned long long __m__ )
+       ( volatile atomic_ullong* __a__, unsigned long long __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned long long atomic_fetch_sub_explicit
-( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
+       ( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline unsigned long long atomic_fetch_sub
-( volatile atomic_ullong* __a__, unsigned long long __m__ )
+       ( volatile atomic_ullong* __a__, unsigned long long __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned long long atomic_fetch_and_explicit
-( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
+       ( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline unsigned long long atomic_fetch_and
-( volatile atomic_ullong* __a__, unsigned long long __m__ )
+       ( volatile atomic_ullong* __a__, unsigned long long __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned long long atomic_fetch_or_explicit
-( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
+       ( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline unsigned long long atomic_fetch_or
-( volatile atomic_ullong* __a__, unsigned long long __m__ )
+       ( volatile atomic_ullong* __a__, unsigned long long __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned long long atomic_fetch_xor_explicit
-( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
+       ( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline unsigned long long atomic_fetch_xor
-( volatile atomic_ullong* __a__, unsigned long long __m__ )
+       ( volatile atomic_ullong* __a__, unsigned long long __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline wchar_t atomic_fetch_add_explicit
-( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
+       ( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline wchar_t atomic_fetch_add
-( volatile atomic_wchar_t* __a__, wchar_t __m__ )
+       ( volatile atomic_wchar_t* __a__, wchar_t __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline wchar_t atomic_fetch_sub_explicit
-( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
+       ( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline wchar_t atomic_fetch_sub
-( volatile atomic_wchar_t* __a__, wchar_t __m__ )
+       ( volatile atomic_wchar_t* __a__, wchar_t __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline wchar_t atomic_fetch_and_explicit
-( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
+       ( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline wchar_t atomic_fetch_and
-( volatile atomic_wchar_t* __a__, wchar_t __m__ )
+       ( volatile atomic_wchar_t* __a__, wchar_t __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline wchar_t atomic_fetch_or_explicit
-( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
+       ( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline wchar_t atomic_fetch_or
-( volatile atomic_wchar_t* __a__, wchar_t __m__ )
+       ( volatile atomic_wchar_t* __a__, wchar_t __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline wchar_t atomic_fetch_xor_explicit
-( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
+       ( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline wchar_t atomic_fetch_xor
-( volatile atomic_wchar_t* __a__, wchar_t __m__ )
+       ( volatile atomic_wchar_t* __a__, wchar_t __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
@@ -2902,75 +2989,75 @@ inline wchar_t atomic_fetch_xor
 
 
 #define atomic_is_lock_free( __a__ ) \
-false
+       false
 
 #define atomic_load( __a__ ) \
-_ATOMIC_LOAD_( __a__, memory_order_seq_cst )
+       _ATOMIC_LOAD_( __a__, memory_order_seq_cst )
 
 #define atomic_load_explicit( __a__, __x__ ) \
-_ATOMIC_LOAD_( __a__, __x__ )
+       _ATOMIC_LOAD_( __a__, __x__ )
 
 #define atomic_init( __a__, __m__ ) \
-_ATOMIC_INIT_( __a__, __m__ )
+       _ATOMIC_INIT_( __a__, __m__ )
 
 #define atomic_store( __a__, __m__ ) \
-_ATOMIC_STORE_( __a__, __m__, memory_order_seq_cst )
+       _ATOMIC_STORE_( __a__, __m__, memory_order_seq_cst )
 
 #define atomic_store_explicit( __a__, __m__, __x__ ) \
-_ATOMIC_STORE_( __a__, __m__, __x__ )
+       _ATOMIC_STORE_( __a__, __m__, __x__ )
 
 #define atomic_exchange( __a__, __m__ ) \
-_ATOMIC_MODIFY_( __a__, =, __m__, memory_order_seq_cst )
+       _ATOMIC_MODIFY_( __a__, =, __m__, memory_order_seq_cst )
 
 #define atomic_exchange_explicit( __a__, __m__, __x__ ) \
-_ATOMIC_MODIFY_( __a__, =, __m__, __x__ )
+       _ATOMIC_MODIFY_( __a__, =, __m__, __x__ )
 
 #define atomic_compare_exchange_weak( __a__, __e__, __m__ ) \
-_ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, memory_order_seq_cst )
+       _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, memory_order_seq_cst )
 
 #define atomic_compare_exchange_strong( __a__, __e__, __m__ ) \
-_ATOMIC_CMPSWP_( __a__, __e__, __m__, memory_order_seq_cst )
+       _ATOMIC_CMPSWP_( __a__, __e__, __m__, memory_order_seq_cst )
 
 #define atomic_compare_exchange_weak_explicit( __a__, __e__, __m__, __x__, __y__ ) \
-_ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ )
+       _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ )
 
 #define atomic_compare_exchange_strong_explicit( __a__, __e__, __m__, __x__, __y__ ) \
-_ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ )
+       _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ )
 
 
 #define atomic_fetch_add_explicit( __a__, __m__, __x__ ) \
-_ATOMIC_MODIFY_( __a__, +=, __m__, __x__ )
+       _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ )
 
 #define atomic_fetch_add( __a__, __m__ ) \
-_ATOMIC_MODIFY_( __a__, +=, __m__, memory_order_seq_cst )
+       _ATOMIC_MODIFY_( __a__, +=, __m__, memory_order_seq_cst )
 
 
 #define atomic_fetch_sub_explicit( __a__, __m__, __x__ ) \
-_ATOMIC_MODIFY_( __a__, -=, __m__, __x__ )
+       _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ )
 
 #define atomic_fetch_sub( __a__, __m__ ) \
-_ATOMIC_MODIFY_( __a__, -=, __m__, memory_order_seq_cst )
+       _ATOMIC_MODIFY_( __a__, -=, __m__, memory_order_seq_cst )
 
 
 #define atomic_fetch_and_explicit( __a__, __m__, __x__ ) \
-_ATOMIC_MODIFY_( __a__, &=, __m__, __x__ )
+       _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ )
 
 #define atomic_fetch_and( __a__, __m__ ) \
-_ATOMIC_MODIFY_( __a__, &=, __m__, memory_order_seq_cst )
+       _ATOMIC_MODIFY_( __a__, &=, __m__, memory_order_seq_cst )
 
 
 #define atomic_fetch_or_explicit( __a__, __m__, __x__ ) \
-_ATOMIC_MODIFY_( __a__, |=, __m__, __x__ )
+       _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ )
 
 #define atomic_fetch_or( __a__, __m__ ) \
-_ATOMIC_MODIFY_( __a__, |=, __m__, memory_order_seq_cst )
+       _ATOMIC_MODIFY_( __a__, |=, __m__, memory_order_seq_cst )
 
 
 #define atomic_fetch_xor_explicit( __a__, __m__, __x__ ) \
-_ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ )
+       _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ )
 
 #define atomic_fetch_xor( __a__, __m__ ) \
-_ATOMIC_MODIFY_( __a__, ^=, __m__, memory_order_seq_cst )
+       _ATOMIC_MODIFY_( __a__, ^=, __m__, memory_order_seq_cst )
 
 
 #endif
@@ -2983,532 +3070,588 @@ inline bool atomic_bool::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_bool::store
-( bool __m__, memory_order __x__ ) volatile
+       ( bool __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_bool::load
-( memory_order __x__ ) volatile
+       ( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline bool atomic_bool::exchange
-( bool __m__, memory_order __x__ ) volatile
+       ( bool __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_bool::compare_exchange_weak
-( bool& __e__, bool __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( bool& __e__, bool __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_bool::compare_exchange_strong
-( bool& __e__, bool __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( bool& __e__, bool __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_bool::compare_exchange_weak
-( bool& __e__, bool __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( bool& __e__, bool __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                               __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                               __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_bool::compare_exchange_strong
-( bool& __e__, bool __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( bool& __e__, bool __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                                       __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                                       __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_address::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_address::store
-( void* __m__, memory_order __x__ ) volatile
+       ( void* __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline void* atomic_address::load
-( memory_order __x__ ) volatile
+       ( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline void* atomic_address::exchange
-( void* __m__, memory_order __x__ ) volatile
+       ( void* __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_address::compare_exchange_weak
-( void*& __e__, void* __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( void*& __e__, void* __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_address::compare_exchange_strong
-( void*& __e__, void* __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( void*& __e__, void* __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_address::compare_exchange_weak
-( void*& __e__, void* __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( void*& __e__, void* __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                               __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                               __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_address::compare_exchange_strong
-( void*& __e__, void* __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( void*& __e__, void* __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                                       __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                                       __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_char::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_char::store
-( char __m__, memory_order __x__ ) volatile
+       ( char __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline char atomic_char::load
-( memory_order __x__ ) volatile
+       ( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline char atomic_char::exchange
-( char __m__, memory_order __x__ ) volatile
+       ( char __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_char::compare_exchange_weak
-( char& __e__, char __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( char& __e__, char __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_char::compare_exchange_strong
-( char& __e__, char __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( char& __e__, char __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_char::compare_exchange_weak
-( char& __e__, char __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( char& __e__, char __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                               __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                               __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_char::compare_exchange_strong
-( char& __e__, char __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( char& __e__, char __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                                       __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                                       __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_schar::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_schar::store
-( signed char __m__, memory_order __x__ ) volatile
+       ( signed char __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline signed char atomic_schar::load
-( memory_order __x__ ) volatile
+       ( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline signed char atomic_schar::exchange
-( signed char __m__, memory_order __x__ ) volatile
+       ( signed char __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_schar::compare_exchange_weak
-( signed char& __e__, signed char __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( signed char& __e__, signed char __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_schar::compare_exchange_strong
-( signed char& __e__, signed char __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( signed char& __e__, signed char __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_schar::compare_exchange_weak
-( signed char& __e__, signed char __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( signed char& __e__, signed char __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                               __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                               __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_schar::compare_exchange_strong
-( signed char& __e__, signed char __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( signed char& __e__, signed char __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                                       __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                                       __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_uchar::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_uchar::store
-( unsigned char __m__, memory_order __x__ ) volatile
+       ( unsigned char __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline unsigned char atomic_uchar::load
-( memory_order __x__ ) volatile
+       ( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline unsigned char atomic_uchar::exchange
-( unsigned char __m__, memory_order __x__ ) volatile
+       ( unsigned char __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_uchar::compare_exchange_weak
-( unsigned char& __e__, unsigned char __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( unsigned char& __e__, unsigned char __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_uchar::compare_exchange_strong
-( unsigned char& __e__, unsigned char __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( unsigned char& __e__, unsigned char __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_uchar::compare_exchange_weak
-( unsigned char& __e__, unsigned char __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( unsigned char& __e__, unsigned char __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                               __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                               __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_uchar::compare_exchange_strong
-( unsigned char& __e__, unsigned char __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( unsigned char& __e__, unsigned char __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                                       __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                                       __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_short::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_short::store
-( short __m__, memory_order __x__ ) volatile
+       ( short __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline short atomic_short::load
-( memory_order __x__ ) volatile
+       ( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline short atomic_short::exchange
-( short __m__, memory_order __x__ ) volatile
+       ( short __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_short::compare_exchange_weak
-( short& __e__, short __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( short& __e__, short __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_short::compare_exchange_strong
-( short& __e__, short __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( short& __e__, short __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_short::compare_exchange_weak
-( short& __e__, short __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( short& __e__, short __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                               __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                               __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_short::compare_exchange_strong
-( short& __e__, short __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( short& __e__, short __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                                       __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                                       __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_ushort::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_ushort::store
-( unsigned short __m__, memory_order __x__ ) volatile
+       ( unsigned short __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline unsigned short atomic_ushort::load
-( memory_order __x__ ) volatile
+       ( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline unsigned short atomic_ushort::exchange
-( unsigned short __m__, memory_order __x__ ) volatile
+       ( unsigned short __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_ushort::compare_exchange_weak
-( unsigned short& __e__, unsigned short __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( unsigned short& __e__, unsigned short __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_ushort::compare_exchange_strong
-( unsigned short& __e__, unsigned short __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( unsigned short& __e__, unsigned short __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_ushort::compare_exchange_weak
-( unsigned short& __e__, unsigned short __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( unsigned short& __e__, unsigned short __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                               __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                               __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_ushort::compare_exchange_strong
-( unsigned short& __e__, unsigned short __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( unsigned short& __e__, unsigned short __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                                       __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                                       __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_int::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_int::store
-( int __m__, memory_order __x__ ) volatile
+       ( int __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline int atomic_int::load
-( memory_order __x__ ) volatile
+       ( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline int atomic_int::exchange
-( int __m__, memory_order __x__ ) volatile
+       ( int __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_int::compare_exchange_weak
-( int& __e__, int __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( int& __e__, int __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_int::compare_exchange_strong
-( int& __e__, int __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( int& __e__, int __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_int::compare_exchange_weak
-( int& __e__, int __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( int& __e__, int __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                               __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                               __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_int::compare_exchange_strong
-( int& __e__, int __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( int& __e__, int __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                                       __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                                       __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_uint::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_uint::store
-( unsigned int __m__, memory_order __x__ ) volatile
+       ( unsigned int __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline unsigned int atomic_uint::load
-( memory_order __x__ ) volatile
+       ( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline unsigned int atomic_uint::exchange
-( unsigned int __m__, memory_order __x__ ) volatile
+       ( unsigned int __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_uint::compare_exchange_weak
-( unsigned int& __e__, unsigned int __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( unsigned int& __e__, unsigned int __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_uint::compare_exchange_strong
-( unsigned int& __e__, unsigned int __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( unsigned int& __e__, unsigned int __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_uint::compare_exchange_weak
-( unsigned int& __e__, unsigned int __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( unsigned int& __e__, unsigned int __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                               __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                               __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_uint::compare_exchange_strong
-( unsigned int& __e__, unsigned int __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( unsigned int& __e__, unsigned int __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                                       __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                                       __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_long::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_long::store
-( long __m__, memory_order __x__ ) volatile
+       ( long __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline long atomic_long::load
-( memory_order __x__ ) volatile
+       ( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline long atomic_long::exchange
-( long __m__, memory_order __x__ ) volatile
+       ( long __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_long::compare_exchange_weak
-( long& __e__, long __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( long& __e__, long __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_long::compare_exchange_strong
-( long& __e__, long __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( long& __e__, long __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_long::compare_exchange_weak
-( long& __e__, long __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( long& __e__, long __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                               __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                               __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_long::compare_exchange_strong
-( long& __e__, long __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( long& __e__, long __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                                       __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                                       __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_ulong::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_ulong::store
-( unsigned long __m__, memory_order __x__ ) volatile
+       ( unsigned long __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline unsigned long atomic_ulong::load
-( memory_order __x__ ) volatile
+       ( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline unsigned long atomic_ulong::exchange
-( unsigned long __m__, memory_order __x__ ) volatile
+       ( unsigned long __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_ulong::compare_exchange_weak
-( unsigned long& __e__, unsigned long __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( unsigned long& __e__, unsigned long __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_ulong::compare_exchange_strong
-( unsigned long& __e__, unsigned long __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( unsigned long& __e__, unsigned long __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_ulong::compare_exchange_weak
-( unsigned long& __e__, unsigned long __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( unsigned long& __e__, unsigned long __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                               __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                               __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_ulong::compare_exchange_strong
-( unsigned long& __e__, unsigned long __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( unsigned long& __e__, unsigned long __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                                       __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                                       __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_llong::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_llong::store
-( long long __m__, memory_order __x__ ) volatile
+       ( long long __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline long long atomic_llong::load
-( memory_order __x__ ) volatile
+       ( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline long long atomic_llong::exchange
-( long long __m__, memory_order __x__ ) volatile
+       ( long long __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_llong::compare_exchange_weak
-( long long& __e__, long long __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( long long& __e__, long long __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_llong::compare_exchange_strong
-( long long& __e__, long long __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( long long& __e__, long long __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_llong::compare_exchange_weak
-( long long& __e__, long long __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( long long& __e__, long long __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                               __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                               __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_llong::compare_exchange_strong
-( long long& __e__, long long __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( long long& __e__, long long __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                                       __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                                       __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_ullong::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_ullong::store
-( unsigned long long __m__, memory_order __x__ ) volatile
+       ( unsigned long long __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline unsigned long long atomic_ullong::load
-( memory_order __x__ ) volatile
+       ( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline unsigned long long atomic_ullong::exchange
-( unsigned long long __m__, memory_order __x__ ) volatile
+       ( unsigned long long __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_ullong::compare_exchange_weak
-( unsigned long long& __e__, unsigned long long __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( unsigned long long& __e__, unsigned long long __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_ullong::compare_exchange_strong
-( unsigned long long& __e__, unsigned long long __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( unsigned long long& __e__, unsigned long long __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_ullong::compare_exchange_weak
-( unsigned long long& __e__, unsigned long long __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( unsigned long long& __e__, unsigned long long __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                               __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                               __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_ullong::compare_exchange_strong
-( unsigned long long& __e__, unsigned long long __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( unsigned long long& __e__, unsigned long long __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                                       __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                                       __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_wchar_t::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_wchar_t::store
-( wchar_t __m__, memory_order __x__ ) volatile
+       ( wchar_t __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline wchar_t atomic_wchar_t::load
-( memory_order __x__ ) volatile
+       ( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline wchar_t atomic_wchar_t::exchange
-( wchar_t __m__, memory_order __x__ ) volatile
+       ( wchar_t __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_wchar_t::compare_exchange_weak
-( wchar_t& __e__, wchar_t __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( wchar_t& __e__, wchar_t __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_wchar_t::compare_exchange_strong
-( wchar_t& __e__, wchar_t __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+       ( wchar_t& __e__, wchar_t __m__,
+       memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_wchar_t::compare_exchange_weak
-( wchar_t& __e__, wchar_t __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( wchar_t& __e__, wchar_t __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                               __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                               __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_wchar_t::compare_exchange_strong
-( wchar_t& __e__, wchar_t __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( wchar_t& __e__, wchar_t __m__, memory_order __x__ ) volatile
+{
+       return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+                                                                                                                                                                                                       __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                                                                                       __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 template< typename T >
@@ -3529,335 +3672,339 @@ inline T atomic<T>::exchange( T __v__, memory_order __x__ ) volatile
 
 template< typename T >
 inline bool atomic<T>::compare_exchange_weak
-( T& __r__, T __v__, memory_order __x__, memory_order __y__ ) volatile
+       ( T& __r__, T __v__, memory_order __x__, memory_order __y__ ) volatile
 { return _ATOMIC_CMPSWP_WEAK_( this, &__r__, __v__, __x__ ); }
 
 template< typename T >
 inline bool atomic<T>::compare_exchange_strong
-( T& __r__, T __v__, memory_order __x__, memory_order __y__ ) volatile
+       ( T& __r__, T __v__, memory_order __x__, memory_order __y__ ) volatile
 { return _ATOMIC_CMPSWP_( this, &__r__, __v__, __x__ ); }
 
 template< typename T >
 inline bool atomic<T>::compare_exchange_weak
-( T& __r__, T __v__, memory_order __x__ ) volatile
-{ return compare_exchange_weak( __r__, __v__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( T& __r__, T __v__, memory_order __x__ ) volatile
+{
+       return compare_exchange_weak( __r__, __v__, __x__,
+                                                                                                                               __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                               __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 template< typename T >
 inline bool atomic<T>::compare_exchange_strong
-( T& __r__, T __v__, memory_order __x__ ) volatile
-{ return compare_exchange_strong( __r__, __v__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( T& __r__, T __v__, memory_order __x__ ) volatile
+{
+       return compare_exchange_strong( __r__, __v__, __x__,
+                                                                                                                                       __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                       __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline void* atomic_address::fetch_add
-( ptrdiff_t __m__, memory_order __x__ ) volatile
+       ( ptrdiff_t __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 inline void* atomic_address::fetch_sub
-( ptrdiff_t __m__, memory_order __x__ ) volatile
+       ( ptrdiff_t __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline char atomic_char::fetch_add
-( char __m__, memory_order __x__ ) volatile
+       ( char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline char atomic_char::fetch_sub
-( char __m__, memory_order __x__ ) volatile
+       ( char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline char atomic_char::fetch_and
-( char __m__, memory_order __x__ ) volatile
+       ( char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline char atomic_char::fetch_or
-( char __m__, memory_order __x__ ) volatile
+       ( char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline char atomic_char::fetch_xor
-( char __m__, memory_order __x__ ) volatile
+       ( char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline signed char atomic_schar::fetch_add
-( signed char __m__, memory_order __x__ ) volatile
+       ( signed char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline signed char atomic_schar::fetch_sub
-( signed char __m__, memory_order __x__ ) volatile
+       ( signed char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline signed char atomic_schar::fetch_and
-( signed char __m__, memory_order __x__ ) volatile
+       ( signed char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline signed char atomic_schar::fetch_or
-( signed char __m__, memory_order __x__ ) volatile
+       ( signed char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline signed char atomic_schar::fetch_xor
-( signed char __m__, memory_order __x__ ) volatile
+       ( signed char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned char atomic_uchar::fetch_add
-( unsigned char __m__, memory_order __x__ ) volatile
+       ( unsigned char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned char atomic_uchar::fetch_sub
-( unsigned char __m__, memory_order __x__ ) volatile
+       ( unsigned char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned char atomic_uchar::fetch_and
-( unsigned char __m__, memory_order __x__ ) volatile
+       ( unsigned char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned char atomic_uchar::fetch_or
-( unsigned char __m__, memory_order __x__ ) volatile
+       ( unsigned char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned char atomic_uchar::fetch_xor
-( unsigned char __m__, memory_order __x__ ) volatile
+       ( unsigned char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline short atomic_short::fetch_add
-( short __m__, memory_order __x__ ) volatile
+       ( short __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline short atomic_short::fetch_sub
-( short __m__, memory_order __x__ ) volatile
+       ( short __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline short atomic_short::fetch_and
-( short __m__, memory_order __x__ ) volatile
+       ( short __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline short atomic_short::fetch_or
-( short __m__, memory_order __x__ ) volatile
+       ( short __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline short atomic_short::fetch_xor
-( short __m__, memory_order __x__ ) volatile
+       ( short __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned short atomic_ushort::fetch_add
-( unsigned short __m__, memory_order __x__ ) volatile
+       ( unsigned short __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned short atomic_ushort::fetch_sub
-( unsigned short __m__, memory_order __x__ ) volatile
+       ( unsigned short __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned short atomic_ushort::fetch_and
-( unsigned short __m__, memory_order __x__ ) volatile
+       ( unsigned short __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned short atomic_ushort::fetch_or
-( unsigned short __m__, memory_order __x__ ) volatile
+       ( unsigned short __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned short atomic_ushort::fetch_xor
-( unsigned short __m__, memory_order __x__ ) volatile
+       ( unsigned short __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline int atomic_int::fetch_add
-( int __m__, memory_order __x__ ) volatile
+       ( int __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline int atomic_int::fetch_sub
-( int __m__, memory_order __x__ ) volatile
+       ( int __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline int atomic_int::fetch_and
-( int __m__, memory_order __x__ ) volatile
+       ( int __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline int atomic_int::fetch_or
-( int __m__, memory_order __x__ ) volatile
+       ( int __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline int atomic_int::fetch_xor
-( int __m__, memory_order __x__ ) volatile
+       ( int __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned int atomic_uint::fetch_add
-( unsigned int __m__, memory_order __x__ ) volatile
+       ( unsigned int __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned int atomic_uint::fetch_sub
-( unsigned int __m__, memory_order __x__ ) volatile
+       ( unsigned int __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned int atomic_uint::fetch_and
-( unsigned int __m__, memory_order __x__ ) volatile
+       ( unsigned int __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned int atomic_uint::fetch_or
-( unsigned int __m__, memory_order __x__ ) volatile
+       ( unsigned int __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned int atomic_uint::fetch_xor
-( unsigned int __m__, memory_order __x__ ) volatile
+       ( unsigned int __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline long atomic_long::fetch_add
-( long __m__, memory_order __x__ ) volatile
+       ( long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline long atomic_long::fetch_sub
-( long __m__, memory_order __x__ ) volatile
+       ( long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline long atomic_long::fetch_and
-( long __m__, memory_order __x__ ) volatile
+       ( long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline long atomic_long::fetch_or
-( long __m__, memory_order __x__ ) volatile
+       ( long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline long atomic_long::fetch_xor
-( long __m__, memory_order __x__ ) volatile
+       ( long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned long atomic_ulong::fetch_add
-( unsigned long __m__, memory_order __x__ ) volatile
+       ( unsigned long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned long atomic_ulong::fetch_sub
-( unsigned long __m__, memory_order __x__ ) volatile
+       ( unsigned long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned long atomic_ulong::fetch_and
-( unsigned long __m__, memory_order __x__ ) volatile
+       ( unsigned long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned long atomic_ulong::fetch_or
-( unsigned long __m__, memory_order __x__ ) volatile
+       ( unsigned long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned long atomic_ulong::fetch_xor
-( unsigned long __m__, memory_order __x__ ) volatile
+       ( unsigned long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline long long atomic_llong::fetch_add
-( long long __m__, memory_order __x__ ) volatile
+       ( long long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline long long atomic_llong::fetch_sub
-( long long __m__, memory_order __x__ ) volatile
+       ( long long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline long long atomic_llong::fetch_and
-( long long __m__, memory_order __x__ ) volatile
+       ( long long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline long long atomic_llong::fetch_or
-( long long __m__, memory_order __x__ ) volatile
+       ( long long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline long long atomic_llong::fetch_xor
-( long long __m__, memory_order __x__ ) volatile
+       ( long long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned long long atomic_ullong::fetch_add
-( unsigned long long __m__, memory_order __x__ ) volatile
+       ( unsigned long long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned long long atomic_ullong::fetch_sub
-( unsigned long long __m__, memory_order __x__ ) volatile
+       ( unsigned long long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned long long atomic_ullong::fetch_and
-( unsigned long long __m__, memory_order __x__ ) volatile
+       ( unsigned long long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned long long atomic_ullong::fetch_or
-( unsigned long long __m__, memory_order __x__ ) volatile
+       ( unsigned long long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned long long atomic_ullong::fetch_xor
-( unsigned long long __m__, memory_order __x__ ) volatile
+       ( unsigned long long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline wchar_t atomic_wchar_t::fetch_add
-( wchar_t __m__, memory_order __x__ ) volatile
+       ( wchar_t __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline wchar_t atomic_wchar_t::fetch_sub
-( wchar_t __m__, memory_order __x__ ) volatile
+       ( wchar_t __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline wchar_t atomic_wchar_t::fetch_and
-( wchar_t __m__, memory_order __x__ ) volatile
+       ( wchar_t __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline wchar_t atomic_wchar_t::fetch_or
-( wchar_t __m__, memory_order __x__ ) volatile
+       ( wchar_t __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline wchar_t atomic_wchar_t::fetch_xor
-( wchar_t __m__, memory_order __x__ ) volatile
+       ( wchar_t __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
@@ -3871,31 +4018,39 @@ T* atomic<T*>::exchange( T* __v__, memory_order __x__ ) volatile
 
 template< typename T >
 bool atomic<T*>::compare_exchange_weak
-( T*& __r__, T* __v__, memory_order __x__, memory_order __y__) volatile
-{ return atomic_address::compare_exchange_weak( *reinterpret_cast<void**>( &__r__ ),
-               static_cast<void*>( __v__ ), __x__, __y__ ); }
+       ( T*& __r__, T* __v__, memory_order __x__, memory_order __y__) volatile
+{
+       return atomic_address::compare_exchange_weak( *reinterpret_cast<void**>( &__r__ ),
+                                                                                                                                                                                               static_cast<void*>( __v__ ), __x__, __y__ );
+}
 //{ return _ATOMIC_CMPSWP_WEAK_( this, &__r__, __v__, __x__ ); }
 
 template< typename T >
 bool atomic<T*>::compare_exchange_strong
-( T*& __r__, T* __v__, memory_order __x__, memory_order __y__) volatile
-{ return atomic_address::compare_exchange_strong( *reinterpret_cast<void**>( &__r__ ),
-               static_cast<void*>( __v__ ), __x__, __y__ ); }
+       ( T*& __r__, T* __v__, memory_order __x__, memory_order __y__) volatile
+{
+       return atomic_address::compare_exchange_strong( *reinterpret_cast<void**>( &__r__ ),
+                                                                                                                                                                                                       static_cast<void*>( __v__ ), __x__, __y__ );
+}
 //{ return _ATOMIC_CMPSWP_( this, &__r__, __v__, __x__ ); }
 
 template< typename T >
 bool atomic<T*>::compare_exchange_weak
-( T*& __r__, T* __v__, memory_order __x__ ) volatile
-{ return compare_exchange_weak( __r__, __v__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( T*& __r__, T* __v__, memory_order __x__ ) volatile
+{
+       return compare_exchange_weak( __r__, __v__, __x__,
+                                                                                                                               __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                               __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 template< typename T >
 bool atomic<T*>::compare_exchange_strong
-( T*& __r__, T* __v__, memory_order __x__ ) volatile
-{ return compare_exchange_strong( __r__, __v__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+       ( T*& __r__, T* __v__, memory_order __x__ ) volatile
+{
+       return compare_exchange_strong( __r__, __v__, __x__,
+                                                                                                                                       __x__ == memory_order_acq_rel ? memory_order_acquire :
+                                                                                                                                       __x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 template< typename T >
 T* atomic<T*>::fetch_add( ptrdiff_t __v__, memory_order __x__ ) volatile
@@ -3916,14 +4071,14 @@ static inline void atomic_thread_fence(memory_order order)
 
 /** @todo Do we want to try to support a user's signal-handler? */
 static inline void atomic_signal_fence(memory_order order)
-{ /* No-op? */ }
+{       /* No-op? */ }
 #ifdef __cplusplus
 }
 #endif
 
 
 #ifdef __cplusplus
-} // namespace std
+}      // namespace std
 #endif
 
-#endif /* __IMPATOMIC_H__ */
+#endif /* __IMPATOMIC_H__ */
index 83e05d92cd88325555b82aeabfb95dcc1d3ae491..68a6d635721ccd0f76f4dad38436f37056a8e20b 100644 (file)
 extern "C" {
 #endif
 
-       void store_8(void *addr, uint8_t val);
-       void store_16(void *addr, uint16_t val);
-       void store_32(void *addr, uint32_t val);
-       void store_64(void *addr, uint64_t val);
-
-       uint8_t load_8(const void *addr);
-       uint16_t load_16(const void *addr);
-       uint32_t load_32(const void *addr);
-       uint64_t load_64(const void *addr);
-
-       void cds_store8(void *addr);
-       void cds_store16(void *addr);
-       void cds_store32(void *addr);
-       void cds_store64(void *addr);
-
-       void cds_load8(const void *addr);
-       void cds_load16(const void *addr);
-       void cds_load32(const void *addr);
-       void cds_load64(const void *addr);
+void store_8(void *addr, uint8_t val);
+void store_16(void *addr, uint16_t val);
+void store_32(void *addr, uint32_t val);
+void store_64(void *addr, uint64_t val);
+
+uint8_t load_8(const void *addr);
+uint16_t load_16(const void *addr);
+uint32_t load_32(const void *addr);
+uint64_t load_64(const void *addr);
+
+void cds_store8(void *addr);
+void cds_store16(void *addr);
+void cds_store32(void *addr);
+void cds_store64(void *addr);
+
+void cds_load8(const void *addr);
+void cds_load16(const void *addr);
+void cds_load32(const void *addr);
+void cds_load64(const void *addr);
 
 #ifdef __cplusplus
 }
 #endif
 
-#endif /* __LIBRACE_H__ */
+#endif /* __LIBRACE_H__ */
index ca496f1a3e0ef753a51a091a984193aaf568f606..2117b5eb75fffbdf133267902390006839523ca9 100644 (file)
@@ -13,10 +13,10 @@ namespace std {
 #endif
 
 typedef enum memory_order {
-    memory_order_relaxed, memory_order_consume, memory_order_acquire, 
-    memory_order_release, memory_order_acq_rel, memory_order_seq_cst
+       memory_order_relaxed, memory_order_consume, memory_order_acquire,
+       memory_order_release, memory_order_acq_rel, memory_order_seq_cst
 } memory_order;
-  
+
 #ifdef __cplusplus
 }
 #endif
index ddc44278b8a9f45a1ca36e4e957a16d684b15883..9180e983453d682521567d84b437a8b8f0ae1cae 100644 (file)
@@ -14,4 +14,4 @@ void model_assert(bool expr, const char *file, int line);
 }
 #endif
 
-#endif /* __MODEL_ASSERT_H__ */
+#endif /* __MODEL_ASSERT_H__ */
index 34525d2c8e1abc1aaeba97d53d3bbb0bc3808bd3..c60f0a85d16e9fb20fb02cd7dee63c3e4724eede 100644 (file)
@@ -21,8 +21,8 @@
  */
 typedef int thread_id_t;
 
-#define THREAD_ID_T_NONE       -1
+#define THREAD_ID_T_NONE        -1
 
 typedef unsigned int modelclock_t;
 
-#endif /* __MODELTYPES_H__ */
+#endif /* __MODELTYPES_H__ */
index e015ea599f7373d59125f3eaf69fcfce7e1f3a0e..d90d6645e11ab7f00e1d5456a3543c9339895c8b 100644 (file)
@@ -7,29 +7,36 @@
 #define __CXX_MUTEX__
 
 #include "modeltypes.h"
-//#include <mutex>
+#include "mymemory.h"
 
 namespace cdsc {
-       struct mutex_state {
-               void *locked; /* Thread holding the lock */
-               thread_id_t alloc_tid;
-               modelclock_t alloc_clock;
-               int init; // WL
-       };
+struct mutex_state {
+       void *locked;   /* Thread holding the lock */
+       thread_id_t alloc_tid;
+       modelclock_t alloc_clock;
+       int init;       // WL
+};
 
-       class mutex {
-       public:
-               mutex();
-               ~mutex() {}
-               void lock();
-               bool try_lock();
-               void unlock();
-               struct mutex_state * get_state() {return &state;}
-               void initialize() { state.init = 1; } // WL
-               bool is_initialized() { return state.init == 1; }
-               
-       private:
-               struct mutex_state state;
-       };
+class mutex {
+public:
+       mutex();
+       ~mutex() {}
+       void lock();
+       bool try_lock();
+       void unlock();
+       struct mutex_state * get_state() {return &state;}
+       void initialize() { state.init = 1; }   // WL
+       bool is_initialized() { return state.init == 1; }
+
+private:
+       struct mutex_state state;
+};
+
+class snapmutex : public mutex {
+public:
+       snapmutex() : mutex()
+       { }
+       SNAPSHOTALLOC
+};
 }
-#endif /* __CXX_MUTEX__ */
+#endif /* __CXX_MUTEX__ */
index c55ce485f7e57ad8f13830f2e6d2ad99d5ff65ea..cffd8c2d1cb93b90ff43f8e56890af13167336e0 100644 (file)
@@ -12,8 +12,8 @@
 typedef void *(*pthread_start_t)(void *);
 
 struct pthread_params {
-    pthread_start_t func;
-    void *arg;
+       pthread_start_t func;
+       void *arg;
 };
 
 extern "C" {
@@ -28,7 +28,7 @@ int pthread_attr_getdetachstate(const pthread_attr_t *, int *);
 int pthread_attr_getguardsize(const pthread_attr_t *, size_t *);
 int pthread_attr_getinheritsched(const pthread_attr_t *, int *);
 int pthread_attr_getschedparam(const pthread_attr_t *,
-          struct sched_param *);
+                                                                                                                        struct sched_param *);
 int pthread_attr_getschedpolicy(const pthread_attr_t *, int *);
 int pthread_attr_getscope(const pthread_attr_t *, int *);
 int pthread_attr_getstackaddr(const pthread_attr_t *, void **);
@@ -38,7 +38,7 @@ int pthread_attr_setdetachstate(pthread_attr_t *, int);
 int pthread_attr_setguardsize(pthread_attr_t *, size_t);
 int pthread_attr_setinheritsched(pthread_attr_t *, int);
 int pthread_attr_setschedparam(pthread_attr_t *,
-          const struct sched_param *);
+                                                                                                                        const struct sched_param *);
 int pthread_attr_setschedpolicy(pthread_attr_t *, int);
 int pthread_attr_setscope(pthread_attr_t *, int);
 int pthread_attr_setstackaddr(pthread_attr_t *, void *);
@@ -63,7 +63,7 @@ int pthread_mutex_getprioceiling(const pthread_mutex_t *, int *);
 int pthread_mutex_setprioceiling(pthread_mutex_t *, int, int *);
 int pthread_mutexattr_destroy(pthread_mutexattr_t *);
 int pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *,
-          int *);
+                                                                                                                                                int *);
 int pthread_mutexattr_getprotocol(const pthread_mutexattr_t *, int *);
 int pthread_mutexattr_getpshared(const pthread_mutexattr_t *, int *);
 int pthread_mutexattr_gettype(const pthread_mutexattr_t *, int *);
@@ -75,7 +75,7 @@ int pthread_mutexattr_settype(pthread_mutexattr_t *, int);
 int pthread_once(pthread_once_t *, void (*)(void));
 int pthread_rwlock_destroy(pthread_rwlock_t *);
 int pthread_rwlock_init(pthread_rwlock_t *,
-          const pthread_rwlockattr_t *);
+                                                                                               const pthread_rwlockattr_t *);
 int pthread_rwlock_rdlock(pthread_rwlock_t *);
 int pthread_rwlock_tryrdlock(pthread_rwlock_t *);
 int pthread_rwlock_trywrlock(pthread_rwlock_t *);
@@ -83,14 +83,14 @@ int pthread_rwlock_unlock(pthread_rwlock_t *);
 int pthread_rwlock_wrlock(pthread_rwlock_t *);
 int pthread_rwlockattr_destroy(pthread_rwlockattr_t *);
 int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *,
-          int *);
+                                                                                                                                       int *);
 int pthread_rwlockattr_init(pthread_rwlockattr_t *);
 int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *, int);
 int pthread_setcancelstate(int, int *);
 int pthread_setcanceltype(int, int *);
 int pthread_setconcurrency(int);
-int pthread_setschedparam(pthread_t, int ,
-          const struct sched_param *);
+int pthread_setschedparam(pthread_t, int,
+                                                                                                       const struct sched_param *);
 int pthread_setspecific(pthread_key_t, const void *);
 void pthread_testcancel(void);
 
index d4d21984ea8a1ea37f5db767a1deffca56a05c21..783e934c40d7062c5eeb78429c34a1e1abda190f 100644 (file)
@@ -67,6 +67,6 @@ using std::memory_order_seq_cst;
 using std::atomic_thread_fence;
 using std::atomic_signal_fence;
 
-#endif /* __cplusplus */
+#endif /* __cplusplus */
 
-#endif /* __STDATOMIC_H__ */
+#endif /* __STDATOMIC_H__ */
index f38be0ab96ecf973873ffc1c03576da0dad47c28..7c84e095baf738608fc3820016e5caf941cf1a88 100644 (file)
@@ -17,21 +17,21 @@ typedef void *__thread_identifier;
 extern "C" {
 #endif
 
-       typedef void (*thrd_start_t)(void *);
+typedef void (*thrd_start_t)(void *);
 
-       typedef struct {
-               __thread_identifier priv;
-       } thrd_t;
+typedef struct {
+       __thread_identifier priv;
+} thrd_t;
 
-       int thrd_create(thrd_t *t, thrd_start_t start_routine, void *arg);
-       int thrd_join(thrd_t);
-       void thrd_yield(void);
-       thrd_t thrd_current(void);
+int thrd_create(thrd_t *t, thrd_start_t start_routine, void *arg);
+int thrd_join(thrd_t);
+void thrd_yield(void);
+thrd_t thrd_current(void);
 
-       int user_main(int, char**);
+int user_main(int, char**);
 
 #ifdef __cplusplus
 }
 #endif
 
-#endif /* __THREADS_H__ */
+#endif /* __THREADS_H__ */
index c7b9f265744f823dee2b9525551475b08e531591..0eaffd5eafc66753546035d59c628df44db40d6f 100644 (file)
 #define is_normal_mo_infer(x) ((x >= memory_order_relaxed && x <= memory_order_seq_cst) || x == WILDCARD_NONEXIST || x == memory_order_normal)
 #define is_normal_mo(x) ((x >= memory_order_relaxed && x <= memory_order_seq_cst) || x == memory_order_normal)
 
-#define assert_infer(x) for (int i = 0; i <= wildcardNum; i++)\
-       ASSERT(is_normal_mo_infer((x[i])));
+#define assert_infer(x) for (int i = 0;i <= wildcardNum;i++) \
+               ASSERT(is_normal_mo_infer((x[i])));
 
-#define assert_infers(x) for (ModelList<memory_order *>::iterator iter =\
-       (x)->begin(); iter != (x)->end(); iter++)\
-       assert_infer((*iter));
+#define assert_infers(x) for (ModelList<memory_order *>::iterator iter = \
+                                                                                                                               (x)->begin();iter != (x)->end();iter++) \
+               assert_infer((*iter));
 
 #define relaxed memory_order_relaxed
 #define release memory_order_release
diff --git a/main.cc b/main.cc
index 0abc80b7e766063c5b3304be14d3a86dbc049045..a484f497458ee078270aa2cd52ccc2a20c551c1a 100644 (file)
--- a/main.cc
+++ b/main.cc
@@ -17,7 +17,7 @@
 #include "snapshot-interface.h"
 #include "plugins.h"
 
-static void param_defaults(struct model_params *params)
+void param_defaults(struct model_params *params)
 {
        params->verbose = !!DBG_ENABLED();
        params->uninitvalue = 0;
@@ -161,33 +161,6 @@ static void install_trace_analyses(ModelExecution *execution)
        }
 }
 
-/** The model_main function contains the main model checking loop. */
-static void model_main()
-{
-       struct model_params params;
-
-       param_defaults(&params);
-       register_plugins();
-
-       parse_options(&params, main_argc, main_argv);
-
-       //Initialize race detector
-       initRaceDetector();
-
-       snapshot_stack_init();
-
-       if (!model)
-               model = new ModelChecker();
-       model->setParams(params);
-       install_trace_analyses(model->get_execution());
-
-       snapshot_record(0);
-       model->run();
-       delete model;
-
-       DEBUG("Exiting\n");
-}
-
 /**
  * Main function.  Just initializes snapshotting library and the
  * snapshotting library calls the model_main function.
@@ -212,6 +185,26 @@ int main(int argc, char **argv)
        /* Configure output redirection for the model-checker */
        redirect_output();
 
-       /* Let's jump in quickly and start running stuff */
-       snapshot_system_init(10000, 1024, 1024, 40000, &model_main);
+       //Initialize snapshotting library and model checker object
+       if (!model) {
+               snapshot_system_init(10000, 1024, 1024, 40000);
+               model = new ModelChecker();
+               model->startChecker();
+       }
+
+       register_plugins();
+
+       //Parse command line options
+       model_params *params = model->getParams();
+       parse_options(params, main_argc, main_argv);
+
+       //Initialize race detector
+       initRaceDetector();
+
+       snapshot_stack_init();
+       install_trace_analyses(model->get_execution());
+
+       snapshot_record(0);
+       model->startMainThread();
+       DEBUG("Exiting\n");
 }
index 3235f5c488461dda1918fb2cd1bc5293441effa1..b8d9f7cb4ce46b7c01ed7aa08fe982985a7a9cd0 100644 (file)
--- a/model.cc
+++ b/model.cc
 #include "execution.h"
 #include "history.h"
 #include "bugmessage.h"
+#include "params.h"
 
-ModelChecker *model;
+ModelChecker *model = NULL;
+
+/** Wrapper to run the user's main function, with appropriate arguments */
+void user_main_wrapper(void *)
+{
+       user_main(model->params.argc, model->params.argv);
+}
 
 /** @brief Constructor */
 ModelChecker::ModelChecker() :
@@ -35,6 +42,11 @@ ModelChecker::ModelChecker() :
        inspect_plugin(NULL)
 {
        memset(&stats,0,sizeof(struct execution_stats));
+       init_thread = new Thread(execution->get_next_id(), (thrd_t *) model_malloc(sizeof(thrd_t)), &user_main_wrapper, NULL, NULL);    // L: user_main_wrapper passes the user program
+       execution->add_thread(init_thread);
+       scheduler->set_current_thread(init_thread);
+       execution->setParams(&params);
+       param_defaults(&params);
 }
 
 /** @brief Destructor */
@@ -45,9 +57,8 @@ ModelChecker::~ModelChecker()
 }
 
 /** Method to set parameters */
-void ModelChecker::setParams(struct model_params params) {
-       this->params = params;
-       execution->setParams(&params);
+model_params * ModelChecker::getParams() {
+       return &params;
 }
 
 /**
@@ -56,8 +67,6 @@ void ModelChecker::setParams(struct model_params params) {
  */
 void ModelChecker::reset_to_initial_state()
 {
-       DEBUG("+++ Resetting to initial state +++\n");
-       node_stack->reset_execution();
 
        /**
         * FIXME: if we utilize partial rollback, we will need to free only
@@ -153,7 +162,7 @@ void ModelChecker::print_bugs() const
                                                        bugs->size(),
                                                        bugs->size() > 1 ? "s" : "");
        for (unsigned int i = 0;i < bugs->size();i++)
-               (*bugs)[i]->print();
+               (*bugs)[i] -> print();
 }
 
 /**
@@ -164,15 +173,15 @@ void ModelChecker::print_bugs() const
  */
 void ModelChecker::record_stats()
 {
-       stats.num_total++;
+       stats.num_total ++;
        if (!execution->isfeasibleprefix())
-               stats.num_infeasible++;
+               stats.num_infeasible ++;
        else if (execution->have_bug_reports())
-               stats.num_buggy_executions++;
+               stats.num_buggy_executions ++;
        else if (execution->is_complete_execution())
-               stats.num_complete++;
+               stats.num_complete ++;
        else {
-               stats.num_redundant++;
+               stats.num_redundant ++;
 
                /**
                 * @todo We can violate this ASSERT() when fairness/sleep sets
@@ -253,16 +262,15 @@ bool ModelChecker::next_execution()
                return true;
        }
 // test code
-       execution_number++;
+       execution_number ++;
        reset_to_initial_state();
-       node_stack->full_reset();
        return false;
 }
 
 /** @brief Run trace analyses on complete trace */
 void ModelChecker::run_trace_analyses() {
-       for (unsigned int i = 0;i < trace_analyses.size();i++)
-               trace_analyses[i]->analyze(execution->get_action_trace());
+       for (unsigned int i = 0;i < trace_analyses.size();i ++)
+               trace_analyses[i] -> analyze(execution->get_action_trace());
 }
 
 /**
@@ -315,10 +323,11 @@ uint64_t ModelChecker::switch_to_master(ModelAction *act)
        Thread *old = thread_current();
        scheduler->set_current_thread(NULL);
        ASSERT(!old->get_pending());
-/* W: No plugin
-        if (inspect_plugin != NULL) {
-                inspect_plugin->inspectModelAction(act);
-        }*/
+
+       if (inspect_plugin != NULL) {
+               inspect_plugin->inspectModelAction(act);
+       }
+
        old->set_pending(act);
        if (Thread::swap(old, &system_context) < 0) {
                perror("swap threads");
@@ -327,10 +336,13 @@ uint64_t ModelChecker::switch_to_master(ModelAction *act)
        return old->get_return_value();
 }
 
-/** Wrapper to run the user's main function, with appropriate arguments */
-void user_main_wrapper(void *)
-{
-       user_main(model->params.argc, model->params.argv);
+static void runChecker() {
+       model->run();
+       delete model;
+}
+
+void ModelChecker::startChecker() {
+       startExecution(get_system_context(), runChecker);
 }
 
 bool ModelChecker::should_terminate_execution()
@@ -356,11 +368,30 @@ void ModelChecker::do_restart()
 {
        restart_flag = false;
        reset_to_initial_state();
-       node_stack->full_reset();
        memset(&stats,0,sizeof(struct execution_stats));
        execution_number = 1;
 }
 
+void ModelChecker::startMainThread() {
+       init_thread->set_state(THREAD_RUNNING);
+       scheduler->set_current_thread(init_thread);
+       thread_startup();
+}
+
+static bool is_nonsc_write(const ModelAction *act) {
+       if (act->get_type() == ATOMIC_WRITE) {
+               std::memory_order order = act->get_mo();
+               switch(order) {
+               case std::memory_order_relaxed:
+               case std::memory_order_release:
+                       return true;
+               default:
+                       return false;
+               }
+       }
+       return false;
+}
+
 /** @brief Run ModelChecker for the user program */
 void ModelChecker::run()
 {
@@ -369,10 +400,8 @@ void ModelChecker::run()
        initstate(423121, random_state, sizeof(random_state));
 
        for(int exec = 0;exec < params.maxexecutions;exec++) {
-               thrd_t user_thread;
-               Thread *t = new Thread(execution->get_next_id(), &user_thread, &user_main_wrapper, NULL, NULL); // L: user_main_wrapper passes the user program
-               execution->add_thread(t);
-               //Need to seed random number generator, otherwise its state gets reset
+               Thread * t = init_thread;
+
                do {
                        /*
                         * Stash next pending action(s) for thread(s). There
@@ -380,11 +409,11 @@ void ModelChecker::run()
                         * thread which just took a step--plus the first step
                         * for any newly-created thread
                         */
-
+                       ModelAction * pending;
                        for (unsigned int i = 0;i < get_num_threads();i++) {
                                thread_id_t tid = int_to_id(i);
                                Thread *thr = get_thread(tid);
-                               if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
+                               if (!thr->is_model_thread() && !thr->is_complete() && ((!(pending=thr->get_pending())) || is_nonsc_write(pending)) ) {
                                        switch_from_master(thr);        // L: context swapped, and action type of thr changed.
                                        if (thr->is_waiting_on(thr))
                                                assert_bug("Deadlock detected (thread %u)", i);
diff --git a/model.h b/model.h
index 4d8558e270613e968cccb01ea282f4af04d36a70..1269e76e1edf00d47951bc3c60001fee8658fe33 100644 (file)
--- a/model.h
+++ b/model.h
@@ -33,7 +33,7 @@ class ModelChecker {
 public:
        ModelChecker();
        ~ModelChecker();
-       void setParams(struct model_params params);
+       model_params * getParams();
        void run();
 
        /** Restart the model checker, intended for pluggins. */
@@ -64,7 +64,8 @@ public:
        model_params params;
        void add_trace_analysis(TraceAnalysis *a) {     trace_analyses.push_back(a); }
        void set_inspect_plugin(TraceAnalysis *a) {     inspect_plugin=a;       }
-
+       void startMainThread();
+       void startChecker();
        MEMALLOC
 private:
        /** Flag indicates whether to restart the model checker. */
@@ -74,6 +75,7 @@ private:
        Scheduler * const scheduler;
        NodeStack * const node_stack;
        ModelExecution *execution;
+       Thread * init_thread;
        ModelHistory *history;
 
        int execution_number;
@@ -106,5 +108,4 @@ private:
 };
 
 extern ModelChecker *model;
-
 #endif /* __MODEL_H__ */
index 44f64ff764b3c0c80a2c789d4f5bfcff40a7ef1e..0776db8eef5857b34c47bdbf3493194c9672097a 100644 (file)
--- a/mutex.cc
+++ b/mutex.cc
@@ -13,7 +13,8 @@ mutex::mutex()
        state.locked = NULL;
        thread_id_t tid = thread_current()->get_id();
        state.alloc_tid = tid;
-       state.alloc_clock = model->get_execution()->get_cv(tid)->getClock(tid);
+       ClockVector *cv = model->get_execution()->get_cv(tid);
+       state.alloc_clock = cv  == NULL ? 0 : cv->getClock(tid);
 }
 
 void mutex::lock()
index f7b716247e3965d5b2c2f3e255846b60aeda01c5..a85c48c31121bc5175d658fe48ddad3a62c48d0c 100644 (file)
@@ -1,3 +1,4 @@
+
 #include <stdlib.h>
 #include <stdio.h>
 #include <dlfcn.h>
@@ -264,13 +265,13 @@ void operator delete[](void *p, size_t size)
 /** @brief Snapshotting allocation function for use by the Thread class only */
 void * Thread_malloc(size_t size)
 {
-       return malloc(size);
+       return snapshot_malloc(size);
 }
 
 /** @brief Snapshotting free function for use by the Thread class only */
 void Thread_free(void *ptr)
 {
-       free(ptr);
+       snapshot_free(ptr);
 }
 
 #endif /* !USE_MPROTECT_SNAPSHOT */
index fa356d0e9ae666db61bb993423b7b915df2c815b..09a79e4057d882431b9e662373c2b3a19bfff5fb 100644 (file)
@@ -32,7 +32,7 @@ public:
        ModelAction * get_uninit_action() const { return uninit_action; }
        void print() const;
 
-       MEMALLOC
+       SNAPSHOTALLOC
 private:
        ModelAction * const action;
 
@@ -40,7 +40,7 @@ private:
        ModelAction *uninit_action;
 };
 
-typedef ModelVector<Node *> node_list_t;
+typedef SnapVector<Node *> node_list_t;
 
 /**
  * @brief A stack of nodes
@@ -63,7 +63,7 @@ public:
        void full_reset();
        void print() const;
 
-       MEMALLOC
+       SNAPSHOTALLOC
 private:
        node_list_t node_list;
        const struct model_params * get_params() const;
index db86895890d8061cbe4fedbc04d908263c5081be..7f749cae6fc082d1551fdfb86f0c5728cf176fbc 100644 (file)
--- a/params.h
+++ b/params.h
@@ -20,4 +20,6 @@ struct model_params {
        char **argv;
 };
 
+void param_defaults(struct model_params *params);
+
 #endif /* __PARAMS_H__ */
diff --git a/printf.c b/printf.c
new file mode 100644 (file)
index 0000000..8a700ad
--- /dev/null
+++ b/printf.c
@@ -0,0 +1,914 @@
+///////////////////////////////////////////////////////////////////////////////\r
+// \author (c) Marco Paland (info@paland.com)\r
+//             2014-2019, PALANDesign Hannover, Germany\r
+//\r
+// \license The MIT License (MIT)\r
+//\r
+// Permission is hereby granted, free of charge, to any person obtaining a copy\r
+// of this software and associated documentation files (the "Software"), to deal\r
+// in the Software without restriction, including without limitation the rights\r
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r
+// copies of the Software, and to permit persons to whom the Software is\r
+// furnished to do so, subject to the following conditions:\r
+//\r
+// The above copyright notice and this permission notice shall be included in\r
+// all copies or substantial portions of the Software.\r
+//\r
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\r
+// THE SOFTWARE.\r
+//\r
+// \brief Tiny printf, sprintf and (v)snprintf implementation, optimized for speed on\r
+//        embedded systems with a very limited resources. These routines are thread\r
+//        safe and reentrant!\r
+//        Use this instead of the bloated standard/newlib printf cause these use\r
+//        malloc for printf (and may not be thread safe).\r
+//\r
+///////////////////////////////////////////////////////////////////////////////\r
+\r
+#include <stdbool.h>\r
+#include <stdint.h>\r
+\r
+#include "printf.h"\r
+\r
+\r
+// define this globally (e.g. gcc -DPRINTF_INCLUDE_CONFIG_H ...) to include the\r
+// printf_config.h header file\r
+// default: undefined\r
+#ifdef PRINTF_INCLUDE_CONFIG_H\r
+#include "printf_config.h"\r
+#endif\r
+\r
+\r
+// 'ntoa' conversion buffer size, this must be big enough to hold one converted\r
+// numeric number including padded zeros (dynamically created on stack)\r
+// default: 32 byte\r
+#ifndef PRINTF_NTOA_BUFFER_SIZE\r
+#define PRINTF_NTOA_BUFFER_SIZE    32U\r
+#endif\r
+\r
+// 'ftoa' conversion buffer size, this must be big enough to hold one converted\r
+// float number including padded zeros (dynamically created on stack)\r
+// default: 32 byte\r
+#ifndef PRINTF_FTOA_BUFFER_SIZE\r
+#define PRINTF_FTOA_BUFFER_SIZE    32U\r
+#endif\r
+\r
+// support for the floating point type (%f)\r
+// default: activated\r
+#ifndef PRINTF_DISABLE_SUPPORT_FLOAT\r
+#define PRINTF_SUPPORT_FLOAT\r
+#endif\r
+\r
+// support for exponential floating point notation (%e/%g)\r
+// default: activated\r
+#ifndef PRINTF_DISABLE_SUPPORT_EXPONENTIAL\r
+#define PRINTF_SUPPORT_EXPONENTIAL\r
+#endif\r
+\r
+// define the default floating point precision\r
+// default: 6 digits\r
+#ifndef PRINTF_DEFAULT_FLOAT_PRECISION\r
+#define PRINTF_DEFAULT_FLOAT_PRECISION  6U\r
+#endif\r
+\r
+// define the largest float suitable to print with %f\r
+// default: 1e9\r
+#ifndef PRINTF_MAX_FLOAT\r
+#define PRINTF_MAX_FLOAT  1e9\r
+#endif\r
+\r
+// support for the long long types (%llu or %p)\r
+// default: activated\r
+#ifndef PRINTF_DISABLE_SUPPORT_LONG_LONG\r
+#define PRINTF_SUPPORT_LONG_LONG\r
+#endif\r
+\r
+// support for the ptrdiff_t type (%t)\r
+// ptrdiff_t is normally defined in <stddef.h> as long or long long type\r
+// default: activated\r
+#ifndef PRINTF_DISABLE_SUPPORT_PTRDIFF_T\r
+#define PRINTF_SUPPORT_PTRDIFF_T\r
+#endif\r
+\r
+///////////////////////////////////////////////////////////////////////////////\r
+\r
+// internal flag definitions\r
+#define FLAGS_ZEROPAD   (1U <<  0U)\r
+#define FLAGS_LEFT      (1U <<  1U)\r
+#define FLAGS_PLUS      (1U <<  2U)\r
+#define FLAGS_SPACE     (1U <<  3U)\r
+#define FLAGS_HASH      (1U <<  4U)\r
+#define FLAGS_UPPERCASE (1U <<  5U)\r
+#define FLAGS_CHAR      (1U <<  6U)\r
+#define FLAGS_SHORT     (1U <<  7U)\r
+#define FLAGS_LONG      (1U <<  8U)\r
+#define FLAGS_LONG_LONG (1U <<  9U)\r
+#define FLAGS_PRECISION (1U << 10U)\r
+#define FLAGS_ADAPT_EXP (1U << 11U)\r
+\r
+\r
+// import float.h for DBL_MAX\r
+#if defined(PRINTF_SUPPORT_FLOAT)\r
+#include <float.h>\r
+#endif\r
+\r
+\r
+// output function type\r
+typedef void (*out_fct_type)(char character, void* buffer, size_t idx, size_t maxlen);\r
+\r
+\r
+// wrapper (used as buffer) for output function type\r
+typedef struct {\r
+  void  (*fct)(char character, void* arg);\r
+  void* arg;\r
+} out_fct_wrap_type;\r
+\r
+\r
+// internal buffer output\r
+static inline void _out_buffer(char character, void* buffer, size_t idx, size_t maxlen)\r
+{\r
+  if (idx < maxlen) {\r
+    ((char*)buffer)[idx] = character;\r
+  }\r
+}\r
+\r
+\r
+// internal null output\r
+static inline void _out_null(char character, void* buffer, size_t idx, size_t maxlen)\r
+{\r
+  (void)character; (void)buffer; (void)idx; (void)maxlen;\r
+}\r
+\r
+\r
+// internal _putchar wrapper\r
+static inline void _out_char(char character, void* buffer, size_t idx, size_t maxlen)\r
+{\r
+  (void)buffer; (void)idx; (void)maxlen;\r
+  if (character) {\r
+    _putchar(character);\r
+  }\r
+}\r
+\r
+\r
+// internal output function wrapper\r
+static inline void _out_fct(char character, void* buffer, size_t idx, size_t maxlen)\r
+{\r
+  (void)idx; (void)maxlen;\r
+  if (character) {\r
+    // buffer is the output fct pointer\r
+    ((out_fct_wrap_type*)buffer)->fct(character, ((out_fct_wrap_type*)buffer)->arg);\r
+  }\r
+}\r
+\r
+\r
+// internal secure strlen\r
+// \return The length of the string (excluding the terminating 0) limited by 'maxsize'\r
+static inline unsigned int _strnlen_s(const char* str, size_t maxsize)\r
+{\r
+  const char* s;\r
+  for (s = str; *s && maxsize--; ++s);\r
+  return (unsigned int)(s - str);\r
+}\r
+\r
+\r
+// internal test if char is a digit (0-9)\r
+// \return true if char is a digit\r
+static inline bool _is_digit(char ch)\r
+{\r
+  return (ch >= '0') && (ch <= '9');\r
+}\r
+\r
+\r
+// internal ASCII string to unsigned int conversion\r
+static unsigned int _atoi(const char** str)\r
+{\r
+  unsigned int i = 0U;\r
+  while (_is_digit(**str)) {\r
+    i = i * 10U + (unsigned int)(*((*str)++) - '0');\r
+  }\r
+  return i;\r
+}\r
+\r
+\r
+// output the specified string in reverse, taking care of any zero-padding\r
+static size_t _out_rev(out_fct_type out, char* buffer, size_t idx, size_t maxlen, const char* buf, size_t len, unsigned int width, unsigned int flags)\r
+{\r
+  const size_t start_idx = idx;\r
+\r
+  // pad spaces up to given width\r
+  if (!(flags & FLAGS_LEFT) && !(flags & FLAGS_ZEROPAD)) {\r
+    for (size_t i = len; i < width; i++) {\r
+      out(' ', buffer, idx++, maxlen);\r
+    }\r
+  }\r
+\r
+  // reverse string\r
+  while (len) {\r
+    out(buf[--len], buffer, idx++, maxlen);\r
+  }\r
+\r
+  // append pad spaces up to given width\r
+  if (flags & FLAGS_LEFT) {\r
+    while (idx - start_idx < width) {\r
+      out(' ', buffer, idx++, maxlen);\r
+    }\r
+  }\r
+\r
+  return idx;\r
+}\r
+\r
+\r
+// internal itoa format\r
+static size_t _ntoa_format(out_fct_type out, char* buffer, size_t idx, size_t maxlen, char* buf, size_t len, bool negative, unsigned int base, unsigned int prec, unsigned int width, unsigned int flags)\r
+{\r
+  // pad leading zeros\r
+  if (!(flags & FLAGS_LEFT)) {\r
+    if (width && (flags & FLAGS_ZEROPAD) && (negative || (flags & (FLAGS_PLUS | FLAGS_SPACE)))) {\r
+      width--;\r
+    }\r
+    while ((len < prec) && (len < PRINTF_NTOA_BUFFER_SIZE)) {\r
+      buf[len++] = '0';\r
+    }\r
+    while ((flags & FLAGS_ZEROPAD) && (len < width) && (len < PRINTF_NTOA_BUFFER_SIZE)) {\r
+      buf[len++] = '0';\r
+    }\r
+  }\r
+\r
+  // handle hash\r
+  if (flags & FLAGS_HASH) {\r
+    if (!(flags & FLAGS_PRECISION) && len && ((len == prec) || (len == width))) {\r
+      len--;\r
+      if (len && (base == 16U)) {\r
+        len--;\r
+      }\r
+    }\r
+    if ((base == 16U) && !(flags & FLAGS_UPPERCASE) && (len < PRINTF_NTOA_BUFFER_SIZE)) {\r
+      buf[len++] = 'x';\r
+    }\r
+    else if ((base == 16U) && (flags & FLAGS_UPPERCASE) && (len < PRINTF_NTOA_BUFFER_SIZE)) {\r
+      buf[len++] = 'X';\r
+    }\r
+    else if ((base == 2U) && (len < PRINTF_NTOA_BUFFER_SIZE)) {\r
+      buf[len++] = 'b';\r
+    }\r
+    if (len < PRINTF_NTOA_BUFFER_SIZE) {\r
+      buf[len++] = '0';\r
+    }\r
+  }\r
+\r
+  if (len < PRINTF_NTOA_BUFFER_SIZE) {\r
+    if (negative) {\r
+      buf[len++] = '-';\r
+    }\r
+    else if (flags & FLAGS_PLUS) {\r
+      buf[len++] = '+';  // ignore the space if the '+' exists\r
+    }\r
+    else if (flags & FLAGS_SPACE) {\r
+      buf[len++] = ' ';\r
+    }\r
+  }\r
+\r
+  return _out_rev(out, buffer, idx, maxlen, buf, len, width, flags);\r
+}\r
+\r
+\r
+// internal itoa for 'long' type\r
+static size_t _ntoa_long(out_fct_type out, char* buffer, size_t idx, size_t maxlen, unsigned long value, bool negative, unsigned long base, unsigned int prec, unsigned int width, unsigned int flags)\r
+{\r
+  char buf[PRINTF_NTOA_BUFFER_SIZE];\r
+  size_t len = 0U;\r
+\r
+  // no hash for 0 values\r
+  if (!value) {\r
+    flags &= ~FLAGS_HASH;\r
+  }\r
+\r
+  // write if precision != 0 and value is != 0\r
+  if (!(flags & FLAGS_PRECISION) || value) {\r
+    do {\r
+      const char digit = (char)(value % base);\r
+      buf[len++] = digit < 10 ? '0' + digit : (flags & FLAGS_UPPERCASE ? 'A' : 'a') + digit - 10;\r
+      value /= base;\r
+    } while (value && (len < PRINTF_NTOA_BUFFER_SIZE));\r
+  }\r
+\r
+  return _ntoa_format(out, buffer, idx, maxlen, buf, len, negative, (unsigned int)base, prec, width, flags);\r
+}\r
+\r
+\r
+// internal itoa for 'long long' type\r
+#if defined(PRINTF_SUPPORT_LONG_LONG)\r
+static size_t _ntoa_long_long(out_fct_type out, char* buffer, size_t idx, size_t maxlen, unsigned long long value, bool negative, unsigned long long base, unsigned int prec, unsigned int width, unsigned int flags)\r
+{\r
+  char buf[PRINTF_NTOA_BUFFER_SIZE];\r
+  size_t len = 0U;\r
+\r
+  // no hash for 0 values\r
+  if (!value) {\r
+    flags &= ~FLAGS_HASH;\r
+  }\r
+\r
+  // write if precision != 0 and value is != 0\r
+  if (!(flags & FLAGS_PRECISION) || value) {\r
+    do {\r
+      const char digit = (char)(value % base);\r
+      buf[len++] = digit < 10 ? '0' + digit : (flags & FLAGS_UPPERCASE ? 'A' : 'a') + digit - 10;\r
+      value /= base;\r
+    } while (value && (len < PRINTF_NTOA_BUFFER_SIZE));\r
+  }\r
+\r
+  return _ntoa_format(out, buffer, idx, maxlen, buf, len, negative, (unsigned int)base, prec, width, flags);\r
+}\r
+#endif  // PRINTF_SUPPORT_LONG_LONG\r
+\r
+\r
+#if defined(PRINTF_SUPPORT_FLOAT)\r
+\r
+#if defined(PRINTF_SUPPORT_EXPONENTIAL)\r
+// forward declaration so that _ftoa can switch to exp notation for values > PRINTF_MAX_FLOAT\r
+static size_t _etoa(out_fct_type out, char* buffer, size_t idx, size_t maxlen, double value, unsigned int prec, unsigned int width, unsigned int flags);\r
+#endif\r
+\r
+\r
+// internal ftoa for fixed decimal floating point\r
+static size_t _ftoa(out_fct_type out, char* buffer, size_t idx, size_t maxlen, double value, unsigned int prec, unsigned int width, unsigned int flags)\r
+{\r
+  char buf[PRINTF_FTOA_BUFFER_SIZE];\r
+  size_t len  = 0U;\r
+  double diff = 0.0;\r
+\r
+  // powers of 10\r
+  static const double pow10[] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 };\r
+\r
+  // test for special values\r
+  if (value != value)\r
+    return _out_rev(out, buffer, idx, maxlen, "nan", 3, width, flags);\r
+  if (value < -DBL_MAX)\r
+    return _out_rev(out, buffer, idx, maxlen, "fni-", 4, width, flags);\r
+  if (value > DBL_MAX)\r
+    return _out_rev(out, buffer, idx, maxlen, (flags & FLAGS_PLUS) ? "fni+" : "fni", (flags & FLAGS_PLUS) ? 4U : 3U, width, flags);\r
+\r
+  // test for very large values\r
+  // standard printf behavior is to print EVERY whole number digit -- which could be 100s of characters overflowing your buffers == bad\r
+  if ((value > PRINTF_MAX_FLOAT) || (value < -PRINTF_MAX_FLOAT)) {\r
+#if defined(PRINTF_SUPPORT_EXPONENTIAL)\r
+    return _etoa(out, buffer, idx, maxlen, value, prec, width, flags);\r
+#else\r
+    return 0U;\r
+#endif\r
+  }\r
+\r
+  // test for negative\r
+  bool negative = false;\r
+  if (value < 0) {\r
+    negative = true;\r
+    value = 0 - value;\r
+  }\r
+\r
+  // set default precision, if not set explicitly\r
+  if (!(flags & FLAGS_PRECISION)) {\r
+    prec = PRINTF_DEFAULT_FLOAT_PRECISION;\r
+  }\r
+  // limit precision to 9, cause a prec >= 10 can lead to overflow errors\r
+  while ((len < PRINTF_FTOA_BUFFER_SIZE) && (prec > 9U)) {\r
+    buf[len++] = '0';\r
+    prec--;\r
+  }\r
+\r
+  int whole = (int)value;\r
+  double tmp = (value - whole) * pow10[prec];\r
+  unsigned long frac = (unsigned long)tmp;\r
+  diff = tmp - frac;\r
+\r
+  if (diff > 0.5) {\r
+    ++frac;\r
+    // handle rollover, e.g. case 0.99 with prec 1 is 1.0\r
+    if (frac >= pow10[prec]) {\r
+      frac = 0;\r
+      ++whole;\r
+    }\r
+  }\r
+  else if (diff < 0.5) {\r
+  }\r
+  else if ((frac == 0U) || (frac & 1U)) {\r
+    // if halfway, round up if odd OR if last digit is 0\r
+    ++frac;\r
+  }\r
+\r
+  if (prec == 0U) {\r
+    diff = value - (double)whole;\r
+    if ((!(diff < 0.5) || (diff > 0.5)) && (whole & 1)) {\r
+      // exactly 0.5 and ODD, then round up\r
+      // 1.5 -> 2, but 2.5 -> 2\r
+      ++whole;\r
+    }\r
+  }\r
+  else {\r
+    unsigned int count = prec;\r
+    // now do fractional part, as an unsigned number\r
+    while (len < PRINTF_FTOA_BUFFER_SIZE) {\r
+      --count;\r
+      buf[len++] = (char)(48U + (frac % 10U));\r
+      if (!(frac /= 10U)) {\r
+        break;\r
+      }\r
+    }\r
+    // add extra 0s\r
+    while ((len < PRINTF_FTOA_BUFFER_SIZE) && (count-- > 0U)) {\r
+      buf[len++] = '0';\r
+    }\r
+    if (len < PRINTF_FTOA_BUFFER_SIZE) {\r
+      // add decimal\r
+      buf[len++] = '.';\r
+    }\r
+  }\r
+\r
+  // do whole part, number is reversed\r
+  while (len < PRINTF_FTOA_BUFFER_SIZE) {\r
+    buf[len++] = (char)(48 + (whole % 10));\r
+    if (!(whole /= 10)) {\r
+      break;\r
+    }\r
+  }\r
+\r
+  // pad leading zeros\r
+  if (!(flags & FLAGS_LEFT) && (flags & FLAGS_ZEROPAD)) {\r
+    if (width && (negative || (flags & (FLAGS_PLUS | FLAGS_SPACE)))) {\r
+      width--;\r
+    }\r
+    while ((len < width) && (len < PRINTF_FTOA_BUFFER_SIZE)) {\r
+      buf[len++] = '0';\r
+    }\r
+  }\r
+\r
+  if (len < PRINTF_FTOA_BUFFER_SIZE) {\r
+    if (negative) {\r
+      buf[len++] = '-';\r
+    }\r
+    else if (flags & FLAGS_PLUS) {\r
+      buf[len++] = '+';  // ignore the space if the '+' exists\r
+    }\r
+    else if (flags & FLAGS_SPACE) {\r
+      buf[len++] = ' ';\r
+    }\r
+  }\r
+\r
+  return _out_rev(out, buffer, idx, maxlen, buf, len, width, flags);\r
+}\r
+\r
+\r
+#if defined(PRINTF_SUPPORT_EXPONENTIAL)\r
+// internal ftoa variant for exponential floating-point type, contributed by Martijn Jasperse <m.jasperse@gmail.com>\r
+static size_t _etoa(out_fct_type out, char* buffer, size_t idx, size_t maxlen, double value, unsigned int prec, unsigned int width, unsigned int flags)\r
+{\r
+  // check for NaN and special values\r
+  if ((value != value) || (value > DBL_MAX) || (value < -DBL_MAX)) {\r
+    return _ftoa(out, buffer, idx, maxlen, value, prec, width, flags);\r
+  }\r
+\r
+  // determine the sign\r
+  const bool negative = value < 0;\r
+  if (negative) {\r
+    value = -value;\r
+  }\r
+\r
+  // default precision\r
+  if (!(flags & FLAGS_PRECISION)) {\r
+    prec = PRINTF_DEFAULT_FLOAT_PRECISION;\r
+  }\r
+\r
+  // determine the decimal exponent\r
+  // based on the algorithm by David Gay (https://www.ampl.com/netlib/fp/dtoa.c)\r
+  union {\r
+    uint64_t U;\r
+    double   F;\r
+  } conv;\r
+\r
+  conv.F = value;\r
+  int exp2 = (int)((conv.U >> 52U) & 0x07FFU) - 1023;           // effectively log2\r
+  conv.U = (conv.U & ((1ULL << 52U) - 1U)) | (1023ULL << 52U);  // drop the exponent so conv.F is now in [1,2)\r
+  // now approximate log10 from the log2 integer part and an expansion of ln around 1.5\r
+  int expval = (int)(0.1760912590558 + exp2 * 0.301029995663981 + (conv.F - 1.5) * 0.289529654602168);\r
+  // now we want to compute 10^expval but we want to be sure it won't overflow\r
+  exp2 = (int)(expval * 3.321928094887362 + 0.5);\r
+  const double z  = expval * 2.302585092994046 - exp2 * 0.6931471805599453;\r
+  const double z2 = z * z;\r
+  conv.U = (uint64_t)(exp2 + 1023) << 52U;\r
+  // compute exp(z) using continued fractions, see https://en.wikipedia.org/wiki/Exponential_function#Continued_fractions_for_ex\r
+  conv.F *= 1 + 2 * z / (2 - z + (z2 / (6 + (z2 / (10 + z2 / 14)))));\r
+  // correct for rounding errors\r
+  if (value < conv.F) {\r
+    expval--;\r
+    conv.F /= 10;\r
+  }\r
+\r
+  // the exponent format is "%+03d" and largest value is "307", so set aside 4-5 characters\r
+  unsigned int minwidth = ((expval < 100) && (expval > -100)) ? 4U : 5U;\r
+\r
+  // in "%g" mode, "prec" is the number of *significant figures* not decimals\r
+  if (flags & FLAGS_ADAPT_EXP) {\r
+    // do we want to fall-back to "%f" mode?\r
+    if ((value >= 1e-4) && (value < 1e6)) {\r
+      if ((int)prec > expval) {\r
+        prec = (unsigned)((int)prec - expval - 1);\r
+      }\r
+      else {\r
+        prec = 0;\r
+      }\r
+      flags |= FLAGS_PRECISION;   // make sure _ftoa respects precision\r
+      // no characters in exponent\r
+      minwidth = 0U;\r
+      expval   = 0;\r
+    }\r
+    else {\r
+      // we use one sigfig for the whole part\r
+      if ((prec > 0) && (flags & FLAGS_PRECISION)) {\r
+        --prec;\r
+      }\r
+    }\r
+  }\r
+\r
+  // will everything fit?\r
+  unsigned int fwidth = width;\r
+  if (width > minwidth) {\r
+    // we didn't fall-back so subtract the characters required for the exponent\r
+    fwidth -= minwidth;\r
+  } else {\r
+    // not enough characters, so go back to default sizing\r
+    fwidth = 0U;\r
+  }\r
+  if ((flags & FLAGS_LEFT) && minwidth) {\r
+    // if we're padding on the right, DON'T pad the floating part\r
+    fwidth = 0U;\r
+  }\r
+\r
+  // rescale the float value\r
+  if (expval) {\r
+    value /= conv.F;\r
+  }\r
+\r
+  // output the floating part\r
+  const size_t start_idx = idx;\r
+  idx = _ftoa(out, buffer, idx, maxlen, negative ? -value : value, prec, fwidth, flags & ~FLAGS_ADAPT_EXP);\r
+\r
+  // output the exponent part\r
+  if (minwidth) {\r
+    // output the exponential symbol\r
+    out((flags & FLAGS_UPPERCASE) ? 'E' : 'e', buffer, idx++, maxlen);\r
+    // output the exponent value\r
+    idx = _ntoa_long(out, buffer, idx, maxlen, (expval < 0) ? -expval : expval, expval < 0, 10, 0, minwidth-1, FLAGS_ZEROPAD | FLAGS_PLUS);\r
+    // might need to right-pad spaces\r
+    if (flags & FLAGS_LEFT) {\r
+      while (idx - start_idx < width) out(' ', buffer, idx++, maxlen);\r
+    }\r
+  }\r
+  return idx;\r
+}\r
+#endif  // PRINTF_SUPPORT_EXPONENTIAL\r
+#endif  // PRINTF_SUPPORT_FLOAT\r
+\r
+\r
+// internal vsnprintf\r
+static int _vsnprintf(out_fct_type out, char* buffer, const size_t maxlen, const char* format, va_list va)\r
+{\r
+  unsigned int flags, width, precision, n;\r
+  size_t idx = 0U;\r
+\r
+  if (!buffer) {\r
+    // use null output function\r
+    out = _out_null;\r
+  }\r
+\r
+  while (*format)\r
+  {\r
+    // format specifier?  %[flags][width][.precision][length]\r
+    if (*format != '%') {\r
+      // no\r
+      out(*format, buffer, idx++, maxlen);\r
+      format++;\r
+      continue;\r
+    }\r
+    else {\r
+      // yes, evaluate it\r
+      format++;\r
+    }\r
+\r
+    // evaluate flags\r
+    flags = 0U;\r
+    do {\r
+      switch (*format) {\r
+        case '0': flags |= FLAGS_ZEROPAD; format++; n = 1U; break;\r
+        case '-': flags |= FLAGS_LEFT;    format++; n = 1U; break;\r
+        case '+': flags |= FLAGS_PLUS;    format++; n = 1U; break;\r
+        case ' ': flags |= FLAGS_SPACE;   format++; n = 1U; break;\r
+        case '#': flags |= FLAGS_HASH;    format++; n = 1U; break;\r
+        default :                                   n = 0U; break;\r
+      }\r
+    } while (n);\r
+\r
+    // evaluate width field\r
+    width = 0U;\r
+    if (_is_digit(*format)) {\r
+      width = _atoi(&format);\r
+    }\r
+    else if (*format == '*') {\r
+      const int w = va_arg(va, int);\r
+      if (w < 0) {\r
+        flags |= FLAGS_LEFT;    // reverse padding\r
+        width = (unsigned int)-w;\r
+      }\r
+      else {\r
+        width = (unsigned int)w;\r
+      }\r
+      format++;\r
+    }\r
+\r
+    // evaluate precision field\r
+    precision = 0U;\r
+    if (*format == '.') {\r
+      flags |= FLAGS_PRECISION;\r
+      format++;\r
+      if (_is_digit(*format)) {\r
+        precision = _atoi(&format);\r
+      }\r
+      else if (*format == '*') {\r
+        const int prec = (int)va_arg(va, int);\r
+        precision = prec > 0 ? (unsigned int)prec : 0U;\r
+        format++;\r
+      }\r
+    }\r
+\r
+    // evaluate length field\r
+    switch (*format) {\r
+      case 'l' :\r
+        flags |= FLAGS_LONG;\r
+        format++;\r
+        if (*format == 'l') {\r
+          flags |= FLAGS_LONG_LONG;\r
+          format++;\r
+        }\r
+        break;\r
+      case 'h' :\r
+        flags |= FLAGS_SHORT;\r
+        format++;\r
+        if (*format == 'h') {\r
+          flags |= FLAGS_CHAR;\r
+          format++;\r
+        }\r
+        break;\r
+#if defined(PRINTF_SUPPORT_PTRDIFF_T)\r
+      case 't' :\r
+        flags |= (sizeof(ptrdiff_t) == sizeof(long) ? FLAGS_LONG : FLAGS_LONG_LONG);\r
+        format++;\r
+        break;\r
+#endif\r
+      case 'j' :\r
+        flags |= (sizeof(intmax_t) == sizeof(long) ? FLAGS_LONG : FLAGS_LONG_LONG);\r
+        format++;\r
+        break;\r
+      case 'z' :\r
+        flags |= (sizeof(size_t) == sizeof(long) ? FLAGS_LONG : FLAGS_LONG_LONG);\r
+        format++;\r
+        break;\r
+      default :\r
+        break;\r
+    }\r
+\r
+    // evaluate specifier\r
+    switch (*format) {\r
+      case 'd' :\r
+      case 'i' :\r
+      case 'u' :\r
+      case 'x' :\r
+      case 'X' :\r
+      case 'o' :\r
+      case 'b' : {\r
+        // set the base\r
+        unsigned int base;\r
+        if (*format == 'x' || *format == 'X') {\r
+          base = 16U;\r
+        }\r
+        else if (*format == 'o') {\r
+          base =  8U;\r
+        }\r
+        else if (*format == 'b') {\r
+          base =  2U;\r
+        }\r
+        else {\r
+          base = 10U;\r
+          flags &= ~FLAGS_HASH;   // no hash for dec format\r
+        }\r
+        // uppercase\r
+        if (*format == 'X') {\r
+          flags |= FLAGS_UPPERCASE;\r
+        }\r
+\r
+        // no plus or space flag for u, x, X, o, b\r
+        if ((*format != 'i') && (*format != 'd')) {\r
+          flags &= ~(FLAGS_PLUS | FLAGS_SPACE);\r
+        }\r
+\r
+        // ignore '0' flag when precision is given\r
+        if (flags & FLAGS_PRECISION) {\r
+          flags &= ~FLAGS_ZEROPAD;\r
+        }\r
+\r
+        // convert the integer\r
+        if ((*format == 'i') || (*format == 'd')) {\r
+          // signed\r
+          if (flags & FLAGS_LONG_LONG) {\r
+#if defined(PRINTF_SUPPORT_LONG_LONG)\r
+            const long long value = va_arg(va, long long);\r
+            idx = _ntoa_long_long(out, buffer, idx, maxlen, (unsigned long long)(value > 0 ? value : 0 - value), value < 0, base, precision, width, flags);\r
+#endif\r
+          }\r
+          else if (flags & FLAGS_LONG) {\r
+            const long value = va_arg(va, long);\r
+            idx = _ntoa_long(out, buffer, idx, maxlen, (unsigned long)(value > 0 ? value : 0 - value), value < 0, base, precision, width, flags);\r
+          }\r
+          else {\r
+            const int value = (flags & FLAGS_CHAR) ? (char)va_arg(va, int) : (flags & FLAGS_SHORT) ? (short int)va_arg(va, int) : va_arg(va, int);\r
+            idx = _ntoa_long(out, buffer, idx, maxlen, (unsigned int)(value > 0 ? value : 0 - value), value < 0, base, precision, width, flags);\r
+          }\r
+        }\r
+        else {\r
+          // unsigned\r
+          if (flags & FLAGS_LONG_LONG) {\r
+#if defined(PRINTF_SUPPORT_LONG_LONG)\r
+            idx = _ntoa_long_long(out, buffer, idx, maxlen, va_arg(va, unsigned long long), false, base, precision, width, flags);\r
+#endif\r
+          }\r
+          else if (flags & FLAGS_LONG) {\r
+            idx = _ntoa_long(out, buffer, idx, maxlen, va_arg(va, unsigned long), false, base, precision, width, flags);\r
+          }\r
+          else {\r
+            const unsigned int value = (flags & FLAGS_CHAR) ? (unsigned char)va_arg(va, unsigned int) : (flags & FLAGS_SHORT) ? (unsigned short int)va_arg(va, unsigned int) : va_arg(va, unsigned int);\r
+            idx = _ntoa_long(out, buffer, idx, maxlen, value, false, base, precision, width, flags);\r
+          }\r
+        }\r
+        format++;\r
+        break;\r
+      }\r
+#if defined(PRINTF_SUPPORT_FLOAT)\r
+      case 'f' :\r
+      case 'F' :\r
+        if (*format == 'F') flags |= FLAGS_UPPERCASE;\r
+        idx = _ftoa(out, buffer, idx, maxlen, va_arg(va, double), precision, width, flags);\r
+        format++;\r
+        break;\r
+#if defined(PRINTF_SUPPORT_EXPONENTIAL)\r
+      case 'e':\r
+      case 'E':\r
+      case 'g':\r
+      case 'G':\r
+        if ((*format == 'g')||(*format == 'G')) flags |= FLAGS_ADAPT_EXP;\r
+        if ((*format == 'E')||(*format == 'G')) flags |= FLAGS_UPPERCASE;\r
+        idx = _etoa(out, buffer, idx, maxlen, va_arg(va, double), precision, width, flags);\r
+        format++;\r
+        break;\r
+#endif  // PRINTF_SUPPORT_EXPONENTIAL\r
+#endif  // PRINTF_SUPPORT_FLOAT\r
+      case 'c' : {\r
+        unsigned int l = 1U;\r
+        // pre padding\r
+        if (!(flags & FLAGS_LEFT)) {\r
+          while (l++ < width) {\r
+            out(' ', buffer, idx++, maxlen);\r
+          }\r
+        }\r
+        // char output\r
+        out((char)va_arg(va, int), buffer, idx++, maxlen);\r
+        // post padding\r
+        if (flags & FLAGS_LEFT) {\r
+          while (l++ < width) {\r
+            out(' ', buffer, idx++, maxlen);\r
+          }\r
+        }\r
+        format++;\r
+        break;\r
+      }\r
+\r
+      case 's' : {\r
+        const char* p = va_arg(va, char*);\r
+        unsigned int l = _strnlen_s(p, precision ? precision : (size_t)-1);\r
+        // pre padding\r
+        if (flags & FLAGS_PRECISION) {\r
+          l = (l < precision ? l : precision);\r
+        }\r
+        if (!(flags & FLAGS_LEFT)) {\r
+          while (l++ < width) {\r
+            out(' ', buffer, idx++, maxlen);\r
+          }\r
+        }\r
+        // string output\r
+        while ((*p != 0) && (!(flags & FLAGS_PRECISION) || precision--)) {\r
+          out(*(p++), buffer, idx++, maxlen);\r
+        }\r
+        // post padding\r
+        if (flags & FLAGS_LEFT) {\r
+          while (l++ < width) {\r
+            out(' ', buffer, idx++, maxlen);\r
+          }\r
+        }\r
+        format++;\r
+        break;\r
+      }\r
+\r
+      case 'p' : {\r
+        width = sizeof(void*) * 2U;\r
+        flags |= FLAGS_ZEROPAD | FLAGS_UPPERCASE;\r
+#if defined(PRINTF_SUPPORT_LONG_LONG)\r
+        const bool is_ll = sizeof(uintptr_t) == sizeof(long long);\r
+        if (is_ll) {\r
+          idx = _ntoa_long_long(out, buffer, idx, maxlen, (uintptr_t)va_arg(va, void*), false, 16U, precision, width, flags);\r
+        }\r
+        else {\r
+#endif\r
+          idx = _ntoa_long(out, buffer, idx, maxlen, (unsigned long)((uintptr_t)va_arg(va, void*)), false, 16U, precision, width, flags);\r
+#if defined(PRINTF_SUPPORT_LONG_LONG)\r
+        }\r
+#endif\r
+        format++;\r
+        break;\r
+      }\r
+\r
+      case '%' :\r
+        out('%', buffer, idx++, maxlen);\r
+        format++;\r
+        break;\r
+\r
+      default :\r
+        out(*format, buffer, idx++, maxlen);\r
+        format++;\r
+        break;\r
+    }\r
+  }\r
+\r
+  // termination\r
+  out((char)0, buffer, idx < maxlen ? idx : maxlen - 1U, maxlen);\r
+\r
+  // return written chars without terminating \0\r
+  return (int)idx;\r
+}\r
+\r
+\r
+///////////////////////////////////////////////////////////////////////////////\r
+\r
+int printf_(const char* format, ...)\r
+{\r
+  va_list va;\r
+  va_start(va, format);\r
+  char buffer[1];\r
+  const int ret = _vsnprintf(_out_char, buffer, (size_t)-1, format, va);\r
+  va_end(va);\r
+  return ret;\r
+}\r
+\r
+\r
+int sprintf_(char* buffer, const char* format, ...)\r
+{\r
+  va_list va;\r
+  va_start(va, format);\r
+  const int ret = _vsnprintf(_out_buffer, buffer, (size_t)-1, format, va);\r
+  va_end(va);\r
+  return ret;\r
+}\r
+\r
+\r
+int snprintf_(char* buffer, size_t count, const char* format, ...)\r
+{\r
+  va_list va;\r
+  va_start(va, format);\r
+  const int ret = _vsnprintf(_out_buffer, buffer, count, format, va);\r
+  va_end(va);\r
+  return ret;\r
+}\r
+\r
+\r
+int vprintf_(const char* format, va_list va)\r
+{\r
+  char buffer[1];\r
+  return _vsnprintf(_out_char, buffer, (size_t)-1, format, va);\r
+}\r
+\r
+\r
+int vsnprintf_(char* buffer, size_t count, const char* format, va_list va)\r
+{\r
+  return _vsnprintf(_out_buffer, buffer, count, format, va);\r
+}\r
+\r
+\r
+int fctprintf(void (*out)(char character, void* arg), void* arg, const char* format, ...)\r
+{\r
+  va_list va;\r
+  va_start(va, format);\r
+  const out_fct_wrap_type out_fct_wrap = { out, arg };\r
+  const int ret = _vsnprintf(_out_fct, (char*)(uintptr_t)&out_fct_wrap, (size_t)-1, format, va);\r
+  va_end(va);\r
+  return ret;\r
+}\r
diff --git a/printf.h b/printf.h
new file mode 100644 (file)
index 0000000..f779cd2
--- /dev/null
+++ b/printf.h
@@ -0,0 +1,112 @@
+///////////////////////////////////////////////////////////////////////////////\r
+// \author (c) Marco Paland (info@paland.com)\r
+//             2014-2019, PALANDesign Hannover, Germany\r
+//\r
+// \license The MIT License (MIT)\r
+//\r
+// Permission is hereby granted, free of charge, to any person obtaining a copy\r
+// of this software and associated documentation files (the "Software"), to deal\r
+// in the Software without restriction, including without limitation the rights\r
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r
+// copies of the Software, and to permit persons to whom the Software is\r
+// furnished to do so, subject to the following conditions:\r
+// \r
+// The above copyright notice and this permission notice shall be included in\r
+// all copies or substantial portions of the Software.\r
+// \r
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\r
+// THE SOFTWARE.\r
+//\r
+// \brief Tiny printf, sprintf and snprintf implementation, optimized for speed on\r
+//        embedded systems with a very limited resources.\r
+//        Use this instead of bloated standard/newlib printf.\r
+//        These routines are thread safe and reentrant.\r
+//\r
+///////////////////////////////////////////////////////////////////////////////\r
+\r
+#ifndef _PRINTF_H_\r
+#define _PRINTF_H_\r
+\r
+#include <stdarg.h>\r
+#include <stddef.h>\r
+\r
+\r
+#ifdef __cplusplus\r
+extern "C" {\r
+#endif\r
+\r
+\r
+/**\r
+ * Output a character to a custom device like UART, used by the printf() function\r
+ * This function is declared here only. You have to write your custom implementation somewhere\r
+ * \param character Character to output\r
+ */\r
+void _putchar(char character);\r
+\r
+\r
+/**\r
+ * Tiny printf implementation\r
+ * You have to implement _putchar if you use printf()\r
+ * To avoid conflicts with the regular printf() API it is overridden by macro defines\r
+ * and internal underscore-appended functions like printf_() are used\r
+ * \param format A string that specifies the format of the output\r
+ * \return The number of characters that are written into the array, not counting the terminating null character\r
+ */\r
+int printf_(const char* format, ...);\r
+\r
+\r
+/**\r
+ * Tiny sprintf implementation\r
+ * Due to security reasons (buffer overflow) YOU SHOULD CONSIDER USING (V)SNPRINTF INSTEAD!\r
+ * \param buffer A pointer to the buffer where to store the formatted string. MUST be big enough to store the output!\r
+ * \param format A string that specifies the format of the output\r
+ * \return The number of characters that are WRITTEN into the buffer, not counting the terminating null character\r
+ */\r
+int sprintf_(char* buffer, const char* format, ...);\r
+\r
+\r
+/**\r
+ * Tiny snprintf/vsnprintf implementation\r
+ * \param buffer A pointer to the buffer where to store the formatted string\r
+ * \param count The maximum number of characters to store in the buffer, including a terminating null character\r
+ * \param format A string that specifies the format of the output\r
+ * \param va A value identifying a variable arguments list\r
+ * \return The number of characters that COULD have been written into the buffer, not counting the terminating\r
+ *         null character. A value equal or larger than count indicates truncation. Only when the returned value\r
+ *         is non-negative and less than count, the string has been completely written.\r
+ */\r
+int  snprintf_(char* buffer, size_t count, const char* format, ...);\r
+int vsnprintf_(char* buffer, size_t count, const char* format, va_list va);\r
+\r
+\r
+/**\r
+ * Tiny vprintf implementation\r
+ * \param format A string that specifies the format of the output\r
+ * \param va A value identifying a variable arguments list\r
+ * \return The number of characters that are WRITTEN into the buffer, not counting the terminating null character\r
+ */\r
+int vprintf_(const char* format, va_list va);\r
+\r
+\r
+/**\r
+ * printf with output function\r
+ * You may use this as dynamic alternative to printf() with its fixed _putchar() output\r
+ * \param out An output function which takes one character and an argument pointer\r
+ * \param arg An argument pointer for user data passed to output function\r
+ * \param format A string that specifies the format of the output\r
+ * \return The number of characters that are sent to the output function, not counting the terminating null character\r
+ */\r
+int fctprintf(void (*out)(char character, void* arg), void* arg, const char* format, ...);\r
+\r
+\r
+#ifdef __cplusplus\r
+}\r
+#endif\r
+\r
+\r
+#endif  // _PRINTF_H_\r
index 8c2bc076f3024edbc9042d428385b3dd36f77bc2..276e37587e43de82e2ff8f84ca30aca3dc4cb7d9 100644 (file)
 
 int pthread_create(pthread_t *t, const pthread_attr_t * attr,
                                                                         pthread_start_t start_routine, void * arg) {
+       if (!model) {
+               snapshot_system_init(10000, 1024, 1024, 40000);
+               model = new ModelChecker();
+               model->startChecker();
+       }
+
        struct pthread_params params = { start_routine, arg };
 
        ModelAction *act = new ModelAction(PTHREAD_CREATE, std::memory_order_seq_cst, t, (uint64_t)&params);
@@ -41,6 +47,12 @@ int pthread_join(pthread_t t, void **value_ptr) {
        return 0;
 }
 
+int pthread_detach(pthread_t t) {
+       //Doesn't do anything
+       //Return success
+       return 0;
+}
+
 void pthread_exit(void *value_ptr) {
        Thread * th = thread_current();
        model->switch_to_master(new ModelAction(THREAD_FINISH, std::memory_order_seq_cst, th));
@@ -48,18 +60,28 @@ void pthread_exit(void *value_ptr) {
 }
 
 int pthread_mutex_init(pthread_mutex_t *p_mutex, const pthread_mutexattr_t *) {
+       cdsc::snapmutex *m = new cdsc::snapmutex();
+
        if (!model) {
+               snapshot_system_init(10000, 1024, 1024, 40000);
                model = new ModelChecker();
+               model->startChecker();
        }
 
-       cdsc::mutex *m = new cdsc::mutex();
-
        ModelExecution *execution = model->get_execution();
        execution->getMutexMap()->put(p_mutex, m);
+
        return 0;
 }
 
 int pthread_mutex_lock(pthread_mutex_t *p_mutex) {
+       if (!model) {
+               snapshot_system_init(10000, 1024, 1024, 40000);
+               model = new ModelChecker();
+               model->startChecker();
+       }
+
+
        ModelExecution *execution = model->get_execution();
 
        /* to protect the case where PTHREAD_MUTEX_INITIALIZER is used
@@ -69,7 +91,7 @@ int pthread_mutex_lock(pthread_mutex_t *p_mutex) {
                pthread_mutex_init(p_mutex, NULL);
        }
 
-       cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
+       cdsc::snapmutex *m = execution->getMutexMap()->get(p_mutex);
 
        if (m != NULL) {
                m->lock();
@@ -82,12 +104,12 @@ int pthread_mutex_lock(pthread_mutex_t *p_mutex) {
 
 int pthread_mutex_trylock(pthread_mutex_t *p_mutex) {
        ModelExecution *execution = model->get_execution();
-       cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
+       cdsc::snapmutex *m = execution->getMutexMap()->get(p_mutex);
        return m->try_lock();
 }
 int pthread_mutex_unlock(pthread_mutex_t *p_mutex) {
        ModelExecution *execution = model->get_execution();
-       cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
+       cdsc::snapmutex *m = execution->getMutexMap()->get(p_mutex);
 
        if (m != NULL) {
                m->unlock();
@@ -107,7 +129,7 @@ int pthread_mutex_timedlock (pthread_mutex_t *__restrict p_mutex,
         if (!execution->mutex_map.contains(p_mutex)) {
                 pthread_mutex_init(p_mutex, NULL);
         }
-        cdsc::mutex *m = execution->mutex_map.get(p_mutex);
+        cdsc::snapmutex *m = execution->mutex_map.get(p_mutex);
 
         if (m != NULL) {
                 m->lock();
@@ -131,7 +153,7 @@ int pthread_key_delete(pthread_key_t) {
 }
 
 int pthread_cond_init(pthread_cond_t *p_cond, const pthread_condattr_t *attr) {
-       cdsc::condition_variable *v = new cdsc::condition_variable();
+       cdsc::snapcondition_variable *v = new cdsc::snapcondition_variable();
 
        ModelExecution *execution = model->get_execution();
        execution->getCondMap()->put(p_cond, v);
@@ -143,8 +165,8 @@ int pthread_cond_wait(pthread_cond_t *p_cond, pthread_mutex_t *p_mutex) {
        if ( !execution->getCondMap()->contains(p_cond) )
                pthread_cond_init(p_cond, NULL);
 
-       cdsc::condition_variable *v = execution->getCondMap()->get(p_cond);
-       cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
+       cdsc::snapcondition_variable *v = execution->getCondMap()->get(p_cond);
+       cdsc::snapmutex *m = execution->getMutexMap()->get(p_mutex);
 
        v->wait(*m);
        return 0;
@@ -160,8 +182,8 @@ int pthread_cond_timedwait(pthread_cond_t *p_cond,
        if ( !execution->getMutexMap()->contains(p_mutex) )
                pthread_mutex_init(p_mutex, NULL);
 
-       cdsc::condition_variable *v = execution->getCondMap()->get(p_cond);
-       cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
+       cdsc::snapcondition_variable *v = execution->getCondMap()->get(p_cond);
+       cdsc::snapmutex *m = execution->getMutexMap()->get(p_mutex);
 
        model->switch_to_master(new ModelAction(NOOP, std::memory_order_seq_cst, v));
 //     v->wait(*m);
@@ -175,7 +197,7 @@ int pthread_cond_signal(pthread_cond_t *p_cond) {
        if ( !execution->getCondMap()->contains(p_cond) )
                pthread_cond_init(p_cond, NULL);
 
-       cdsc::condition_variable *v = execution->getCondMap()->get(p_cond);
+       cdsc::snapcondition_variable *v = execution->getCondMap()->get(p_cond);
 
        v->notify_one();
        return 0;
index d926c74e691dd762ccf06b2cd1bca1dfc2fd1a41..4d27ae04cecfcd679b8d81d64061579391a25075 100644 (file)
@@ -5,14 +5,15 @@
 
 #ifndef __SNAPINTERFACE_H
 #define __SNAPINTERFACE_H
+#include <ucontext.h>
 
 typedef unsigned int snapshot_id;
-
 typedef void (*VoidFuncPtr)();
+
 void snapshot_system_init(unsigned int numbackingpages,
                                                                                                        unsigned int numsnapshots, unsigned int nummemoryregions,
-                                                                                                       unsigned int numheappages, VoidFuncPtr entryPoint);
-
+                                                                                                       unsigned int numheappages);
+void startExecution(ucontext_t * context, VoidFuncPtr entryPoint);
 void snapshot_stack_init();
 void snapshot_record(int seq_index);
 int snapshot_backtrack_before(int seq_index);
index ce2b28fe3dc4cb0952ed9b11c8c79b923b71b021..dc42614efd8f89d7f6fb6360132afb2fe7410c5b 100644 (file)
@@ -134,7 +134,7 @@ static void mprot_handle_pf(int sig, siginfo_t *si, void *unused)
 
 static void mprot_snapshot_init(unsigned int numbackingpages,
                                                                                                                                unsigned int numsnapshots, unsigned int nummemoryregions,
-                                                                                                                               unsigned int numheappages, VoidFuncPtr entryPoint)
+                                                                                                                               unsigned int numheappages)
 {
        /* Setup a stack for our signal handler....  */
        stack_t ss;
@@ -179,8 +179,11 @@ static void mprot_snapshot_init(unsigned int numbackingpages,
        pagealignedbase = PageAlignAddressUpward(base_model_snapshot_space);
        model_snapshot_space = create_mspace_with_base(pagealignedbase, numheappages * PAGESIZE, 1);
        snapshot_add_memory_region(pagealignedbase, numheappages);
+}
 
-       entryPoint();
+static void mprot_startExecution(ucontext_t * context, VoidFuncPtr entryPoint) {
+       /* setup the shared-stack context */
+       create_context(context, fork_snap->mStackBase, model_calloc(STACK_SIZE_DEFAULT, 1), STACK_SIZE_DEFAULT, entryPoint);
 }
 
 static void mprot_add_to_snapshot(void *addr, unsigned int numPages)
@@ -320,6 +323,7 @@ static void create_context(ucontext_t *ctxt, void *stack, size_t stacksize,
        getcontext(ctxt);
        ctxt->uc_stack.ss_sp = stack;
        ctxt->uc_stack.ss_size = stacksize;
+       ctxt->uc_link = NULL;
        makecontext(ctxt, func, 0);
 }
 
@@ -327,7 +331,7 @@ static void create_context(ucontext_t *ctxt, void *stack, size_t stacksize,
  *  process */
 static void fork_exit()
 {
-       /* Intentionally empty */
+       _Exit(EXIT_SUCCESS);
 }
 
 static void createSharedMemory()
@@ -363,30 +367,20 @@ mspace create_shared_mspace()
 
 static void fork_snapshot_init(unsigned int numbackingpages,
                                                                                                                         unsigned int numsnapshots, unsigned int nummemoryregions,
-                                                                                                                        unsigned int numheappages, VoidFuncPtr entryPoint)
+                                                                                                                        unsigned int numheappages)
 {
        if (!fork_snap)
                createSharedMemory();
 
-       void *base_model_snapshot_space = malloc((numheappages + 1) * PAGESIZE);
-       void *pagealignedbase = PageAlignAddressUpward(base_model_snapshot_space);
-       model_snapshot_space = create_mspace_with_base(pagealignedbase, numheappages * PAGESIZE, 1);
-
-       /* setup an "exiting" context */
-       char stack[128];
-       create_context(&exit_ctxt, stack, sizeof(stack), fork_exit);
-
-       /* setup the shared-stack context */
-       create_context(&fork_snap->shared_ctxt, fork_snap->mStackBase,
-                                                                STACK_SIZE_DEFAULT, entryPoint);
-       /* switch to a new entryPoint context, on a new stack */
-       model_swapcontext(&private_ctxt, &fork_snap->shared_ctxt);
+       model_snapshot_space = create_mspace(numheappages * PAGESIZE, 1);
+}
 
+static void fork_loop() {
        /* switch back here when takesnapshot is called */
        snapshotid = fork_snap->currSnapShotID;
        if (model->params.nofork) {
                setcontext(&fork_snap->shared_ctxt);
-               exit(EXIT_SUCCESS);
+               _Exit(EXIT_SUCCESS);
        }
 
        while (true) {
@@ -409,13 +403,23 @@ static void fork_snapshot_init(unsigned int numbackingpages,
                        }
 
                        if (fork_snap->mIDToRollback != snapshotid)
-                               exit(EXIT_SUCCESS);
+                               _Exit(EXIT_SUCCESS);
                }
        }
 }
 
-static snapshot_id fork_take_snapshot()
-{
+static void fork_startExecution(ucontext_t *context, VoidFuncPtr entryPoint) {
+       /* setup an "exiting" context */
+       char stack[128];
+       create_context(&exit_ctxt, stack, sizeof(stack), fork_exit);
+
+       /* setup the system context */
+       create_context(context, fork_snap->mStackBase, STACK_SIZE_DEFAULT, entryPoint);
+       /* switch to a new entryPoint context, on a new stack */
+       create_context(&private_ctxt, snapshot_calloc(STACK_SIZE_DEFAULT, 1), STACK_SIZE_DEFAULT, fork_loop);
+}
+
+static snapshot_id fork_take_snapshot() {
        model_swapcontext(&fork_snap->shared_ctxt, &private_ctxt);
        DEBUG("TAKESNAPSHOT RETURN\n");
        return snapshotid;
@@ -437,12 +441,21 @@ static void fork_roll_back(snapshot_id theID)
  */
 void snapshot_system_init(unsigned int numbackingpages,
                                                                                                        unsigned int numsnapshots, unsigned int nummemoryregions,
-                                                                                                       unsigned int numheappages, VoidFuncPtr entryPoint)
+                                                                                                       unsigned int numheappages)
+{
+#if USE_MPROTECT_SNAPSHOT
+       mprot_snapshot_init(numbackingpages, numsnapshots, nummemoryregions, numheappages);
+#else
+       fork_snapshot_init(numbackingpages, numsnapshots, nummemoryregions, numheappages);
+#endif
+}
+
+void startExecution(ucontext_t *context, VoidFuncPtr entryPoint)
 {
 #if USE_MPROTECT_SNAPSHOT
-       mprot_snapshot_init(numbackingpages, numsnapshots, nummemoryregions, numheappages, entryPoint);
+       mprot_startExecution(context, entryPoint);
 #else
-       fork_snapshot_init(numbackingpages, numsnapshots, nummemoryregions, numheappages, entryPoint);
+       fork_startExecution(context, entryPoint);
 #endif
 }
 
index fcfd59a8825f5b0946fb4fd3786d9347b26a364e..0457c41479d33eab3bee952fa6883f1d6f007628 100644 (file)
@@ -3,11 +3,11 @@
 #include "threads.h"
 #include "librace.h"
 #include "stdatomic.h"
-#include <mutex>
+#include <mutex.h>
 #include <condition_variable>
 
-std::mutex * m;
-std::condition_variable *v;
+cdsc::mutex * m;
+cdsc::condition_variable *v;
 int shareddata;
 
 static void a(void *obj)
@@ -32,8 +32,8 @@ int user_main(int argc, char **argv)
 {
        thrd_t t1, t2;
        store_32(&shareddata, (unsigned int) 0);
-       m=new std::mutex();
-       v=new std::condition_variable();
+       m=new cdsc::mutex();
+       v=new cdsc::condition_variable();
 
        thrd_create(&t1, (thrd_start_t)&a, NULL);
        thrd_create(&t2, (thrd_start_t)&b, NULL);
index 61e3569bbdfe8a9f09efb8e354eacc119257d8ab..02f20b030778420d46a247e7a3980f34bf20396b 100644 (file)
@@ -161,6 +161,7 @@ private:
 };
 
 Thread * thread_current();
+void thread_startup();
 
 static inline thread_id_t thrd_to_id(thrd_t t)
 {