[firefly-linux-kernel-4.4.55.git] include/linux/seqlock.h
index c6db9fb33c448f28197ffb6d135689daf58625b6..18299057402f1bf9015cff936f9f2a37c01a5896 100644
 #include <linux/preempt.h>
 #include <asm/processor.h>
 
-typedef struct {
-       unsigned sequence;
-       spinlock_t lock;
-} seqlock_t;
-
-/*
- * These macros triggered gcc-3.x compile-time problems.  We think these are
- * OK now.  Be cautious.
- */
-#define __SEQLOCK_UNLOCKED(lockname) \
-                { 0, __SPIN_LOCK_UNLOCKED(lockname) }
-
-#define seqlock_init(x)                                        \
-       do {                                            \
-               (x)->sequence = 0;                      \
-               spin_lock_init(&(x)->lock);             \
-       } while (0)
-
-#define DEFINE_SEQLOCK(x) \
-               seqlock_t x = __SEQLOCK_UNLOCKED(x)
-
-/* Lock out other writers and update the count.
- * Acts like a normal spin_lock/unlock.
- * Don't need preempt_disable() because that is in the spin_lock already.
- */
-static inline void write_seqlock(seqlock_t *sl)
-{
-       spin_lock(&sl->lock);
-       ++sl->sequence;
-       smp_wmb();
-}
-
-static inline void write_sequnlock(seqlock_t *sl)
-{
-       smp_wmb();
-       sl->sequence++;
-       spin_unlock(&sl->lock);
-}
-
-static inline int write_tryseqlock(seqlock_t *sl)
-{
-       int ret = spin_trylock(&sl->lock);
-
-       if (ret) {
-               ++sl->sequence;
-               smp_wmb();
-       }
-       return ret;
-}
-
-/* Start of read calculation -- fetch last complete writer token */
-static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
-{
-       unsigned ret;
-
-repeat:
-       ret = ACCESS_ONCE(sl->sequence);
-       if (unlikely(ret & 1)) {
-               cpu_relax();
-               goto repeat;
-       }
-       smp_rmb();
-
-       return ret;
-}
-
-/*
- * Test if reader processed invalid data.
- *
- * If sequence value changed then writer changed data while in section.
- */
-static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
-{
-       smp_rmb();
-
-       return unlikely(sl->sequence != start);
-}
-
-
 /*
  * Version using sequence counter only.
  * This can be used when code has its own mutex protecting the
  * update, starting before the write_seqcount_begin() and ending
  * after the write_seqcount_end().
  */
-
 typedef struct seqcount {
        unsigned sequence;
 } seqcount_t;
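
As the comment above notes, a bare seqcount_t leaves writer serialization to the caller. A minimal sketch of that pattern, assuming a hypothetical struct my_counter whose writers already serialize on their own spinlock (read_seqcount_begin/retry and write_seqcount_begin/end are the accessors provided by this header):

struct my_counter {
	seqcount_t	seq;	/* guards val for lockless readers */
	spinlock_t	lock;	/* caller-owned lock serializing writers */
	u64		val;
};

/* Writer: take our own lock, then bracket the update with the seqcount. */
static void my_counter_set(struct my_counter *c, u64 v)
{
	spin_lock(&c->lock);
	write_seqcount_begin(&c->seq);
	c->val = v;
	write_seqcount_end(&c->seq);
	spin_unlock(&c->lock);
}

/* Reader: lockless; retries if a writer ran during the copy. */
static u64 my_counter_get(struct my_counter *c)
{
	unsigned seq;
	u64 v;

	do {
		seq = read_seqcount_begin(&c->seq);
		v = c->val;
	} while (read_seqcount_retry(&c->seq, seq));

	return v;
}
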
@@ -141,7 +61,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
        unsigned ret;
 
 repeat:
-       ret = s->sequence;
+       ret = ACCESS_ONCE(s->sequence);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
@@ -165,6 +85,27 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
        return ret;
 }
 
+/**
+ * raw_seqcount_begin - begin a seq-read critical section
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * raw_seqcount_begin opens a read critical section of the given seqcount.
+ * Validity of the critical section is tested by checking
+ * read_seqcount_retry().
+ *
+ * Unlike read_seqcount_begin(), this function will not wait for the count
+ * to stabilize. If a writer is active when we begin, we will fail the
+ * read_seqcount_retry() instead of stabilizing at the beginning of the
+ * critical section.
+ */
+static inline unsigned raw_seqcount_begin(const seqcount_t *s)
+{
+       unsigned ret = ACCESS_ONCE(s->sequence);
+       smp_rmb();
+       return ret & ~1;
+}
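
A hedged sketch of why raw_seqcount_begin() is useful: because the returned count is forced even, the reader never spins while a writer is active; it simply fails read_seqcount_retry() and can drop to a locked slow path. The struct obj and its fields here are hypothetical:

struct obj {
	seqcount_t	seq;
	spinlock_t	lock;	/* serializes writers; fallback for readers */
	u64		data;
};

static u64 obj_read(struct obj *o)
{
	unsigned seq = raw_seqcount_begin(&o->seq);	/* never waits */
	u64 v = o->data;

	if (read_seqcount_retry(&o->seq, seq)) {
		/* a writer was (or became) active: take the lock instead */
		spin_lock(&o->lock);
		v = o->data;
		spin_unlock(&o->lock);
	}
	return v;
}
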
+
 /**
  * __read_seqcount_retry - end a seq-read critical section (without barrier)
  * @s: pointer to seqcount_t
@@ -197,7 +138,6 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
 static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
        smp_rmb();
-
        return __read_seqcount_retry(s, start);
 }
 
@@ -231,31 +171,101 @@ static inline void write_seqcount_barrier(seqcount_t *s)
        s->sequence += 2;
 }
 
+typedef struct {
+       struct seqcount seqcount;
+       spinlock_t lock;
+} seqlock_t;
+
 /*
- * Possible sw/hw IRQ protected versions of the interfaces.
+ * These macros triggered gcc-3.x compile-time problems.  We think these are
+ * OK now.  Be cautious.
  */
-#define write_seqlock_irqsave(lock, flags)                             \
-       do { local_irq_save(flags); write_seqlock(lock); } while (0)
-#define write_seqlock_irq(lock)                                                \
-       do { local_irq_disable();   write_seqlock(lock); } while (0)
-#define write_seqlock_bh(lock)                                         \
-        do { local_bh_disable();    write_seqlock(lock); } while (0)
+#define __SEQLOCK_UNLOCKED(lockname)                   \
+       {                                               \
+               .seqcount = SEQCNT_ZERO,                \
+               .lock = __SPIN_LOCK_UNLOCKED(lockname)  \
+       }
 
-#define write_sequnlock_irqrestore(lock, flags)                                \
-       do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
-#define write_sequnlock_irq(lock)                                      \
-       do { write_sequnlock(lock); local_irq_enable(); } while(0)
-#define write_sequnlock_bh(lock)                                       \
-       do { write_sequnlock(lock); local_bh_enable(); } while(0)
+#define seqlock_init(x)                                        \
+       do {                                            \
+               seqcount_init(&(x)->seqcount);          \
+               spin_lock_init(&(x)->lock);             \
+       } while (0)
+
+#define DEFINE_SEQLOCK(x) \
+               seqlock_t x = __SEQLOCK_UNLOCKED(x)
+
+/*
+ * Read side functions for starting and finalizing a read side section.
+ */
+static inline unsigned read_seqbegin(const seqlock_t *sl)
+{
+       return read_seqcount_begin(&sl->seqcount);
+}
 
-#define read_seqbegin_irqsave(lock, flags)                             \
-       ({ local_irq_save(flags);   read_seqbegin(lock); })
+static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+{
+       return read_seqcount_retry(&sl->seqcount, start);
+}
 
-#define read_seqretry_irqrestore(lock, iv, flags)                      \
-       ({                                                              \
-               int ret = read_seqretry(lock, iv);                      \
-               local_irq_restore(flags);                               \
-               ret;                                                    \
-       })
+/*
+ * Lock out other writers and update the count.
+ * Acts like a normal spin_lock/unlock.
+ * Don't need preempt_disable() because that is in the spin_lock already.
+ */
+static inline void write_seqlock(seqlock_t *sl)
+{
+       spin_lock(&sl->lock);
+       write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock(seqlock_t *sl)
+{
+       write_seqcount_end(&sl->seqcount);
+       spin_unlock(&sl->lock);
+}
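
Reader/writer usage of the rebuilt seqlock_t is unchanged from the old open-coded version; a short sketch with made-up names (foo_lock, foo_time):

static DEFINE_SEQLOCK(foo_lock);
static struct timespec foo_time;

/* Writer: the embedded spinlock serializes concurrent updaters. */
static void foo_update(const struct timespec *ts)
{
	write_seqlock(&foo_lock);
	foo_time = *ts;
	write_sequnlock(&foo_lock);
}

/* Reader: lockless, loops until it sees a consistent snapshot. */
static struct timespec foo_read(void)
{
	struct timespec ts;
	unsigned seq;

	do {
		seq = read_seqbegin(&foo_lock);
		ts = foo_time;
	} while (read_seqretry(&foo_lock, seq));

	return ts;
}
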
+
+static inline void write_seqlock_bh(seqlock_t *sl)
+{
+       spin_lock_bh(&sl->lock);
+       write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock_bh(seqlock_t *sl)
+{
+       write_seqcount_end(&sl->seqcount);
+       spin_unlock_bh(&sl->lock);
+}
+
+static inline void write_seqlock_irq(seqlock_t *sl)
+{
+       spin_lock_irq(&sl->lock);
+       write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock_irq(seqlock_t *sl)
+{
+       write_seqcount_end(&sl->seqcount);
+       spin_unlock_irq(&sl->lock);
+}
+
+static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&sl->lock, flags);
+       write_seqcount_begin(&sl->seqcount);
+       return flags;
+}
+
+#define write_seqlock_irqsave(lock, flags)                             \
+       do { flags = __write_seqlock_irqsave(lock); } while (0)
+
+static inline void
+write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+{
+       write_seqcount_end(&sl->seqcount);
+       spin_unlock_irqrestore(&sl->lock, flags);
+}
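
For the IRQ-safe variant, flags is assigned through the write_seqlock_irqsave() macro, which wraps __write_seqlock_irqsave() above; a small sketch with hypothetical names (stats_lock, stats_val):

static DEFINE_SEQLOCK(stats_lock);
static u64 stats_val;

static void stats_update(u64 v)
{
	unsigned long flags;

	write_seqlock_irqsave(&stats_lock, flags);	/* disables IRQs, saves flags */
	stats_val = v;
	write_sequnlock_irqrestore(&stats_lock, flags);
}
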
 
 #endif /* __LINUX_SEQLOCK_H */