        public void Atomic_Int_Load_Should_Fail()
        {
            var atomicInteger = new Atomic<int>(int.MaxValue);

            Assert.Throws<InvalidOperationException>(() => atomicInteger.Load(MemoryOrder.Release));
            Assert.Throws<NotSupportedException>(() => atomicInteger.Load(MemoryOrder.Consume));
        }
        public void Atomic_Long_Load_Should_Fail()
        {
            var atomicLong = new Atomic<long>(long.MaxValue);

            Assert.Throws<InvalidOperationException>(() => atomicLong.Load(MemoryOrder.Release));
            Assert.Throws<NotSupportedException>(() => atomicLong.Load(MemoryOrder.Consume));
        }
Example 3
            public void Enqueue(int n)
            {
                var newNode = new Node(n);

                while (true)
                {
                    var  localTail      = _tail.Load(MemoryOrder.Relaxed);
                    var  localTailNext  = localTail.Next.Load(MemoryOrder.Relaxed);
                    var  tailNow        = _tail.Load(MemoryOrder.Relaxed);
                    bool tailHasChanged = tailNow != localTail;
                    if (tailHasChanged)
                    {
                        continue;
                    }
                    bool tailIsBehind = localTailNext != null;
                    if (tailIsBehind)
                    {
                        // The tail is lagging: another thread has completed the first of the two CASes in the
                        // else branch below, but not yet the second. Correct the tail here, which will make that
                        // second CAS fail in the other thread, so we never have to wait for it. This step is
                        // what keeps the data structure lock-free.
                        _tail.CompareExchange(localTailNext, localTail, MemoryOrder.AcquireRelease);
                    }
                    // Otherwise, if tail is still the true end of the queue, try and make its next pointer point at the newNode
                    else if (tailNow.Next.CompareExchange(newNode, null, MemoryOrder.AcquireRelease))
                    {
                        // Now we need to try and get the tail back in sync. This is to maintain the at-most-one-behind
                        // invariant. If we fail, it's OK, someone else did this for us.
                        _tail.CompareExchange(newNode, tailNow, MemoryOrder.AcquireRelease);
                        return;
                    }
                }
            }
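The Enqueue above (and the Dequeue in Example 16) rely on a node type and head/tail fields that are not part of these snippets. The following is a minimal sketch of what they appear to assume, reusing the Atomic<T>/MemoryOrder API from the other examples; the name LockFreeQueue and the exact field layout are illustrative, and the original definitions may differ.

            // Hypothetical supporting declarations for the Michael-Scott-style queue above (a sketch, not the original source).
            private class Node
            {
                public readonly int Value;
                public readonly Atomic<Node> Next = new Atomic<Node>(null);

                public Node(int value)
                {
                    Value = value;
                }
            }

            // Head and tail both start at a shared dummy node, so Dequeue can always read _head.Next.
            private readonly Atomic<Node> _head;
            private readonly Atomic<Node> _tail;

            public LockFreeQueue()
            {
                var dummy = new Node(0);
                _head = new Atomic<Node>(dummy);
                _tail = new Atomic<Node>(dummy);
            }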
Example 4
 public void Thread1()
 {
     if (x.Load(MemoryOrder.Relaxed) == 2)
     {
         y.Store(1, MemoryOrder.Release);
     }
 }
Example 5
 public void Thread1()
 {
     if (x.Load(MemoryOrder.Relaxed) == 3)
     {
         RE.Assert(x.Load(MemoryOrder.Relaxed) >= 3, "x should be at least 3");
     }
 }
Example 6
 public void Thread2()
 {
     if (a.Load(ActiveConfig.MemoryOrder) == 1 && b.Load(ActiveConfig.MemoryOrder) == 0)
     {
         c = 1;
     }
 }
        public void Atomic_Bool_Load_Should_Fail()
        {
            var atomicBoolean = new Atomic<bool>(true);

            Assert.Throws<InvalidOperationException>(() => atomicBoolean.Load(MemoryOrder.Release));
            Assert.Throws<NotSupportedException>(() => atomicBoolean.Load(MemoryOrder.Consume));
        }
 public void Atomic_Long_Load_Should_Success()
 {
     var atomicLong = new Atomic<long>(long.MaxValue);
     Assert.Equal(long.MaxValue, atomicLong.Load(MemoryOrder.Relaxed));
     Assert.Equal(long.MaxValue, atomicLong.Load(MemoryOrder.Acquire));
     Assert.Equal(long.MaxValue, atomicLong.Load(MemoryOrder.AcqRel));
     Assert.Equal(long.MaxValue, atomicLong.Load(MemoryOrder.SeqCst));
 }
 public void Atomic_Int_Load_Should_Success()
 {
     var atomicInteger = new Atomic<int>(int.MaxValue);
     Assert.Equal(int.MaxValue, atomicInteger.Load(MemoryOrder.Relaxed));
     Assert.Equal(int.MaxValue, atomicInteger.Load(MemoryOrder.Acquire));
     Assert.Equal(int.MaxValue, atomicInteger.Load(MemoryOrder.AcqRel));
     Assert.Equal(int.MaxValue, atomicInteger.Load(MemoryOrder.SeqCst));
 }
        public void Atomic_Bool_Load_Should_Success()
        {
            var atomicBoolean = new Atomic<bool>(true);

            Assert.Equal(true, atomicBoolean.Load(MemoryOrder.Relaxed));
            Assert.Equal(true, atomicBoolean.Load(MemoryOrder.Acquire));
            Assert.Equal(true, atomicBoolean.Load(MemoryOrder.AcqRel));
            Assert.Equal(true, atomicBoolean.Load(MemoryOrder.SeqCst));
        }
Example 13
            public void Push(int n)
            {
                Node currentHead;
                Node newHead = new Node(n);

                do
                {
                    currentHead = _head.Load(MemoryOrder.Acquire); // TODO, add tests with these relaxed
                    newHead.Next.Store(currentHead, MemoryOrder.Relaxed);
                } while(!_head.CompareExchange(newHead, currentHead, MemoryOrder.AcquireRelease));
            }
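Example 21 below pops from this stack via _stack.Pop(), but the pop itself is not among the snippets. A hedged sketch of the matching pop, assuming the same Node/_head layout and the CompareExchange(desired, expected, order) convention used by Push; the original implementation may differ.

            // Hypothetical counterpart to Push above (illustrative sketch only).
            public int? Pop()
            {
                while (true)
                {
                    var currentHead = _head.Load(MemoryOrder.Acquire);
                    if (currentHead == null)
                    {
                        return null; // empty stack
                    }
                    var next = currentHead.Next.Load(MemoryOrder.Relaxed);
                    // Publish the new head only if no other thread has pushed or popped in the meantime.
                    if (_head.CompareExchange(next, currentHead, MemoryOrder.AcquireRelease))
                    {
                        return currentHead.Value;
                    }
                }
            }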
Example 14
 public void Thread2()
 {
     interested1.Store(1, MemoryOrder.SequentiallyConsistent);
     while (interested0.Load(MemoryOrder.SequentiallyConsistent) == 1)
     {
         RE.Yield();
     }
     interested1.Store(0, MemoryOrder.SequentiallyConsistent);
 }
Example 15
        private void AcquireThread()
        {
            while (_flag.Load(ActiveConfig.LoadMemoryOrder) == 0)
            {
                RE.Yield();
            }
            int result = _x.Load(MemoryOrder.Relaxed);

            RE.Assert(result == 2, $"Expected to load 2 into result, but loaded {result} instead!");
        }
Example 16
 public int? Dequeue()
 {
     while (true)
     {
         var  localHead      = _head.Load(MemoryOrder.Relaxed);
         var  localHeadNext  = localHead.Next.Load(MemoryOrder.Relaxed);
         // Re-read the head to make sure the two loads above came from a consistent snapshot.
         bool headHasChanged = localHead != _head.Load(MemoryOrder.Relaxed);
         if (headHasChanged)
         {
             continue;
         }
         // If the head (acting as a sentinel) has no successor, the queue is empty.
         if (localHeadNext == null)
         {
             return null;
         }
         // Swing the head forward; on success the old successor's value is the dequeued item.
         if (_head.CompareExchange(localHeadNext, localHead, MemoryOrder.AcquireRelease))
         {
             return localHeadNext.Value;
         }
     }
 }
Example 17
        public void FastThread()
        {
            var caseB = _serializeDone; // (B) Serialize done and F has yet to complete its store => Must see S's store (because that store was before serialize)

            A.Store(1, MemoryOrder.Relaxed);
            _fastStoreDone = true;
            if (caseB)
            {
                var seen = B.Load(MemoryOrder.Relaxed);
                RE.Assert(seen == 1, "Should see value stored by slow thread if it has stored & serialized before my (FastThread) store.");
            }
        }
Example 18
 public void FastThread()
 {
     interestedF.Store(1, MemoryOrder.Relaxed);
     victim.Store(0, MemoryOrder.Release);
     while (true)
     {
         if (interestedS.Load(MemoryOrder.Relaxed) != 1)
         {
             break;
         }
         if (victim.Load(MemoryOrder.Relaxed) != 0)
         {
             break;
         }
         RE.Yield();
     }
     RE.Assert(_threadsPassed == 0, $"Fast thread entered while slow thread in critical section! ({_threadsPassed})");
     _threadsPassed++;
     interestedF.Store(0, MemoryOrder.Relaxed);
     _threadsPassed--;
 }
Example 19
        /*
         * From Dice et al:
         *
         * First, we should more precisely state the requirements for SERIALIZE(t).
         * Let's say the "Fast thread" F executes {ST A ; LD B} and the
         * "Slow thread" S executes {ST B; MEMBAR; SERIALIZE(F); LD A;}.
         * Typically, F would act as the BHT or JNI mutator, while
         * S would act in the role of the bias revoker or garbage collector.
         *
         * When Serialize(F) returns, one of the following invariants must hold:
         *
         * (A) If F has completed the {ST A} operation, then the value STed by
         * F into A will be visible to S by the time SERIALIZE(t) returns.
         * That is, the {LD A} executed by S will observe the value STed
         * into A by F.
         *
         * (B) If F has yet to complete the {ST A} operation, then
         * when F executes {LD B}, F will observe the value STed
         * into B by S.
         */

        // It seems to me that a simpler litmus test would just be: for both threads, upon completion,
        // if the other thread has completed its store, then that store must be visible to this thread.
        // A sketch of that check follows SlowThread below.
        public void SlowThread()
        {
            B.Store(1, MemoryOrder.Relaxed);
            Fence.InsertProcessWide();
            _serializeDone = true;
            var caseA = _fastStoreDone; // (A) Serialize done and F has completed its store => Must see F's store.

            if (caseA)
            {
                var seen = A.Load(MemoryOrder.Relaxed);
                RE.Assert(seen == 1, "Should see the value stored by the fast thread if it had completed its store by the time the serialize was done.");
            }
        }
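The "simpler litmus test" suggested in the comment above could be expressed as a single shared check. The helper below is a hedged sketch only, not part of the original example; FastThread would call it with a flag recording that the slow thread's store completed (analogous to _fastStoreDone) and B, and SlowThread with _fastStoreDone and A.

        // Hedged sketch of the symmetric check described above: once a thread finishes, if the other
        // thread's store has completed, that store must be visible here. Illustrative only.
        private void AssertOtherStoreVisibleIfDone(bool otherStoreDone, Atomic<int> otherVariable, string who)
        {
            if (otherStoreDone)
            {
                var seen = otherVariable.Load(MemoryOrder.Relaxed);
                RE.Assert(seen == 1, $"{who}: the other thread completed its store, so its value must be visible here.");
            }
        }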
Example 20
 private void DequeuingThread()
 {
     if (ActiveConfig.AddAllBeforeRemove)
     {
         while (!_enqueuingThreadFinished.Load(MemoryOrder.Acquire))
         {
             ;
         }
     }
     Fence.Insert(MemoryOrder.SequentiallyConsistent);
     while (_dequeued.Count < ActiveConfig.NumAddingThreads * ActiveConfig.NumAddedPerThread)
     {
          int? x = _queue.Dequeue();
         if (x.HasValue)
         {
             _dequeued.Add(x.Value);
         }
     }
 }
Example 21
 private void PoppingThread()
 {
     if (ActiveConfig.PushAllBeforePop)
     {
         while (!_pushingThreadFinished.Load(MemoryOrder.Acquire))
         {
             ;
         }
     }
     while (_popped.Count < ActiveConfig.NumPushingThreads * ActiveConfig.NumPushedPerThread)
     {
          int? x = _stack.Pop();
         if (x.HasValue)
         {
             _popped.Add(x.Value);
             _poppedInOrder.Add(x.Value);
         }
     }
 }
Example 22
 private void Thread1()
 {
     flag1.Store(1, MemoryOrder);
     if (ActiveConfig.UseExchange)
     {
         victim.Exchange(1, MemoryOrder.AcquireRelease);
     }
     else
     {
         victim.Store(1, MemoryOrder);
     }
     while (flag0.Load(ActiveConfig.UseExchange ? MemoryOrder.Acquire : MemoryOrder) == 1 && victim.Load(MemoryOrder) == 1)
     {
         RE.Yield();
     }
     ++_threadsPassed;
     RE.Assert(_threadsPassed == 1, $"Mutual exclusion not achieved, {_threadsPassed} threads currently in critical section!");
     flag1.Store(0, ActiveConfig.UseExchange ? MemoryOrder.Release : MemoryOrder);
     --_threadsPassed;
 }
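Thread1 above is one half of a Peterson-style lock with a victim variable; the other half is not shown in these snippets. A hedged sketch of what the symmetric Thread0 might look like, mirroring Thread1 over the same flag0/flag1/victim/_threadsPassed fields and config; the original may differ.

 // Hypothetical mirror of Thread1 (illustrative sketch): announce interest, volunteer as victim,
 // then wait while the other thread is interested and we are still the victim.
 private void Thread0()
 {
     flag0.Store(1, MemoryOrder);
     if (ActiveConfig.UseExchange)
     {
         victim.Exchange(0, MemoryOrder.AcquireRelease);
     }
     else
     {
         victim.Store(0, MemoryOrder);
     }
     while (flag1.Load(ActiveConfig.UseExchange ? MemoryOrder.Acquire : MemoryOrder) == 1 && victim.Load(MemoryOrder) == 0)
     {
         RE.Yield();
     }
     ++_threadsPassed;
     RE.Assert(_threadsPassed == 1, $"Mutual exclusion not achieved, {_threadsPassed} threads currently in critical section!");
     flag0.Store(0, ActiveConfig.UseExchange ? MemoryOrder.Release : MemoryOrder);
     --_threadsPassed;
 }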
Example 23
 public void SlowThread()
 {
     interestedS.Store(1, MemoryOrder.Relaxed);
     Fence.InsertProcessWide();
     victim.Exchange(1, MemoryOrder.AcquireRelease);
     while (true)
     {
         if (interestedF.Load(MemoryOrder.Relaxed) != 1)
         {
             break;
         }
         if (victim.Load(MemoryOrder.Relaxed) != 1)
         {
             break;
         }
         RE.Yield();
     }
     RE.Assert(_threadsPassed == 0, $"Slow thread entered while fast thread in critical section! ({_threadsPassed})");
     _threadsPassed++;
     interestedS.Store(0, MemoryOrder.Relaxed);
     _threadsPassed--;
 }
Example 24
 public void Thread2()
 {
     x1.Store(1, ActiveConfig.MemoryOrder);
     y1 = x0.Load(ActiveConfig.MemoryOrder);
 }