• A fun mutex, part II...

    From Chris M. Thomasson@3:633/280.2 to All on Mon Aug 18 09:51:27 2025
    Fwiw, here is a total experiment of mine. I think it should work, need
    to verify in Relacy (no time right now), but its a way to have a work
    stack and a critical section at the same time. I really need to add in a try_lock that pops the stack while trying to acquire the mutex. Anyway,
    can you give it a go in your spare time? Thanks. Show me some output?

    Be sure to take careful notice of the ct_stack_mutex struct...

    C++20:
    ___________________________________
    // A Fun Mutex Pattern? Or, a Nightmare? Humm...
    // By: Chris M. Thomasson
    //___________________________________________________


    #include <iostream>
    #include <random>
    #include <numeric>
    #include <algorithm>
    #include <thread>
    #include <atomic>
    #include <mutex>
    #include <string>
    #include <semaphore>


    #define CT_WORKERS (100)
    #define CT_ITERS (1000000)


    static std::atomic<unsigned long> g_ct_work_alloc = { 0 };
    static std::atomic<unsigned long> g_ct_work_dealloc = { 0 };


    #define CT_UNLOCKED (nullptr)
    #define CT_CONTENTION (reinterpret_cast<ct_work*>(0xDEADBEEF))


    // A single unit of heap-allocated work. Nodes are chained into an
    // intrusive stack through m_next. Construction and destruction bump the
    // global alloc/dealloc counters so main() can verify nothing leaked.
    struct ct_work
    {
        std::atomic<ct_work*> m_next{ nullptr };
        std::string m_payload;

        // Takes a copy of the payload and records one allocation.
        ct_work(std::string const& payload)
            : m_payload(payload)
        {
            g_ct_work_alloc.fetch_add(1, std::memory_order_relaxed);
        }

        // Records one deallocation for the leak sanity check.
        ~ct_work()
        {
            g_ct_work_dealloc.fetch_add(1, std::memory_order_relaxed);
        }

        // Debug helper: prints this node's address and payload.
        void
        dump() const
        {
            std::cout << "(" << this << ")->ct_work::m_payload = "
                      << m_payload << "\n";
        }
    };



    // A combined mutex + work stack ("stack mutex"), experimental.
    //
    // m_head does double duty:
    //   CT_UNLOCKED   (nullptr)  -> mutex free, stack empty
    //   CT_CONTENTION (sentinel) -> mutex held with waiter(s) pending
    //   any other ct_work*       -> mutex held; pointer is the top of a
    //                               stack of nodes pushed by lock() callers
    //
    // lock(work) pushes `work` and acquires the mutex; any nodes it absorbs
    // from other threads along the way are handed back to the caller.
    // unlock() releases the mutex and returns whatever remains on the stack.
    struct ct_stack_mutex
    {
        std::atomic<ct_work*> m_head = { CT_UNLOCKED };
        std::binary_semaphore m_wset{ 0 };

        // Acquire the mutex while pushing `work` onto the shared stack.
        // Returns a singly-linked (via m_next) list of nodes this thread
        // absorbed during acquisition (caller owns and must delete them),
        // or nullptr if none were absorbed.
        ct_work*
        lock(ct_work* work)
        {
            // acq_rel: the release half publishes `work`'s payload to the
            // thread that eventually pops it; the acquire half synchronizes
            // with the prior unlock(). (The original used plain acquire,
            // which does not publish the pushed node -- flagged by the
            // author in a follow-up.)
            ct_work* self_work = m_head.exchange(work, std::memory_order_acq_rel);
            ct_work* local_work = nullptr;

            // Original condition `!= CT_UNLOCKED || == CT_CONTENTION` was
            // redundant (the second clause is subsumed by the first), and
            // its `else if (self_work != CT_UNLOCKED)` branch was
            // unreachable dead code; both simplified away here.
            if (self_work != CT_UNLOCKED)
            {
                if (self_work != CT_CONTENTION)
                {
                    // Absorb the previous head into our local list.
                    self_work->m_next.store(local_work, std::memory_order_relaxed);
                    local_work = self_work;
                }

                for (;;)
                {
                    // Install the contention sentinel so the current owner
                    // knows to wake us on unlock. acq_rel for the same
                    // publish/synchronize reasons as above.
                    self_work = m_head.exchange(CT_CONTENTION, std::memory_order_acq_rel);

                    if (self_work != CT_UNLOCKED && self_work != CT_CONTENTION)
                    {
                        // Another pushed node; absorb it.
                        self_work->m_next.store(local_work, std::memory_order_relaxed);
                        local_work = self_work;
                    }

                    if (self_work == CT_UNLOCKED)
                    {
                        // Previous owner released; we now hold the mutex.
                        break;
                    }

                    // Block until the owner's unlock() signals us.
                    m_wset.acquire();
                }
            }

            return local_work;
        }

        // Release the mutex, detaching the entire pushed-work stack.
        // Returns the detached stack (caller owns the nodes), or nullptr
        // if a waiter was signaled instead.
        ct_work*
        unlock()
        {
            // acq_rel: release ends the critical section; acquire takes
            // ownership of the detached nodes' payloads. The original's
            // defaulted seq_cst is correct but stronger than necessary
            // (noted by the author in a follow-up post).
            ct_work* self_work = m_head.exchange(CT_UNLOCKED, std::memory_order_acq_rel);

            if (self_work == CT_CONTENTION)
            {
                // A waiter installed the sentinel; wake one of them.
                m_wset.release();
                return nullptr;
            }

            return self_work;
        }
    };




    // State shared by all worker threads: the stack-mutex and the plain
    // counter it protects. m_var0 is only mutated inside the critical
    // section established by m_mutex.lock()/unlock().
    struct ct_shared
    {
    ct_stack_mutex m_mutex;
    unsigned long m_var0 = 0;
    };



    // Worker thread entry point. Runs CT_ITERS iterations; each iteration
    // allocates a work node, pushes it while locking the stack-mutex,
    // increments the shared counter inside the critical section, then
    // unlocks and deletes every work node handed back by lock()/unlock().
    // (Also restores the comment below, which a line-wrap had split into a
    // non-compiling bare line in the posted listing.)
    void
    ct_worker_entry(
        ct_shared& shared
    ) {
        //std::cout << "ct_worker_entry" << std::endl; // testing thread race for sure...

        // Deletes a singly-linked list of ct_work nodes (chained via m_next).
        auto free_work_list = [](ct_work* head) {
            while (head)
            {
                ct_work* next = head->m_next.load(std::memory_order_relaxed);
                delete head;
                head = next;
            }
        };

        for (unsigned long i = 0; i < CT_ITERS; ++i)
        {
            ct_work* w0 = new ct_work("ct_work");

            // lock() takes ownership of w0 and may hand back a list of
            // nodes pushed by other threads; we now own those.
            ct_work* wlock = shared.m_mutex.lock(w0);

            // Critical section: m_var0 is protected by m_mutex.
            {
                shared.m_var0 += 2;
            }

            // unlock() detaches and returns whatever remains on the stack.
            ct_work* wunlock = shared.m_mutex.unlock();

            free_work_list(wlock);
            free_work_list(wunlock);
        }
    }


    // Spawns CT_WORKERS threads hammering the stack-mutex, joins them, then
    // checks that every allocation was freed and the protected counter has
    // its exact expected value.
    int main()
    {
        // Hello... :^)
        {
            std::cout << "Hello ct_fun_mutex... lol? ;^) ver:(0.0.0)\n";
            std::cout << "By: Chris M. Thomasson\n";
            std::cout << "____________________________________________________\n";
            std::cout.flush();
        }

        // Create our fun things... ;^)
        ct_shared shared = { };
        std::thread workers[CT_WORKERS] = { };

        // Launch...
        {
            std::cout << "Launching " << CT_WORKERS << " Threads...\n";
            std::cout.flush();

            for (unsigned long i = 0; i < CT_WORKERS; ++i)
            {
                workers[i] = std::thread(ct_worker_entry, std::ref(shared));
            }
        }

        // Join...
        {
            std::cout << "Joining Threads... (computing :^)\n";
            std::cout.flush();
            for (unsigned long i = 0; i < CT_WORKERS; ++i)
            {
                workers[i].join();
            }
        }

        // Sanity Check...
        {
            std::cout << "shared.m_var0 = " << shared.m_var0 << "\n";
            std::cout << "g_ct_work_alloc = " << g_ct_work_alloc.load(std::memory_order_relaxed) << "\n";
            std::cout << "g_ct_work_dealloc = " << g_ct_work_dealloc.load(std::memory_order_relaxed) << "\n";

            // Each of CT_WORKERS threads adds 2 per iteration. Compute the
            // expected value in unsigned long: the original `CT_WORKERS *
            // CT_ITERS * 2` is int arithmetic and would overflow if the
            // knobs were raised. Relaxed loads are fine: the joins above
            // already synchronize with the workers.
            unsigned long const expected =
                2ul * static_cast<unsigned long>(CT_WORKERS) * CT_ITERS;

            if (g_ct_work_alloc.load(std::memory_order_relaxed) !=
                    g_ct_work_dealloc.load(std::memory_order_relaxed) ||
                shared.m_var0 != expected)
            {
                std::cout << "\nOh God damn it!!!! ;^o\n";
            }
        }

        // Fin...
        {
            std::cout << "____________________________________________________\n";
            std::cout << "Fin... :^)\n" << std::endl;
        }

        return 0;
    }
    ___________________________________



    --- MBSE BBS v1.1.2 (Linux-x86_64)
    * Origin: A noiseless patient Spider (3:633/280.2@fidonet)
  • From Chris M. Thomasson@3:633/280.2 to All on Mon Aug 18 10:07:51 2025
    On 8/17/2025 4:51 PM, Chris M. Thomasson wrote:
    Fwiw, here is a total experiment of mine. I think it should work, need
    to verify in Relacy (no time right now), but its a way to have a work
    stack and a critical section at the same time. I really need to add in a try_lock that pops the stack while trying to acquire the mutex. Anyway,
    can go give it a go in your spare time? Thanks. Show me some output?

    Be sure to take careful notice of the ct_stack_mutex struct...

    C++20:
    ___________________________________
    [...]
    ÿÿÿ ct_work*
    ÿÿÿ unlock()
    ÿÿÿ {
    ÿÿÿÿÿÿÿ ct_work* self_work = m_head.exchange(CT_UNLOCKED);


    ^^^^^^^^^^^^^^^^^^^^^^^^^^^

    UGGGG! That should be using a std::memory_order_acq_rel. I think the
    default is seq_cst. Shit! GRRRR!!!!!!!!!!

    Sorry. I mean it will still work, but the membar of seq_cst is too
    strong. I think that acq_rel is in order because not only are we
    unlocking the mutex, but we are also flushing the atomic stack.


    ÿÿÿÿÿÿÿ if (self_work == CT_CONTENTION)
    ÿÿÿÿÿÿÿ {
    ÿÿÿÿÿÿÿÿÿÿÿ m_wset.release();
    ÿÿÿÿÿÿÿÿÿÿÿ return nullptr;
    ÿÿÿÿÿÿÿ }

    ÿÿÿÿÿÿÿ return self_work;
    ÿÿÿ }
    [...]


    --- MBSE BBS v1.1.2 (Linux-x86_64)
    * Origin: A noiseless patient Spider (3:633/280.2@fidonet)
  • From Chris M. Thomasson@3:633/280.2 to All on Mon Aug 18 10:15:35 2025
    On 8/17/2025 4:51 PM, Chris M. Thomasson wrote:
    Fwiw, here is a total experiment of mine. I think it should work, need
    to verify in Relacy (no time right now), but its a way to have a work
    stack and a critical section at the same time. I really need to add in a try_lock that pops the stack while trying to acquire the mutex. Anyway,
    can go give it a go in your spare time? Thanks. Show me some output?

    Be sure to take careful notice of the ct_stack_mutex struct...

    C++20:
    ___________________________________
    [...]
    struct ct_stack_mutex
    {
    ÿÿÿ std::atomic<ct_work*> m_head = { CT_UNLOCKED };
    ÿÿÿ std::binary_semaphore m_wset{ 0 };


    ÿÿÿ ct_work*
    ÿÿÿ lock(ct_work* work)
    ÿÿÿ {
    ÿÿÿÿÿÿÿ ct_work* self_work = m_head.exchange(work, std::memory_order_acquire);

    ^^^^^^^^^^^^

    Actually I think that should be acq_rel as well.


    ÿÿÿÿÿÿÿ ct_work* local_work = nullptr;

    ÿÿÿÿÿÿÿ if (self_work != CT_UNLOCKED || self_work == CT_CONTENTION)
    ÿÿÿÿÿÿÿ {
    ÿÿÿÿÿÿÿÿÿÿÿ if (self_work != CT_CONTENTION)
    ÿÿÿÿÿÿÿÿÿÿÿ {
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ self_work->m_next.store(local_work, std::memory_order_relaxed);
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ local_work = self_work;
    ÿÿÿÿÿÿÿÿÿÿÿ }

    ÿÿÿÿÿÿÿÿÿÿÿ for (;;)
    ÿÿÿÿÿÿÿÿÿÿÿ {
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ self_work = m_head.exchange(CT_CONTENTION, std::memory_order_acquire);

    Ditto. Well, we are pushing ct_work nodes into the atomic stack, and
    trying to lock the mutex at the same time. So, acq_rel is in order.
    Humm... That is a bit of an expensive membar, so to speak, well, vs
    seq_cst is better, but humm...


    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ if (self_work != CT_UNLOCKED && self_work !=
    CT_CONTENTION)
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ {
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ self_work->m_next.store(local_work, std::memory_order_relaxed);
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ local_work = self_work;
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ }

    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ if (self_work == CT_UNLOCKED)
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ {
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ break;
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ }

    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ m_wset.acquire();
    ÿÿÿÿÿÿÿÿÿÿÿ }

    ÿÿÿÿÿÿÿ }

    ÿÿÿÿÿÿÿ else if (self_work != CT_UNLOCKED)
    ÿÿÿÿÿÿÿ {
    ÿÿÿÿÿÿÿÿÿÿÿ self_work->m_next.store(local_work,
    std::memory_order_relaxed);
    ÿÿÿÿÿÿÿÿÿÿÿ local_work = self_work;
    ÿÿÿÿÿÿÿ }

    ÿÿÿÿÿÿÿ return local_work;
    ÿÿÿ }

    Will code it up in Relacy, perhaps tonight.


    [...]


    --- MBSE BBS v1.1.2 (Linux-x86_64)
    * Origin: A noiseless patient Spider (3:633/280.2@fidonet)
  • From Bonita Montero@3:633/280.2 to All on Mon Aug 18 17:19:25 2025
    Am 18.08.2025 um 01:51 schrieb Chris M. Thomasson:
    Fwiw, here is a total experiment of mine. I think it should work, need
    to verify in Relacy (no time right now), but its a way to have a work
    stack and a critical section at the same time. I really need to add in a try_lock that pops the stack while trying to acquire the mutex. Anyway,
    can go give it a go in your spare time? Thanks. Show me some output?

    Be sure to take careful notice of the ct_stack_mutex struct...


    Sorry, you still don't understand that your idea is complete nonsense
    because the "otherwise-task" could be completely omitted. And popping
    a stack alone isn't sth. meaningful.
    Find a paper that describes your idea. I'm pretty sure a lot of people
    had this idea and they've all withdrawn it because of the issues I
    mentioned.
    You seem to be manic. You're focussed on details and you don't see the abstraction levels above that which make ideas like that completely
    gratuitous. Take care that it doesn't get worse.

    C++20:
    ___________________________________
    // A Fun Mutex Pattern? Or, a Nightmare? Humm...
    // By: Chris M. Thomasson //___________________________________________________


    #include <iostream>
    #include <random>
    #include <numeric>
    #include <algorithm>
    #include <thread>
    #include <atomic>
    #include <mutex>
    #include <string>
    #include <semaphore>


    #define CT_WORKERS (100)
    #define CT_ITERS (1000000)


    static std::atomic<unsigned long> g_ct_work_alloc = { 0 };
    static std::atomic<unsigned long> g_ct_work_dealloc = { 0 };


    #define CT_UNLOCKED (nullptr)
    #define CT_CONTENTION (reinterpret_cast<ct_work*>(0xDEADBEEF))


    struct ct_work
    {
    ÿÿÿ std::atomic<ct_work*> m_next;
    ÿÿÿ std::string m_payload;

    ÿÿÿ ct_work(std::string const& payload)
    ÿÿÿ :ÿÿ m_next(nullptr),
    ÿÿÿÿÿÿÿ m_payload(payload)
    ÿÿÿ {
    ÿÿÿÿÿÿÿ g_ct_work_alloc.fetch_add(1, std::memory_order_relaxed);
    ÿÿÿ }

    ÿÿÿ ~ct_work()
    ÿÿÿ {
    ÿÿÿÿÿÿÿ g_ct_work_dealloc.fetch_add(1, std::memory_order_relaxed);
    ÿÿÿ }

    ÿÿÿ void
    ÿÿÿ dump() const
    ÿÿÿ {
    ÿÿÿÿÿÿÿ std::cout << "(" << this << ")->ct_work::m_payload = " <<
    m_payload << "\n";
    ÿÿÿ }
    };



    struct ct_stack_mutex
    {
    ÿÿÿ std::atomic<ct_work*> m_head = { CT_UNLOCKED };
    ÿÿÿ std::binary_semaphore m_wset{ 0 };


    ÿÿÿ ct_work*
    ÿÿÿ lock(ct_work* work)
    ÿÿÿ {
    ÿÿÿÿÿÿÿ ct_work* self_work = m_head.exchange(work, std::memory_order_acquire);
    ÿÿÿÿÿÿÿ ct_work* local_work = nullptr;

    ÿÿÿÿÿÿÿ if (self_work != CT_UNLOCKED || self_work == CT_CONTENTION)
    ÿÿÿÿÿÿÿ {
    ÿÿÿÿÿÿÿÿÿÿÿ if (self_work != CT_CONTENTION)
    ÿÿÿÿÿÿÿÿÿÿÿ {
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ self_work->m_next.store(local_work, std::memory_order_relaxed);
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ local_work = self_work;
    ÿÿÿÿÿÿÿÿÿÿÿ }

    ÿÿÿÿÿÿÿÿÿÿÿ for (;;)
    ÿÿÿÿÿÿÿÿÿÿÿ {
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ self_work = m_head.exchange(CT_CONTENTION, std::memory_order_acquire);

    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ if (self_work != CT_UNLOCKED && self_work !=
    CT_CONTENTION)
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ {
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ self_work->m_next.store(local_work, std::memory_order_relaxed);
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ local_work = self_work;
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ }

    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ if (self_work == CT_UNLOCKED)
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ {
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ break;
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ }

    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ m_wset.acquire();
    ÿÿÿÿÿÿÿÿÿÿÿ }

    ÿÿÿÿÿÿÿ }

    ÿÿÿÿÿÿÿ else if (self_work != CT_UNLOCKED)
    ÿÿÿÿÿÿÿ {
    ÿÿÿÿÿÿÿÿÿÿÿ self_work->m_next.store(local_work,
    std::memory_order_relaxed);
    ÿÿÿÿÿÿÿÿÿÿÿ local_work = self_work;
    ÿÿÿÿÿÿÿ }

    ÿÿÿÿÿÿÿ return local_work;
    ÿÿÿ }

    ÿÿÿ ct_work*
    ÿÿÿ unlock()
    ÿÿÿ {
    ÿÿÿÿÿÿÿ ct_work* self_work = m_head.exchange(CT_UNLOCKED);

    ÿÿÿÿÿÿÿ if (self_work == CT_CONTENTION)
    ÿÿÿÿÿÿÿ {
    ÿÿÿÿÿÿÿÿÿÿÿ m_wset.release();
    ÿÿÿÿÿÿÿÿÿÿÿ return nullptr;
    ÿÿÿÿÿÿÿ }

    ÿÿÿÿÿÿÿ return self_work;
    ÿÿÿ }

    };




    struct ct_shared
    {
    ÿÿÿ ct_stack_mutex m_mutex;
    ÿÿÿ unsigned long m_var0 = 0;
    };



    void
    ct_worker_entry(
    ÿÿÿ ct_shared& shared
    ) {
    ÿÿÿ //std::cout << "ct_worker_entry" << std::endl; // testing thread
    race for sure...

    ÿÿÿ {
    ÿÿÿÿÿÿÿ for (unsigned long i = 0; i < CT_ITERS; ++i)
    ÿÿÿÿÿÿÿ {
    ÿÿÿÿÿÿÿÿÿÿÿ ct_work* w0 = new ct_work("ct_work");

    ÿÿÿÿÿÿÿÿÿÿÿ ct_work* wlock = shared.m_mutex.lock(w0);

    ÿÿÿÿÿÿÿÿÿÿÿ {
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ shared.m_var0 += 2;
    ÿÿÿÿÿÿÿÿÿÿÿ }

    ÿÿÿÿÿÿÿÿÿÿÿ ct_work* wunlock = shared.m_mutex.unlock();

    ÿÿÿÿÿÿÿÿÿÿÿ while (wlock)
    ÿÿÿÿÿÿÿÿÿÿÿ {
                    ct_work* next = wlock->m_next.load(std::memory_order_relaxed);
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ delete wlock;
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ wlock = next;
    ÿÿÿÿÿÿÿÿÿÿÿ }

    ÿÿÿÿÿÿÿÿÿÿÿ while (wunlock)
    ÿÿÿÿÿÿÿÿÿÿÿ {
                    ct_work* next = wunlock->m_next.load(std::memory_order_relaxed);
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ delete wunlock;
    ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ wunlock = next;
    ÿÿÿÿÿÿÿÿÿÿÿ }
    ÿÿÿÿÿÿÿ }
    ÿÿÿ }
    }


    int main()
    {
    ÿÿÿ // Hello... :^)
    ÿÿÿ {
    ÿÿÿÿÿÿÿ std::cout << "Hello ct_fun_mutex... lol? ;^) ver:(0.0.0)\n";
    ÿÿÿÿÿÿÿ std::cout << "By: Chris M. Thomasson\n";
    ÿÿÿÿÿÿÿ std::cout << "____________________________________________________\n";
    ÿÿÿÿÿÿÿ std::cout.flush();
    ÿÿÿ }

    ÿÿÿ // Create our fun things... ;^)
    ÿÿÿ ct_shared shared = { };
    ÿÿÿ std::thread workers[CT_WORKERS] = { };

    ÿÿÿ // Lanuch...
    ÿÿÿ {
    ÿÿÿÿÿÿÿ std::cout << "Launching " << CT_WORKERS << " Threads...\n";
    ÿÿÿÿÿÿÿ std::cout.flush();

    ÿÿÿÿÿÿÿ for (unsigned long i = 0; i < CT_WORKERS; ++i)
    ÿÿÿÿÿÿÿ {
    ÿÿÿÿÿÿÿÿÿÿÿ workers[i] = std::thread(ct_worker_entry, std::ref(shared));
    ÿÿÿÿÿÿÿ }
    ÿÿÿ }

    ÿÿÿ // Join...
    ÿÿÿ {
    ÿÿÿÿÿÿÿ std::cout << "Joining Threads... (computing :^)\n";
    ÿÿÿÿÿÿÿ std::cout.flush();
    ÿÿÿÿÿÿÿ for (unsigned long i = 0; i < CT_WORKERS; ++i)
    ÿÿÿÿÿÿÿ {
    ÿÿÿÿÿÿÿÿÿÿÿ workers[i].join();
    ÿÿÿÿÿÿÿ }
    ÿÿÿ }



    ÿÿÿ // Sanity Check...
    ÿÿÿ {
    ÿÿÿÿÿÿÿ std::cout << "shared.m_var0 = " << shared.m_var0 << "\n";
    ÿÿÿÿÿÿÿ std::cout << "g_ct_work_alloc = " << g_ct_work_alloc.load(std::memory_order_relaxed) << "\n";
    ÿÿÿÿÿÿÿ std::cout << "g_ct_work_dealloc = " << g_ct_work_dealloc.load(std::memory_order_relaxed) << "\n";

    ÿÿÿÿÿÿÿ if (g_ct_work_alloc != g_ct_work_dealloc ||
    ÿÿÿÿÿÿÿÿÿÿÿ shared.m_var0 != CT_WORKERS * CT_ITERS * 2)
    ÿÿÿÿÿÿÿ {
    ÿÿÿÿÿÿÿÿÿÿÿ std::cout << "\nOh God damn it!!!! ;^o\n";
    ÿÿÿÿÿÿÿ }
    ÿÿÿ }


    ÿÿÿ // Fin...
    ÿÿÿ {
    ÿÿÿÿÿÿÿ std::cout << "____________________________________________________\n";
    ÿÿÿÿÿÿÿ std::cout << "Fin... :^)\n" << std::endl;
    ÿÿÿ }

    ÿÿÿ return 0;
    }
    ___________________________________




    --- MBSE BBS v1.1.2 (Linux-x86_64)
    * Origin: A noiseless patient Spider (3:633/280.2@fidonet)
  • From Chris M. Thomasson@3:633/280.2 to All on Tue Aug 19 05:37:16 2025
    On 8/18/2025 12:19 AM, Bonita Montero wrote:
    Am 18.08.2025 um 01:51 schrieb Chris M. Thomasson:
    Fwiw, here is a total experiment of mine. I think it should work, need
    to verify in Relacy (no time right now), but its a way to have a work
    stack and a critical section at the same time. I really need to add in
    a try_lock that pops the stack while trying to acquire the mutex.
    Anyway, can go give it a go in your spare time? Thanks. Show me some
    output?

    Be sure to take careful notice of the ct_stack_mutex struct...


    Sorry, you still don't understand that your idea is complete nonsense
    because the "otherwise-task" could be completely omitted. And popping
    a stack alone isn't sth. meaningful.

    The act of popping from a stack is doing some work, right? I just wanted
    to see if I could combine a mutex and a stack for fun. Somebody might
    find it useful, but I don't know and don't really care because well, it
    was for fun to begin with. Having discussions about it is also fun.


    Find a paper that describes your idea. I'm pretty sure a lot of people
    had this idea and they've all withdrawn it because of the issues I
    mentioned.

    Oh my. May I remind you that you have been totally wrong many
    times in the past about multiple things in our many discussions. You
    were so sure you were correct, but dead wrong! Can you remember some of
    them? Sigh.


    You seem to be manic.

    You are a nice guy... ;^o


    You're focussed on details and you don't see the
    abstraction levels above that which make ideas like that completely gratitious. Take care that it doesn't get worse.

    lol. I made this stack mutex hybrid for fun. Don't project on me.


    [...]


    --- MBSE BBS v1.1.2 (Linux-x86_64)
    * Origin: A noiseless patient Spider (3:633/280.2@fidonet)