#include <atomic>
#include <thread>
#include <vector>

std::vector<int> queue_data;
std::atomic<int> count;

// Placeholder stubs so the snippet compiles; the book leaves these undefined.
void process(int item) {}
void wait_for_more_items() { std::this_thread::yield(); }

void populate_queue()
{
    unsigned const number_of_items = 20;
    queue_data.clear();
    for (unsigned i = 0; i < number_of_items; ++i)
    {
        queue_data.push_back(i);
    }
    // Publish the filled queue: this release store heads the release sequence.
    count.store(number_of_items, std::memory_order_release);
}

void consume_queue_items()
{
    while (true)
    {
        int item_index;
        // Each consumer claims an item by decrementing count with an RMW.
        if ((item_index = count.fetch_sub(1, std::memory_order_acquire)) <= 0)
        {
            wait_for_more_items();    // no items left: wait and retry
            continue;
        }
        process(queue_data[item_index - 1]);    // reading queue_data is safe here
    }
}

int main()
{
    std::thread a(populate_queue);
    std::thread b(consume_queue_items);
    std::thread c(consume_queue_items);
    a.join();
    b.join();
    c.join();
}
This is a code snippet from the book C++ Concurrency in Action, section 5.3.4. The author says:

"Without the release sequence rule or memory_order_release on the fetch_sub operations, there would be nothing to require that the stores to the queue_data were visible to the second consumer, and you would have a data race."
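
For concreteness, I take "memory_order_release on the fetch_sub operations" to mean a variant roughly like the one below, where each consumer's RMW releases as well as acquires (this is my own sketch, not code from the book):

void consume_queue_items_with_release()
{
    while (true)
    {
        int item_index;
        // Hypothetical variant: acq_rel makes every fetch_sub a release as well
        // as an acquire, so each consumer re-publishes whatever it has seen.
        if ((item_index = count.fetch_sub(1, std::memory_order_acq_rel)) <= 0)
        {
            wait_for_more_items();
            continue;
        }
        process(queue_data[item_index - 1]);
    }
}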
I don't understand why we need release semantics on the fetch_sub to synchronize the stored data (of course we need to synchronize on count itself to avoid processing an item twice). Why doesn't the second consumer simply synchronize with the release store done by the populate_queue thread?
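
Here is the execution I have in mind (my own sketch, assuming thread b happens to run its first fetch_sub before thread c does):

thread a:  fill queue_data, then count.store(20, std::memory_order_release)
thread b:  count.fetch_sub(1, std::memory_order_acquire)  reads 20, writes 19
thread c:  count.fetch_sub(1, std::memory_order_acquire)  reads 19, a value written by thread b's RMW rather than by thread a's store

In this interleaving, thread c's acquire reads a value written by thread b, not by thread a, and that is the step where I don't see why the release sequence rule (or release semantics on thread b's fetch_sub) is needed for thread c to see the stores to queue_data.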