Isolate thread pool loops

Now that we are running threaded work from multiple frames, we need to
make sure one frame does not block waiting for tasks queued by another.
To accomplish this, each call now waits only for its own loop results.

Fixes https://gitlab.com/kicad/code/kicad/-/issues/20572

(cherry picked from commit bccf36538065a8c318dcdb2bc8b28bd855fb5e81)
Seth Hillbrand 2025-04-07 12:14:41 -07:00
parent 6446e01483
commit 7bee2cf842
4 changed files with 61 additions and 59 deletions
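
Every call site below makes the same substitution: the fire-and-forget push_loop()/push_task() calls paired with the pool-wide wait_for_tasks() become parallelize_loop()/submit() calls whose returned futures are waited on directly. Below is a minimal sketch of the before/after pattern, assuming the BS::thread_pool v3.x API (BS_thread_pool.hpp) behind GetKiCadThreadPool() on this branch; doubleAll() and its vector are made-up names for illustration.

// Sketch only, not KiCad code: assumes the BS::thread_pool v3.x API.
#include <BS_thread_pool.hpp>
#include <vector>

void doubleAll( BS::thread_pool& tp, std::vector<int>& aItems )
{
    // Before: push_loop() detaches the chunked tasks, so the only way to
    // join them is wait_for_tasks(), which blocks until *every* task in the
    // shared pool finishes -- including tasks queued by another frame.
    //
    //     tp.push_loop( aItems.size(),
    //             [&]( const int a, const int b )
    //             {
    //                 for( int ii = a; ii < b; ++ii )
    //                     aItems[ii] *= 2;
    //             } );
    //     tp.wait_for_tasks();

    // After: parallelize_loop() returns a BS::multi_future bound to just
    // these chunks, so the caller waits only for its own loop results.
    auto results = tp.parallelize_loop( aItems.size(),
            [&]( const int a, const int b )
            {
                for( int ii = a; ii < b; ++ii )
                    aItems[ii] *= 2;
            } );
    results.wait();
}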

@@ -274,10 +274,12 @@ void RENDER_3D_RAYTRACE_BASE::renderTracing( uint8_t* ptrPBO, REPORTER* aStatusR
         }
     };
 
-    for( size_t i = 0; i < tp.get_thread_count() + 1; ++i )
-        tp.push_task( processBlocks );
+    BS::multi_future<void> futures;
 
-    tp.wait_for_tasks();
+    for( size_t i = 0; i < tp.get_thread_count(); ++i )
+        futures.push_back( tp.submit( processBlocks ) );
+
+    futures.wait();
 
     m_blockRenderProgressCount += numBlocksRendered;
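
The ray tracer is the one call site that is not a chunked loop: it queues one copy of processBlocks per pool thread, so the fix collects the future returned by each submit() into a BS::multi_future<void> and waits on that collection. A minimal sketch of the same pattern, again assuming the BS::thread_pool v3.x API; renderAllBlocks(), renderOneBlock() and blocksDone are hypothetical stand-ins for the real lambda and its state.

// Sketch only, not KiCad code: assumes the BS::thread_pool v3.x API.
#include <BS_thread_pool.hpp>
#include <atomic>

static std::atomic<int> blocksDone{ 0 };   // stand-in for shared render state

static void renderOneBlock()
{
    blocksDone.fetch_add( 1 );             // stand-in for the real block rendering
}

void renderAllBlocks( BS::thread_pool& tp )
{
    BS::multi_future<void> futures;

    // One worker task per pool thread; each submit() returns a std::future
    // that is kept in this caller's own multi_future.
    for( size_t i = 0; i < tp.get_thread_count(); ++i )
        futures.push_back( tp.submit( renderOneBlock ) );

    // Joins only the futures collected above, not the whole pool.
    futures.wait();
}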

@@ -1349,13 +1349,13 @@ void CONNECTION_GRAPH::updateItemConnectivity( const SCH_SHEET_PATH& aSheet,
         thread_pool& tp = GetKiCadThreadPool();
 
-        tp.push_loop( connection_vec.size(),
-                [&]( const int a, const int b)
-                {
-                    for( int ii = a; ii < b; ++ii )
-                        update_lambda( connection_vec[ii] );
-                });
-        tp.wait_for_tasks();
+        auto results = tp.parallelize_loop( connection_vec.size(),
+                [&]( const int a, const int b)
+                {
+                    for( int ii = a; ii < b; ++ii )
+                        update_lambda( connection_vec[ii] );
+                });
+        results.wait();
     }
 }
@@ -1506,13 +1506,13 @@ void CONNECTION_GRAPH::resolveAllDrivers()
     thread_pool& tp = GetKiCadThreadPool();
 
-    tp.push_loop( dirty_graphs.size(),
-            [&]( const int a, const int b)
-            {
-                for( int ii = a; ii < b; ++ii )
-                    update_lambda( dirty_graphs[ii] );
-            });
-    tp.wait_for_tasks();
+    auto results = tp.parallelize_loop( dirty_graphs.size(),
+            [&]( const int a, const int b)
+            {
+                for( int ii = a; ii < b; ++ii )
+                    update_lambda( dirty_graphs[ii] );
+            });
+    results.wait();
 
     // Now discard any non-driven subgraphs from further consideration
@@ -2146,14 +2146,14 @@ void CONNECTION_GRAPH::buildConnectionGraph( std::function<void( SCH_ITEM* )>* a
     thread_pool& tp = GetKiCadThreadPool();
 
-    tp.push_loop( m_driver_subgraphs.size(),
-            [&]( const int a, const int b)
-            {
-                for( int ii = a; ii < b; ++ii )
-                    m_driver_subgraphs[ii]->UpdateItemConnections();
-            });
+    auto results = tp.parallelize_loop( m_driver_subgraphs.size(),
+            [&]( const int a, const int b)
+            {
+                for( int ii = a; ii < b; ++ii )
+                    m_driver_subgraphs[ii]->UpdateItemConnections();
+            });
 
-    tp.wait_for_tasks();
+    results.wait();
 
     // Next time through the subgraphs, we do some post-processing to handle things like
     // connecting bus members to their neighboring subgraphs, and then propagate connections
@@ -2352,13 +2352,13 @@ void CONNECTION_GRAPH::buildConnectionGraph( std::function<void( SCH_ITEM* )>* a
                 return 1;
             };
 
-    tp.push_loop( m_driver_subgraphs.size(),
-            [&]( const int a, const int b)
-            {
-                for( int ii = a; ii < b; ++ii )
-                    updateItemConnectionsTask( m_driver_subgraphs[ii] );
-            });
-    tp.wait_for_tasks();
+    auto results2 = tp.parallelize_loop( m_driver_subgraphs.size(),
+            [&]( const int a, const int b)
+            {
+                for( int ii = a; ii < b; ++ii )
+                    updateItemConnectionsTask( m_driver_subgraphs[ii] );
+            });
+    results2.wait();
 
     m_net_code_to_subgraphs_map.clear();
     m_net_name_to_subgraphs_map.clear();

@@ -140,13 +140,13 @@ void SPICE_LIBRARY_PARSER::ReadFile( const wxString& aFilePath, REPORTER& aRepor
     // Read all self-contained models in parallel
     thread_pool& tp = GetKiCadThreadPool();
 
-    tp.push_loop( modelQueue.size(),
-            [&]( const int a, const int b )
-            {
-                for( int ii = a; ii < b; ++ii )
-                    createModel( ii, true );
-            } );
-    tp.wait_for_tasks();
+    auto results = tp.parallelize_loop( modelQueue.size(),
+            [&]( const int a, const int b )
+            {
+                for( int ii = a; ii < b; ++ii )
+                    createModel( ii, true );
+            } );
+    results.wait();
 
     // Now read all models that might refer to other models in order.
     for( int ii = 0; ii < (int) modelQueue.size(); ++ii )

@@ -192,21 +192,21 @@ void CONNECTIVITY_DATA::updateRatsnest()
     thread_pool& tp = GetKiCadThreadPool();
 
-    tp.push_loop( dirty_nets.size(),
-            [&]( const int a, const int b )
-            {
-                for( int ii = a; ii < b; ++ii )
-                    dirty_nets[ii]->UpdateNet();
-            } );
-    tp.wait_for_tasks();
+    auto results = tp.parallelize_loop( dirty_nets.size(),
+            [&]( const int a, const int b )
+            {
+                for( int ii = a; ii < b; ++ii )
+                    dirty_nets[ii]->UpdateNet();
+            } );
+    results.wait();
 
-    tp.push_loop( dirty_nets.size(),
-            [&]( const int a, const int b )
-            {
-                for( int ii = a; ii < b; ++ii )
-                    dirty_nets[ii]->OptimizeRNEdges();
-            } );
-    tp.wait_for_tasks();
+    auto results2 = tp.parallelize_loop( dirty_nets.size(),
+            [&]( const int a, const int b )
+            {
+                for( int ii = a; ii < b; ++ii )
+                    dirty_nets[ii]->OptimizeRNEdges();
+            } );
+    results2.wait();
 
 #ifdef PROFILE
     rnUpdate.Show();
@@ -374,13 +374,13 @@ void CONNECTIVITY_DATA::ComputeLocalRatsnest( const std::vector<BOARD_ITEM*>& aI
     thread_pool& tp = GetKiCadThreadPool();
     size_t num_nets = std::min( m_nets.size(), aDynamicData->m_nets.size() );
 
-    tp.push_loop( 1, num_nets,
-            [&]( const int a, const int b)
-            {
-                for( int ii = a; ii < b; ++ii )
-                    update_lambda( ii );
-            });
-    tp.wait_for_tasks();
+    auto results = tp.parallelize_loop( 1, num_nets,
+            [&]( const int a, const int b)
+            {
+                for( int ii = a; ii < b; ++ii )
+                    update_lambda( ii );
+            });
+    results.wait();
 
     // This gets the ratsnest for internal connections in the moving set
     const std::vector<CN_EDGE>& edges = GetRatsnestForItems( aItems );
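
The last hunk uses the two-bound overload: parallelize_loop( 1, num_nets, ... ) splits the half-open range [1, num_nets) into blocks, so net 0 (the no-net/unconnected entry) is skipped while the call still gets futures of its own. A small sketch of that overload, assuming the same BS::thread_pool v3.x API; updateAllButNetZero() and aNets are made-up names.

// Sketch only, not KiCad code: assumes the BS::thread_pool v3.x API.
#include <BS_thread_pool.hpp>
#include <vector>

void updateAllButNetZero( BS::thread_pool& tp, std::vector<int>& aNets )
{
    // Splits the half-open range [1, aNets.size()) into blocks, skipping
    // index 0, and returns a multi_future covering only this call's blocks.
    auto results = tp.parallelize_loop( 1, aNets.size(),
            [&]( const int a, const int b )
            {
                for( int ii = a; ii < b; ++ii )
                    aNets[ii] += 1;   // stand-in for per-net work
            } );
    results.wait();
}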