Compare commits

...

14 Commits

Author SHA1 Message Date
Jan Wichmann
2d7bf7340b Merge branch 'kicad@sortTemples' into 'master'
ADDED: Sort the templates alphabetically, leaving the default template at the top.

See merge request kicad/code/kicad!2307
2025-09-11 15:46:15 +02:00
jean-pierre charras
45166bf5c3 Gerbview: fix broken behavior for deprecated command IPPOS and IPNEG
Fixes https://gitlab.com/kicad/code/kicad/-/issues/21715
2025-09-11 14:30:50 +02:00
Jeff Young
6ab6283e2e LIBEVAL::CONTEXT manages its own local VALUEs.
Don't use std::unique_ptr as we'll just free the
value right after storing it.

Also, don't try to execute a non-existent function.

Fixes https://gitlab.com/kicad/code/kicad/-/issues/21697
2025-09-11 12:49:41 +01:00
Seth Hillbrand
fc7d91214d Make pasting in lib tables easier
You generally copy/paste whole rows in lib tables, so make this workflow
easier. Allows pasting rows as new data, prevents overwriting existing
data, and doesn't force pasting from the first column
2025-09-11 02:30:49 -07:00
Seth Hillbrand
dcbadb5857 Allow drag-drop for schematic elements
Dragging schematic elements over a subsheet allows moving them into that
subsheet
2025-09-11 02:16:47 -07:00
jean-pierre charras
3b97804cb6 DIALOG_FOOTPRINT_PROPERTIES_FP_EDITOR: add missing layers to always allowed list 2025-09-11 11:07:16 +02:00
Mark Roszko
e72def55a9 Remove moronic pybind forcing expectation of python release builds 2025-09-11 07:16:52 +00:00
Seth Hillbrand
fcf40deae2 Scale down icons that are too big
In the template view, if the icon is too big, scale it down to fit our size
2025-09-10 21:58:02 -07:00
Seth Hillbrand
ef602be91f Allow drawing subsheet with click+drag
Interestingly, the majority of people in a KiCad training course wanted
to draw subsheets this way.  There is no real reason to keep the
existing select behavior, so this greases some skids
2025-09-10 21:55:31 -07:00
Seth Hillbrand
bd5cb76fcd Sync pin shape between sheet/hier labels 2025-09-10 21:47:09 -07:00
Seth Hillbrand
de26550b5a Add stubs for compiling 2025-09-10 21:23:37 -07:00
Seth Hillbrand
7deff606be Update Pybind11 to 3.0.1 2025-09-10 13:02:24 -07:00
Seth Hillbrand
6e2b20ed0e Update BS Threadpool to 5.0 2025-09-10 13:02:24 -07:00
Jan Wichmann
d9e2bb00be ADDED: Sort the templates alphabetically, leaving the default template at the top. 2025-09-10 12:29:56 +02:00
354 changed files with 35147 additions and 9689 deletions
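
Most of the mechanical churn in this compare comes from the "Update BS Threadpool to 5.0" commit: submit() and push_task() become submit_task(), parallelize_loop() and push_loop() become submit_loop() (per-index lambda) or submit_blocks() (block-range lambda), wait_for_tasks() becomes wait() (with wait_for() for timed waits), and BS::thread_pool is now a class template, hence KiCad's alias changing to BS::thread_pool<0>. Below is a minimal, hedged sketch of the old-vs-new usage, based only on the calls visible in this diff; the surrounding function and data are invented for illustration.

#include <bs_thread_pool.hpp>
#include <cstddef>
#include <vector>

void migration_example( std::vector<int>& data )
{
    BS::thread_pool<0> tp;   // 5.x: thread_pool is a template (KiCad: using thread_pool = BS::thread_pool<0>)

    // Old: tp.submit( task ) / tp.push_task( task )   New: tp.submit_task( task )
    auto fut = tp.submit_task( []() { return 42; } );
    fut.wait();

    // Old: tp.parallelize_loop( n, []( int a, int b ) { for( int ii = a; ii < b; ++ii ) ... } )
    // New: tp.submit_loop() takes an explicit start index and a per-index lambda.
    const std::size_t count = data.size();
    auto loop = tp.submit_loop( std::size_t( 0 ), count,
            [&]( const std::size_t ii )
            {
                data[ii] *= 2;
            } );
    loop.wait();   // BS::multi_future<void>

    // Block-range lambdas are still available through tp.submit_blocks( 0, n, block ).
    // Old: tp.wait_for_tasks();                    New: tp.wait();
    // Old: tp.wait_for_tasks_duration( timeout );  New: tp.wait_for( timeout );
    tp.wait();
}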

View File

@ -277,7 +277,7 @@ void RENDER_3D_RAYTRACE_BASE::renderTracing( uint8_t* ptrPBO, REPORTER* aStatusR
BS::multi_future<void> futures;
for( size_t i = 0; i < tp.get_thread_count(); ++i )
futures.push_back( tp.submit( processBlocks ) );
futures.push_back( tp.submit_task( processBlocks ) );
futures.wait();

View File

@ -190,7 +190,7 @@ void DESIGN_BLOCK_LIST_IMPL::loadDesignBlocks()
};
for( size_t ii = 0; ii < num_elements; ++ii )
returns[ii] = tp.submit( db_thread );
returns[ii] = tp.submit_task( db_thread );
for( const std::future<size_t>& ret : returns )
{

View File

@ -19,11 +19,23 @@
#include "lib_table_grid_tricks.h"
#include "lib_table_grid.h"
#include <wx/clipbrd.h>
#include <wx/log.h>
LIB_TABLE_GRID_TRICKS::LIB_TABLE_GRID_TRICKS( WX_GRID* aGrid ) :
GRID_TRICKS( aGrid )
{
m_grid->Disconnect( wxEVT_CHAR_HOOK );
m_grid->Connect( wxEVT_CHAR_HOOK, wxCharEventHandler( LIB_TABLE_GRID_TRICKS::onCharHook ), nullptr, this );
}
LIB_TABLE_GRID_TRICKS::LIB_TABLE_GRID_TRICKS( WX_GRID* aGrid,
std::function<void( wxCommandEvent& )> aAddHandler ) :
GRID_TRICKS( aGrid, aAddHandler )
{
m_grid->Disconnect( wxEVT_CHAR_HOOK );
m_grid->Connect( wxEVT_CHAR_HOOK, wxCharEventHandler( LIB_TABLE_GRID_TRICKS::onCharHook ), nullptr, this );
}
@ -134,6 +146,61 @@ void LIB_TABLE_GRID_TRICKS::doPopupSelection( wxCommandEvent& event )
GRID_TRICKS::doPopupSelection( event );
}
}
void LIB_TABLE_GRID_TRICKS::onCharHook( wxKeyEvent& ev )
{
if( ev.GetModifiers() == wxMOD_CONTROL && ev.GetKeyCode() == 'V' && m_grid->IsCellEditControlShown() )
{
wxLogNull doNotLog;
if( wxTheClipboard->Open() )
{
if( wxTheClipboard->IsSupported( wxDF_TEXT ) || wxTheClipboard->IsSupported( wxDF_UNICODETEXT ) )
{
wxTextDataObject data;
wxTheClipboard->GetData( data );
wxString text = data.GetText();
if( !text.Contains( '\t' ) && text.Contains( ',' ) )
text.Replace( ',', '\t' );
if( text.Contains( '\t' ) || text.Contains( '\n' ) || text.Contains( '\r' ) )
{
m_grid->CancelPendingChanges();
int row = m_grid->GetGridCursorRow();
// Check if the current row already has data (has a nickname)
wxGridTableBase* table = m_grid->GetTable();
if( table && row >= 0 && row < table->GetNumberRows() )
{
// Check if the row has a nickname (indicating it has existing data)
wxString nickname = table->GetValue( row, COL_NICKNAME );
if( !nickname.IsEmpty() )
{
// Row already has data, don't allow pasting over it
wxTheClipboard->Close();
wxBell(); // Provide audio feedback
return;
}
}
m_grid->ClearSelection();
m_grid->SelectRow( row );
m_grid->SetGridCursor( row, 0 );
getSelectedArea();
paste_text( text );
wxTheClipboard->Close();
m_grid->ForceRefresh();
return;
}
}
wxTheClipboard->Close();
}
}
GRID_TRICKS::onCharHook( ev );
}
bool LIB_TABLE_GRID_TRICKS::handleDoubleClick( wxGridEvent& aEvent )
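
For reference, the heart of the paste handling added above is the clipboard normalisation: comma-separated text containing no tabs is treated as a tab-separated row before being pasted into the currently selected row. A small sketch of just that step; the helper name and sample library row are invented.

#include <wx/string.h>

// Invented helper mirroring the normalisation in onCharHook(); the sample row is made up.
static wxString NormaliseClipboardRow( wxString text )
{
    // e.g. text = "MyLib,KiCad,${KIPRJMOD}/MyLib.kicad_sym"
    if( !text.Contains( '\t' ) && text.Contains( ',' ) )
        text.Replace( ',', '\t' );   // treat a CSV-style row as spreadsheet (tab-separated) cells

    return text;   // "MyLib\tKiCad\t${KIPRJMOD}/MyLib.kicad_sym" pastes as one full row
}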

View File

@ -1160,7 +1160,9 @@ void UOP::Exec( CONTEXT* ctx )
return;
case TR_OP_METHOD_CALL:
m_func( ctx, m_ref.get() );
if( m_func )
m_func( ctx, m_ref.get() );
return;
default:
@ -1321,9 +1323,8 @@ VALUE* UCODE::Run( CONTEXT* ctx )
}
catch(...)
{
// rules which fail outright should not be fired
std::unique_ptr<VALUE> temp_false = std::make_unique<VALUE>( 0 );
return ctx->StoreValue( temp_false.get() );
// rules which fail outright should not be fired; return 0/false
return ctx->StoreValue( new VALUE( 0 ) );
}
if( ctx->SP() == 1 )
@ -1339,8 +1340,7 @@ VALUE* UCODE::Run( CONTEXT* ctx )
wxASSERT( ctx->SP() == 1 );
// non-well-formed rules should not be fired on a release build
std::unique_ptr<VALUE> temp_false = std::make_unique<VALUE>( 0 );
return ctx->StoreValue( temp_false.get() );
return ctx->StoreValue( new VALUE( 0 ) );
}
}
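
To spell out the bug the two UCODE::Run() hunks above fix (per the commit message, LIBEVAL::CONTEXT manages its own local VALUEs): storing the raw pointer obtained from a std::unique_ptr into an owning context leaves the context holding memory that the unique_ptr frees at scope exit. A minimal illustration with invented stand-in classes, assuming only that the context owns whatever it stores.

#include <memory>
#include <vector>

// Illustrative only -- not the real LIBEVAL classes.
struct VALUE_LIKE { int v; };

struct CONTEXT_LIKE
{
    // Takes ownership: the context deletes stored values itself.
    VALUE_LIKE* StoreValue( VALUE_LIKE* aVal ) { m_owned.push_back( aVal ); return aVal; }
    ~CONTEXT_LIKE() { for( VALUE_LIKE* v : m_owned ) delete v; }
    std::vector<VALUE_LIKE*> m_owned;
};

VALUE_LIKE* Broken( CONTEXT_LIKE* ctx )
{
    std::unique_ptr<VALUE_LIKE> tmp = std::make_unique<VALUE_LIKE>();
    return ctx->StoreValue( tmp.get() );   // BUG: tmp deletes the value at scope exit, so ctx
                                           // holds a dangling pointer and later double-frees it.
}

VALUE_LIKE* Fixed( CONTEXT_LIKE* ctx )
{
    return ctx->StoreValue( new VALUE_LIKE{ 0 } );  // ownership passes cleanly to the context
}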

View File

@ -1614,11 +1614,10 @@ void CONNECTION_GRAPH::resolveAllDrivers()
thread_pool& tp = GetKiCadThreadPool();
auto results = tp.parallelize_loop( dirty_graphs.size(),
[&]( const int a, const int b)
auto results = tp.submit_loop( 0, dirty_graphs.size(),
[&]( const int ii )
{
for( int ii = a; ii < b; ++ii )
update_lambda( dirty_graphs[ii] );
update_lambda( dirty_graphs[ii] );
});
results.wait();
@ -2257,11 +2256,10 @@ void CONNECTION_GRAPH::buildConnectionGraph( std::function<void( SCH_ITEM* )>* a
thread_pool& tp = GetKiCadThreadPool();
auto results = tp.parallelize_loop( m_driver_subgraphs.size(),
[&]( const int a, const int b)
auto results = tp.submit_loop( 0, m_driver_subgraphs.size(),
[&]( const int ii )
{
for( int ii = a; ii < b; ++ii )
m_driver_subgraphs[ii]->UpdateItemConnections();
m_driver_subgraphs[ii]->UpdateItemConnections();
});
results.wait();
@ -2464,12 +2462,11 @@ void CONNECTION_GRAPH::buildConnectionGraph( std::function<void( SCH_ITEM* )>* a
return 1;
};
auto results2 = tp.parallelize_loop( m_driver_subgraphs.size(),
[&]( const int a, const int b)
auto results2 = tp.submit_loop( 0, m_driver_subgraphs.size(),
[&]( const int ii )
{
for( int ii = a; ii < b; ++ii )
updateItemConnectionsTask( m_driver_subgraphs[ii] );
});
updateItemConnectionsTask( m_driver_subgraphs[ii] );
} );
results2.wait();
m_net_code_to_subgraphs_map.clear();

View File

@ -38,6 +38,7 @@
#include <lib_table_grid.h>
#include <wildcards_and_files_ext.h>
#include <env_paths.h>
#include <functional>
#include <eeschema_id.h>
#include <symbol_edit_frame.h>
#include <symbol_viewer_frame.h>
@ -153,6 +154,13 @@ public:
{
}
SYMBOL_GRID_TRICKS( DIALOG_EDIT_LIBRARY_TABLES* aParent, WX_GRID* aGrid,
std::function<void( wxCommandEvent& )> aAddHandler ) :
LIB_TABLE_GRID_TRICKS( aGrid, aAddHandler ),
m_dialog( aParent )
{
}
protected:
DIALOG_EDIT_LIBRARY_TABLES* m_dialog;
@ -224,8 +232,21 @@ protected:
}
else
{
// paste spreadsheet formatted text.
GRID_TRICKS::paste_text( cb_text );
wxString text = cb_text;
if( !text.Contains( '\t' ) && text.Contains( ',' ) )
text.Replace( ',', '\t' );
if( text.Contains( '\t' ) )
{
int row = m_grid->GetGridCursorRow();
m_grid->ClearSelection();
m_grid->SelectRow( row );
m_grid->SetGridCursor( row, 0 );
getSelectedArea();
}
GRID_TRICKS::paste_text( text );
m_grid->AutoSizeColumns( false );
}
@ -250,7 +271,8 @@ void PANEL_SYM_LIB_TABLE::setupGrid( WX_GRID* aGrid )
};
// add Cut, Copy, and Paste to wxGrids
aGrid->PushEventHandler( new SYMBOL_GRID_TRICKS( m_parent, aGrid ) );
aGrid->PushEventHandler( new SYMBOL_GRID_TRICKS( m_parent, aGrid,
[this]( wxCommandEvent& event ) { appendRowHandler( event ); } ) );
aGrid->SetSelectionMode( wxGrid::wxGridSelectRows );

View File

@ -34,6 +34,9 @@
#include <string_utils.h>
#include <geometry/geometry_utils.h>
#include <schematic.h>
#include <sch_screen.h>
#include <sch_sheet.h>
#include <sch_sheet_pin.h>
#include <settings/color_settings.h>
#include <sch_painter.h>
#include <default_values.h>
@ -314,6 +317,82 @@ COLOR4D SCH_LABEL_BASE::GetLabelColor() const
}
void SCH_LABEL_BASE::SetLabelShape( LABEL_SHAPE aShape )
{
m_shape = (LABEL_FLAG_SHAPE) aShape;
static bool s_inUpdate = false;
if( s_inUpdate )
return;
s_inUpdate = true;
if( Type() == SCH_HIER_LABEL_T )
{
SCH_HIERLABEL* label = static_cast<SCH_HIERLABEL*>( this );
SCH_SCREEN* screen = static_cast<SCH_SCREEN*>( label->GetParent() );
if( screen )
{
const wxString& text = label->GetText();
for( SCH_ITEM* item : screen->Items().OfType( SCH_HIER_LABEL_T ) )
{
SCH_HIERLABEL* other = static_cast<SCH_HIERLABEL*>( item );
if( other != label && other->GetText() == text )
other->SetLabelShape( aShape );
}
for( const SCH_SHEET_PATH& sheetPath : screen->GetClientSheetPaths() )
{
SCH_SHEET* sheet = sheetPath.Last();
if( sheet )
{
for( SCH_SHEET_PIN* pin : sheet->GetPins() )
{
if( pin->GetText() == text )
pin->SetLabelShape( aShape );
}
}
}
}
}
else if( Type() == SCH_SHEET_PIN_T )
{
SCH_SHEET_PIN* pin = static_cast<SCH_SHEET_PIN*>( this );
SCH_SHEET* parent = pin->GetParent();
if( parent )
{
const wxString& text = pin->GetText();
SCH_SCREEN* screen = parent->GetScreen();
if( screen )
{
for( SCH_ITEM* item : screen->Items().OfType( SCH_HIER_LABEL_T ) )
{
SCH_HIERLABEL* hlabel = static_cast<SCH_HIERLABEL*>( item );
if( hlabel->GetText() == text )
hlabel->SetLabelShape( aShape );
}
}
for( SCH_SHEET_PIN* other : parent->GetPins() )
{
if( other != pin && other->GetText() == text )
other->SetLabelShape( aShape );
}
}
}
s_inUpdate = false;
}
void SCH_LABEL_BASE::SetSpinStyle( SPIN_STYLE aSpinStyle )
{
// Assume "Right" and Left" mean which side of the anchor the text will be on
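
The SetLabelShape() body above propagates the new shape to every hierarchical label and sheet pin sharing the same text, and those calls would in turn try to propagate back. The static s_inUpdate flag is a simple re-entrancy guard; a minimal sketch of the pattern, with an invented function name.

// Invented example of the re-entrancy guard pattern used by SetLabelShape() above.
void SyncSomething()
{
    static bool s_inUpdate = false;

    if( s_inUpdate )
        return;            // nested call made while propagating: do nothing

    s_inUpdate = true;

    // ... update this item, then call SyncSomething() on every matching item ...

    s_inUpdate = false;
}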

View File

@ -173,12 +173,19 @@ public:
bool HasConnectivityChanges( const SCH_ITEM* aItem,
const SCH_SHEET_PATH* aInstance = nullptr ) const override;
LABEL_FLAG_SHAPE GetShape() const { return m_shape; }
void SetShape( LABEL_FLAG_SHAPE aShape ) { m_shape = aShape; }
// Type-specific versions for property manager
LABEL_SHAPE GetLabelShape() const { return (LABEL_SHAPE) m_shape; }
void SetLabelShape( LABEL_SHAPE aShape ) { m_shape = (LABEL_FLAG_SHAPE) aShape; }
void SetLabelShape( LABEL_SHAPE aShape );
LABEL_FLAG_SHAPE GetShape() const { return m_shape; }
void SetShape( LABEL_FLAG_SHAPE aShape )
{
// Set flags directly if a flag shape
if( aShape >= F_FIRST )
m_shape = aShape;
else
SetLabelShape( (LABEL_SHAPE) aShape );
}
COLOR4D GetLabelColor() const;

View File

@ -140,11 +140,10 @@ void SPICE_LIBRARY_PARSER::ReadFile( const wxString& aFilePath, REPORTER& aRepor
// Read all self-contained models in parallel
thread_pool& tp = GetKiCadThreadPool();
auto results = tp.parallelize_loop( modelQueue.size(),
[&]( const int a, const int b )
auto results = tp.submit_loop( 0, modelQueue.size(),
[&]( const int ii )
{
for( int ii = a; ii < b; ++ii )
createModel( ii, true );
createModel( ii, true );
} );
results.wait();

View File

@ -3097,6 +3097,7 @@ int SCH_DRAWING_TOOLS::DrawSheet( const TOOL_EVENT& aEvent )
KIGFX::VIEW_CONTROLS* controls = getViewControls();
EE_GRID_HELPER grid( m_toolMgr );
VECTOR2I cursorPos;
bool startedWithDrag = false; // Track if initial sheet placement started with a drag
m_toolMgr->RunAction( ACTIONS::selectionClear );
@ -3188,7 +3189,8 @@ int SCH_DRAWING_TOOLS::DrawSheet( const TOOL_EVENT& aEvent )
}
}
else if( !sheet && ( evt->IsClick( BUT_LEFT ) || evt->IsDblClick( BUT_LEFT )
|| evt->IsAction( &ACTIONS::cursorClick ) || evt->IsAction( &ACTIONS::cursorDblClick ) ) )
|| evt->IsAction( &ACTIONS::cursorClick ) || evt->IsAction( &ACTIONS::cursorDblClick )
|| evt->IsDrag( BUT_LEFT ) ) )
{
SCH_SELECTION& selection = m_selectionTool->GetSelection();
@ -3211,7 +3213,15 @@ int SCH_DRAWING_TOOLS::DrawSheet( const TOOL_EVENT& aEvent )
m_toolMgr->RunAction( ACTIONS::selectionClear );
sheet = new SCH_SHEET( m_frame->GetCurrentSheet().Last(), cursorPos );
VECTOR2I sheetPos = evt->IsDrag( BUT_LEFT ) ?
grid.Align( evt->DragOrigin(), GRID_HELPER_GRIDS::GRID_GRAPHICS ) :
cursorPos;
// Remember whether this sheet was initiated with a drag so we can treat mouse-up as
// the terminating (second) click.
startedWithDrag = evt->IsDrag( BUT_LEFT );
sheet = new SCH_SHEET( m_frame->GetCurrentSheet().Last(), sheetPos );
sheet->SetScreen( nullptr );
wxString ext = wxString( "." ) + FILEEXT::KiCadSchematicFileExtension;
@ -3265,10 +3275,11 @@ int SCH_DRAWING_TOOLS::DrawSheet( const TOOL_EVENT& aEvent )
m_view->ClearPreview();
m_view->AddToPreview( sheet->Clone() );
}
else if( sheet && ( evt->IsClick( BUT_LEFT ) || evt->IsDblClick( BUT_LEFT )
|| isSyntheticClick
|| evt->IsAction( &ACTIONS::cursorClick ) || evt->IsAction( &ACTIONS::cursorDblClick )
|| evt->IsAction( &ACTIONS::finishInteractive ) ) )
else if( sheet && ( evt->IsClick( BUT_LEFT ) || evt->IsDblClick( BUT_LEFT )
|| isSyntheticClick
|| evt->IsAction( &ACTIONS::cursorClick ) || evt->IsAction( &ACTIONS::cursorDblClick )
|| evt->IsAction( &ACTIONS::finishInteractive )
|| ( startedWithDrag && evt->IsMouseUp( BUT_LEFT ) ) ) )
{
getViewControls()->SetAutoPan( false );
getViewControls()->CaptureCursor( false );
@ -3338,7 +3349,8 @@ int SCH_DRAWING_TOOLS::DrawSheet( const TOOL_EVENT& aEvent )
evt->SetPassEvent();
break;
}
else if( sheet && ( evt->IsAction( &ACTIONS::refreshPreview ) || evt->IsMotion() ) )
else if( sheet && ( evt->IsAction( &ACTIONS::refreshPreview ) || evt->IsMotion()
|| evt->IsDrag( BUT_LEFT ) ) )
{
sizeSheet( sheet, cursorPos );
m_view->ClearPreview();

View File

@ -46,6 +46,9 @@
#include <pgm_base.h>
#include <view/view_controls.h>
#include <settings/settings_manager.h>
#include <math/box2.h>
#include <base_units.h>
#include <sch_screen.h>
#include "sch_move_tool.h"
@ -500,6 +503,7 @@ bool SCH_MOVE_TOOL::doMoveSelection( const TOOL_EVENT& aEvent, SCH_COMMIT* aComm
TOOL_EVENT* evt = &copy;
VECTOR2I prevPos;
GRID_HELPER_GRIDS snapLayer = GRID_CURRENT;
SCH_SHEET* hoverSheet = nullptr;
m_cursor = controls->GetCursorPosition();
@ -771,7 +775,65 @@ bool SCH_MOVE_TOOL::doMoveSelection( const TOOL_EVENT& aEvent, SCH_COMMIT* aComm
m_cursor = grid.BestSnapAnchor( controls->GetCursorPosition( false ),
snapLayer, selection );
// Determine potential target sheet.
SCH_SHEET* sheet = dynamic_cast<SCH_SHEET*>( m_frame->GetScreen()->GetItem( m_cursor, 0,
SCH_SHEET_T ) );
if( sheet && sheet->IsSelected() )
sheet = nullptr; // Never target a selected sheet
if( !sheet )
{
// Build current selection bounding box in its (already moved) position.
BOX2I selBBox;
for( EDA_ITEM* it : selection )
{
if( SCH_ITEM* schIt = dynamic_cast<SCH_ITEM*>( it ) )
selBBox.Merge( schIt->GetBoundingBox() );
}
if( selBBox.GetWidth() > 0 && selBBox.GetHeight() > 0 )
{
VECTOR2I selCenter( selBBox.GetX() + selBBox.GetWidth() / 2,
selBBox.GetY() + selBBox.GetHeight() / 2 );
// Find first non-selected sheet whose body fully contains the selection
// or at least contains its center point.
for( SCH_ITEM* it : m_frame->GetScreen()->Items().OfType( SCH_SHEET_T ) )
{
SCH_SHEET* candidate = static_cast<SCH_SHEET*>( it );
if( candidate->IsSelected() || candidate->IsRootSheet() )
continue;
BOX2I body = candidate->GetBodyBoundingBox();
if( body.Contains( selBBox ) || body.Contains( selCenter ) )
{
sheet = candidate;
break;
}
}
}
}
if( sheet != hoverSheet )
{
if( hoverSheet )
{
hoverSheet->ClearFlags( BRIGHTENED );
m_frame->UpdateItem( hoverSheet, false );
}
hoverSheet = sheet;
if( hoverSheet )
{
hoverSheet->SetFlags( BRIGHTENED );
m_frame->UpdateItem( hoverSheet, false );
}
}
m_frame->GetCanvas()->SetCurrentCursor( hoverSheet ? KICURSOR::PLACE
: KICURSOR::MOVING );
VECTOR2I delta( m_cursor - prevPos );
m_anchorPos = m_cursor;
@ -1032,6 +1094,22 @@ bool SCH_MOVE_TOOL::doMoveSelection( const TOOL_EVENT& aEvent, SCH_COMMIT* aComm
} while( ( evt = Wait() ) ); //Should be assignment not equality test
SCH_SHEET* targetSheet = hoverSheet;
if( hoverSheet )
{
hoverSheet->ClearFlags( BRIGHTENED );
m_frame->UpdateItem( hoverSheet, false );
}
if( targetSheet )
{
moveSelectionToSheet( selection, targetSheet, aCommit );
m_toolMgr->RunAction( ACTIONS::selectionClear );
m_newDragLines.clear();
m_changedDragLines.clear();
}
// Create a selection of original selection, drag selected/changed items, and new
// bend lines for later before we clear them in the aCommit. We'll need these
// to check for new junctions needed, etc.
@ -1135,6 +1213,60 @@ bool SCH_MOVE_TOOL::doMoveSelection( const TOOL_EVENT& aEvent, SCH_COMMIT* aComm
}
void SCH_MOVE_TOOL::moveSelectionToSheet( SCH_SELECTION& aSelection, SCH_SHEET* aTargetSheet,
SCH_COMMIT* aCommit )
{
SCH_SCREEN* destScreen = aTargetSheet->GetScreen();
SCH_SCREEN* srcScreen = m_frame->GetScreen();
BOX2I bbox;
for( EDA_ITEM* item : aSelection )
bbox.Merge( static_cast<SCH_ITEM*>( item )->GetBoundingBox() );
VECTOR2I offset = VECTOR2I( 0, 0 ) - bbox.GetPosition();
int step = schIUScale.MilsToIU( 50 );
bool overlap = false;
do
{
BOX2I moved = bbox;
moved.Move( offset );
overlap = false;
for( SCH_ITEM* existing : destScreen->Items() )
{
if( moved.Intersects( existing->GetBoundingBox() ) )
{
overlap = true;
break;
}
}
if( overlap )
offset += VECTOR2I( step, step );
} while( overlap );
for( EDA_ITEM* item : aSelection )
{
SCH_ITEM* schItem = static_cast<SCH_ITEM*>( item );
// Remove from current screen and view manually
m_frame->RemoveFromScreen( schItem, srcScreen );
// Move the item
schItem->Move( offset );
// Add to destination screen manually (won't add to view since it's not current)
destScreen->Append( schItem );
// Record in commit with CHT_DONE flag to bypass automatic screen/view operations
aCommit->Stage( schItem, CHT_REMOVE | CHT_DONE, srcScreen );
aCommit->Stage( schItem, CHT_ADD | CHT_DONE, destScreen );
}
}
void SCH_MOVE_TOOL::trimDanglingLines( SCH_COMMIT* aCommit )
{
// Need a local cleanup first to ensure we remove unneeded junctions
@ -1163,8 +1295,7 @@ void SCH_MOVE_TOOL::trimDanglingLines( SCH_COMMIT* aCommit )
{
line->SetFlags( STRUCT_DELETED );
aCommit->Removed( line, m_frame->GetScreen() );
updateItem( line, false );
updateItem( line, false ); // Update any cached visuals before commit processes
m_frame->RemoveFromScreen( line, m_frame->GetScreen() );
}
}

View File

@ -35,6 +35,9 @@ class SCH_LINE;
class SCH_LABEL_BASE;
class SCH_SHEET_PIN;
class SCH_JUNCTION;
class SCH_SELECTION;
class SCH_SHEET;
class SCH_COMMIT;
struct SPECIAL_CASE_LABEL_INFO
@ -81,6 +84,8 @@ private:
void orthoLineDrag( SCH_COMMIT* aCommit, SCH_LINE* line, const VECTOR2I& splitDelta,
int& xBendCount, int& yBendCount, const EE_GRID_HELPER& grid );
void moveSelectionToSheet( SCH_SELECTION& aSelection, SCH_SHEET* aTarget, SCH_COMMIT* aCommit );
///< Clears the new drag lines and removes them from the screen
void clearNewDragLines();

View File

@ -671,8 +671,7 @@ bool GERBER_FILE_IMAGE::ExecuteRS274XCommand( int aCommand, char* aBuff,
break;
case IMAGE_POLARITY:
// These commands are deprecated since 2012.
// So do nothing and prompt the user about this command
// Note: these commands IPPOS and IPNEG are deprecated since 2012.
if( strncasecmp( aText, "NEG", 3 ) == 0 )
{
m_ImageNegative = true;
@ -688,7 +687,6 @@ bool GERBER_FILE_IMAGE::ExecuteRS274XCommand( int aCommand, char* aBuff,
// actual effect. Just skip it.
}
ok = false;
break;
case LOAD_POLARITY:

View File

@ -18,6 +18,7 @@
*/
#include "grid_tricks.h"
#include <functional>
class LIB_TABLE_GRID_TRICKS : public GRID_TRICKS
{
@ -33,6 +34,7 @@ class LIB_TABLE_GRID_TRICKS : public GRID_TRICKS
public:
explicit LIB_TABLE_GRID_TRICKS( WX_GRID* aGrid );
LIB_TABLE_GRID_TRICKS( WX_GRID* aGrid, std::function<void( wxCommandEvent& )> aAddHandler );
virtual ~LIB_TABLE_GRID_TRICKS(){};
@ -43,5 +45,7 @@ protected:
virtual void optionsEditor( int aRow ) = 0;
bool handleDoubleClick( wxGridEvent& aEvent ) override;
void onCharHook( wxKeyEvent& ev );
virtual bool supportsVisibilityColumn() { return false; }
};

View File

@ -110,7 +110,7 @@ public:
*/
void BuildArgvUtf8();
BS::thread_pool& GetThreadPool() { return *m_singleton.m_ThreadPool; }
BS::thread_pool<0>& GetThreadPool() { return *m_singleton.m_ThreadPool; }
GL_CONTEXT_MANAGER* GetGLContextManager() { return m_singleton.m_GLContextManager; }

View File

@ -25,6 +25,7 @@
class GL_CONTEXT_MANAGER;
namespace BS
{
template <std::uint8_t>
class thread_pool;
}
@ -42,7 +43,7 @@ public:
void Init();
public:
BS::thread_pool* m_ThreadPool;
BS::thread_pool<0>* m_ThreadPool;
GL_CONTEXT_MANAGER* m_GLContextManager;
};

View File

@ -28,7 +28,7 @@
#include <bs_thread_pool.hpp>
#include <import_export.h>
using thread_pool = BS::thread_pool;
using thread_pool = BS::thread_pool<0>;
/**
* Get a reference to the current thread pool. N.B., you cannot copy the thread pool

View File

@ -26,11 +26,14 @@
#include <bitmaps.h>
#include <widgets/std_bitmap_button.h>
#include <widgets/ui_common.h>
#include <algorithm>
#include <wx_filename.h>
#include <wx/dir.h>
#include <wx/dirdlg.h>
#include <wx/settings.h>
#include <wx/bitmap.h>
#include <wx/image.h>
#include <wx/math.h>
#include "template_default_html.h"
// Welcome / fallback HTML now provided by template_default_html.h
@ -51,6 +54,56 @@ void TEMPLATE_SELECTION_PANEL::AddTemplateWidget( TEMPLATE_WIDGET* aTemplateWidg
}
// Sort the widgets alphabetically, leaving Default at the top
void TEMPLATE_SELECTION_PANEL::SortAlphabetically()
{
std::vector<TEMPLATE_WIDGET*> sortedList;
TEMPLATE_WIDGET* default_temp = nullptr;
size_t count = m_SizerChoice->GetItemCount();
if( count <= 1 )
return;
for( size_t idx = 0; idx < count; idx++ )
{
wxSizerItem* item = m_SizerChoice->GetItem( idx );
if( item && item->IsWindow() )
{
TEMPLATE_WIDGET* temp = static_cast<TEMPLATE_WIDGET*>( item->GetWindow() );
const wxString title = *temp->GetTemplate()->GetTitle();
if( default_temp == nullptr && title.CmpNoCase( "default" ) == 0 )
default_temp = temp;
else
sortedList.push_back( temp );
}
}
std::sort(
sortedList.begin(), sortedList.end(),
[]( TEMPLATE_WIDGET* aWidgetA, TEMPLATE_WIDGET* aWidgetB ) -> bool
{
const wxString* a = aWidgetA->GetTemplate()->GetTitle();
const wxString* b = aWidgetB->GetTemplate()->GetTitle();
return ( *a ).CmpNoCase( *b ) < 0;
});
m_SizerChoice->Clear( false );
if( default_temp != nullptr )
m_SizerChoice->Add( default_temp );
for (TEMPLATE_WIDGET* temp : sortedList)
{
m_SizerChoice->Add( temp );
}
Layout();
}
TEMPLATE_WIDGET::TEMPLATE_WIDGET( wxWindow* aParent, DIALOG_TEMPLATE_SELECTOR* aDialog ) :
TEMPLATE_WIDGET_BASE( aParent )
{
@ -99,7 +152,24 @@ void TEMPLATE_WIDGET::SetTemplate( PROJECT_TEMPLATE* aTemplate )
wxBitmap* icon = aTemplate->GetIcon();
if( icon && icon->IsOk() )
m_bitmapIcon->SetBitmap( *icon );
{
wxSize maxSize = m_bitmapIcon->GetSize();
if( icon->GetWidth() > maxSize.x || icon->GetHeight() > maxSize.y )
{
double scale = std::min( (double) maxSize.x / icon->GetWidth(),
(double) maxSize.y / icon->GetHeight() );
wxImage image = icon->ConvertToImage();
int w = wxRound( icon->GetWidth() * scale );
int h = wxRound( icon->GetHeight() * scale );
image.Rescale( w, h, wxIMAGE_QUALITY_HIGH );
m_bitmapIcon->SetBitmap( wxBitmap( image ) );
}
else
{
m_bitmapIcon->SetBitmap( *icon );
}
}
else
m_bitmapIcon->SetBitmap( KiBitmap( BITMAPS::icon_kicad ) );
}
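
As a worked example of the fit-to-size branch added above (icon and widget sizes are made up): a 256 x 128 icon shown in a 64 x 64 bitmap widget gets scale = min(64/256, 64/128) = 0.25, so it is rescaled to 64 x 32 and the aspect ratio is preserved.

#include <algorithm>
#include <wx/math.h>

static void IconScaleExample()
{
    // 256 x 128 icon into a 64 x 64 widget (illustrative numbers only)
    double scale = std::min( 64.0 / 256.0, 64.0 / 128.0 );   // min( 0.25, 0.5 ) = 0.25
    int w = wxRound( 256 * scale );                          // 64
    int h = wxRound( 128 * scale );                          // 32: aspect ratio preserved
    (void) w;
    (void) h;
}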
@ -317,6 +387,7 @@ void DIALOG_TEMPLATE_SELECTOR::buildPageContent( const wxString& aPath, int aPag
}
}
m_panels[aPage]->SortAlphabetically();
Layout();
}

View File

@ -81,6 +81,8 @@ public:
void AddTemplateWidget( TEMPLATE_WIDGET* aTemplateWidget );
void SortAlphabetically();
protected:
wxNotebookPage* m_parent;
wxString m_templatesPath; ///< the path to access to the folder

View File

@ -636,7 +636,11 @@ void PROJECT_TREE_PANE::ReCreateTreePrj()
std::lock_guard<std::mutex> lock2( m_gitTreeCacheMutex );
thread_pool& tp = GetKiCadThreadPool();
tp.wait_for_tasks();
while( tp.get_tasks_running() )
{
tp.wait_for( std::chrono::milliseconds( 250 ) );
}
m_gitStatusTimer.Stop();
m_gitSyncTimer.Stop();
m_gitTreeCache.clear();
@ -2293,25 +2297,21 @@ void PROJECT_TREE_PANE::onGitSyncTimer( wxTimerEvent& aEvent )
thread_pool& tp = GetKiCadThreadPool();
tp.push_task(
[this]()
{
KIGIT_COMMON* gitCommon = m_TreeProject->GitCommon();
tp.submit_task( [this]()
{
KIGIT_COMMON* gitCommon = m_TreeProject->GitCommon();
if( !gitCommon )
{
wxLogTrace( traceGit, "onGitSyncTimer: No git repository found" );
return;
}
if( !gitCommon )
{
wxLogTrace( traceGit, "onGitSyncTimer: No git repository found" );
return;
}
GIT_PULL_HANDLER handler( gitCommon );
handler.PerformFetch();
GIT_PULL_HANDLER handler( gitCommon );
handler.PerformFetch();
CallAfter( [this]()
{
gitStatusTimerHandler();
} );
} );
CallAfter( [this]() { gitStatusTimerHandler(); } );
} );
if( gitSettings.updatInterval > 0 )
{
@ -2327,11 +2327,7 @@ void PROJECT_TREE_PANE::gitStatusTimerHandler()
updateTreeCache();
thread_pool& tp = GetKiCadThreadPool();
tp.push_task(
[this]()
{
updateGitStatusIconMap();
} );
tp.submit_task( [this]() { updateGitStatusIconMap(); } );
}
void PROJECT_TREE_PANE::onGitStatusTimer( wxTimerEvent& aEvent )

View File

@ -274,5 +274,5 @@ void UPDATE_MANAGER::CheckForUpdate( wxWindow* aNoticeParent )
};
thread_pool& tp = GetKiCadThreadPool();
m_updateTask = tp.submit( update_check );
m_updateTask = tp.submit_task( update_check );
}

View File

@ -154,6 +154,12 @@ void KIPLATFORM::UI::ReparentModal( wxNonOwnedWindow* aWindow )
}
void KIPLATFORM::UI::ReparentWindow( wxNonOwnedWindow* aWindow, wxTopLevelWindow* aParent )
{
// Not needed on this platform (only relevant for macOS child window ordering)
}
void KIPLATFORM::UI::FixupCancelButtonCmdKeyCollision( wxWindow *aWindow )
{
// Not needed on this platform

View File

@ -88,6 +88,12 @@ void KIPLATFORM::UI::ReparentModal( wxNonOwnedWindow* aWindow )
}
void KIPLATFORM::UI::ReparentWindow( wxNonOwnedWindow* aWindow, wxTopLevelWindow* aParent )
{
// Not needed on this platform (used only on macOS for child window ordering)
}
void KIPLATFORM::UI::FixupCancelButtonCmdKeyCollision( wxWindow *aWindow )
{
// Not needed on this platform

View File

@ -1114,7 +1114,7 @@ void BOARD::CacheTriangulation( PROGRESS_REPORTER* aReporter, const std::vector<
};
for( ZONE* zone : zones )
returns.emplace_back( tp.submit( cache_zones, zone ) );
returns.emplace_back( tp.submit_task( [cache_zones, zone] { return cache_zones( zone ); } ) );
// Finalize the triangulation threads
for( const std::future<size_t>& ret : returns )

View File

@ -270,24 +270,21 @@ void CN_CONNECTIVITY_ALGO::searchConnections()
{
std::vector<std::future<size_t>> returns( dirtyItems.size() );
auto conn_lambda =
[&dirtyItems]( size_t aItem, CN_LIST* aItemList,
PROGRESS_REPORTER* aReporter) -> size_t
{
if( aReporter && aReporter->IsCancelled() )
for( size_t ii = 0; ii < dirtyItems.size(); ++ii )
{
returns[ii] = tp.submit_task(
[&dirtyItems, ii, this] () ->size_t {
if( m_progressReporter && m_progressReporter->IsCancelled() )
return 0;
CN_VISITOR visitor( dirtyItems[aItem] );
aItemList->FindNearby( dirtyItems[aItem], visitor );
CN_VISITOR visitor( dirtyItems[ii] );
m_itemList.FindNearby( dirtyItems[ii], visitor );
if( aReporter )
aReporter->AdvanceProgress();
if( m_progressReporter )
m_progressReporter->AdvanceProgress();
return 1;
};
for( size_t ii = 0; ii < dirtyItems.size(); ++ii )
returns[ii] = tp.submit( conn_lambda, ii, &m_itemList, m_progressReporter );
return 1; } );
}
for( const std::future<size_t>& ret : returns )
{
@ -490,7 +487,11 @@ void CN_CONNECTIVITY_ALGO::Build( BOARD* aBoard, PROGRESS_REPORTER* aReporter )
};
for( size_t ii = 0; ii < zitems.size(); ++ii )
returns[ii] = tp.submit( cache_zones, zitems[ii] );
{
CN_ZONE_LAYER* ptr = zitems[ii];
returns[ii] = tp.submit_task(
[cache_zones, ptr] { return cache_zones( ptr ); } );
}
for( const std::future<size_t>& ret : returns )
{

View File

@ -191,19 +191,17 @@ void CONNECTIVITY_DATA::updateRatsnest()
thread_pool& tp = GetKiCadThreadPool();
auto results = tp.parallelize_loop( dirty_nets.size(),
[&]( const int a, const int b )
auto results = tp.submit_loop( 0, dirty_nets.size(),
[&]( const int ii )
{
for( int ii = a; ii < b; ++ii )
dirty_nets[ii]->UpdateNet();
dirty_nets[ii]->UpdateNet();
} );
results.wait();
auto results2 = tp.parallelize_loop( dirty_nets.size(),
[&]( const int a, const int b )
auto results2 = tp.submit_loop( 0, dirty_nets.size(),
[&]( const int ii )
{
for( int ii = a; ii < b; ++ii )
dirty_nets[ii]->OptimizeRNEdges();
dirty_nets[ii]->OptimizeRNEdges();
} );
results2.wait();
@ -370,11 +368,10 @@ void CONNECTIVITY_DATA::ComputeLocalRatsnest( const std::vector<BOARD_ITEM*>& aI
thread_pool& tp = GetKiCadThreadPool();
size_t num_nets = std::min( m_nets.size(), aDynamicData->m_nets.size() );
auto results = tp.parallelize_loop( 1, num_nets,
[&]( const int a, const int b)
auto results = tp.submit_loop( 1, num_nets,
[&]( const int ii )
{
for( int ii = a; ii < b; ++ii )
update_lambda( ii );
update_lambda( ii );
});
results.wait();

View File

@ -425,7 +425,7 @@ void DIALOG_EXPORT_ODBPP::GenerateODBPPFiles( const JOB_EXPORT_PCB_ODB& aJob, BO
};
thread_pool& tp = GetKiCadThreadPool();
auto ret = tp.submit( saveFile );
auto ret = tp.submit_task( saveFile );
std::future_status status = ret.wait_for( std::chrono::milliseconds( 250 ) );

View File

@ -674,6 +674,7 @@ bool DIALOG_FOOTPRINT_PROPERTIES_FP_EDITOR::Validate()
// Check that the user isn't trying to remove a layer that is used by the footprint
usedLayers &= ~getCustomLayersFromControls();
usedLayers &= ~LSET::AllTechMask();
usedLayers &= ~LSET::UserMask();
if( usedLayers.any() )
{

View File

@ -38,6 +38,7 @@
#include <wx/dirdlg.h>
#include <wx/filedlg.h>
#include <wx/msgdlg.h>
#include <functional>
#include <project.h>
#include <env_vars.h>
@ -147,6 +148,12 @@ public:
m_dialog( aParent )
{ }
FP_GRID_TRICKS( DIALOG_EDIT_LIBRARY_TABLES* aParent, WX_GRID* aGrid,
std::function<void( wxCommandEvent& )> aAddHandler ) :
LIB_TABLE_GRID_TRICKS( aGrid, aAddHandler ),
m_dialog( aParent )
{ }
protected:
DIALOG_EDIT_LIBRARY_TABLES* m_dialog;
@ -217,8 +224,21 @@ protected:
}
else
{
// paste spreadsheet formatted text.
GRID_TRICKS::paste_text( cb_text );
wxString text = cb_text;
if( !text.Contains( '\t' ) && text.Contains( ',' ) )
text.Replace( ',', '\t' );
if( text.Contains( '\t' ) )
{
int row = m_grid->GetGridCursorRow();
m_grid->ClearSelection();
m_grid->SelectRow( row );
m_grid->SetGridCursor( row, 0 );
getSelectedArea();
}
GRID_TRICKS::paste_text( text );
m_grid->AutoSizeColumns( false );
}
@ -254,7 +274,8 @@ void PANEL_FP_LIB_TABLE::setupGrid( WX_GRID* aGrid )
aGrid->SetRowSize( ii, aGrid->GetDefaultRowSize() + 4 );
// add Cut, Copy, and Paste to wxGrids
aGrid->PushEventHandler( new FP_GRID_TRICKS( m_parent, aGrid ) );
aGrid->PushEventHandler( new FP_GRID_TRICKS( m_parent, aGrid,
[this]( wxCommandEvent& event ) { appendRowHandler( event ); } ) );
aGrid->SetSelectionMode( wxGrid::wxGridSelectRows );

View File

@ -158,7 +158,7 @@ bool DRC_CACHE_GENERATOR::Run()
forEachGeometryItem( itemTypes, boardCopperLayers, countItems );
std::future<void> retn = tp.submit(
std::future<void> retn = tp.submit_task(
[&]()
{
std::unique_lock<std::shared_mutex> writeLock( m_board->m_CachesMutex );
@ -225,7 +225,7 @@ bool DRC_CACHE_GENERATOR::Run()
};
for( ZONE* zone : allZones )
returns.emplace_back( tp.submit( cache_zones, zone ) );
returns.emplace_back( tp.submit_task( [cache_zones, zone] { return cache_zones( zone ); } ) );
done.store( 1 );

View File

@ -2317,56 +2317,54 @@ void CREEPAGE_GRAPH::GeneratePaths( double aMaxWeight, PCB_LAYER_ID aLayer )
}
}
auto processWorkItems = [&]( size_t start_idx, size_t end_idx ) -> bool
auto processWorkItems = [&]( size_t idx ) -> bool
{
for( size_t idx = start_idx; idx < end_idx; ++idx )
auto [gn1, gn2] = work_items[idx];
for( PATH_CONNECTION pc : GetPaths( gn1->m_parent, gn2->m_parent, aMaxWeight ) )
{
auto [gn1, gn2] = work_items[idx];
std::vector<const BOARD_ITEM*> IgnoreForTest = {
gn1->m_parent->GetParent(), gn2->m_parent->GetParent()
};
for( PATH_CONNECTION pc : GetPaths( gn1->m_parent, gn2->m_parent, aMaxWeight ) )
if( !pc.isValid( m_board, aLayer, m_boardEdge, IgnoreForTest, m_boardOutline,
{ false, true }, m_minGrooveWidth ) )
continue;
std::shared_ptr<GRAPH_NODE> connect1 = gn1, connect2 = gn2;
std::lock_guard<std::mutex> lock( nodes_lock );
// Handle non-point node1
if( gn1->m_parent->GetType() != CREEP_SHAPE::TYPE::POINT )
{
std::vector<const BOARD_ITEM*> IgnoreForTest = {
gn1->m_parent->GetParent(), gn2->m_parent->GetParent()
};
auto gnt1 = AddNode( GRAPH_NODE::POINT, gn1->m_parent, pc.a1 );
gnt1->m_connectDirectly = false;
connect1 = gnt1;
if( !pc.isValid( m_board, aLayer, m_boardEdge, IgnoreForTest, m_boardOutline,
{ false, true }, m_minGrooveWidth ) )
continue;
std::shared_ptr<GRAPH_NODE> connect1 = gn1, connect2 = gn2;
std::lock_guard<std::mutex> lock( nodes_lock );
// Handle non-point node1
if( gn1->m_parent->GetType() != CREEP_SHAPE::TYPE::POINT )
if( gn1->m_parent->IsConductive() )
{
auto gnt1 = AddNode( GRAPH_NODE::POINT, gn1->m_parent, pc.a1 );
gnt1->m_connectDirectly = false;
connect1 = gnt1;
if( gn1->m_parent->IsConductive() )
{
if( auto gc = AddConnection( gn1, gnt1 ) )
gc->m_path.m_show = false;
}
if( auto gc = AddConnection( gn1, gnt1 ) )
gc->m_path.m_show = false;
}
// Handle non-point node2
if( gn2->m_parent->GetType() != CREEP_SHAPE::TYPE::POINT )
{
auto gnt2 = AddNode( GRAPH_NODE::POINT, gn2->m_parent, pc.a2 );
gnt2->m_connectDirectly = false;
connect2 = gnt2;
if( gn2->m_parent->IsConductive() )
{
if( auto gc = AddConnection( gn2, gnt2 ) )
gc->m_path.m_show = false;
}
}
AddConnection( connect1, connect2, pc );
}
// Handle non-point node2
if( gn2->m_parent->GetType() != CREEP_SHAPE::TYPE::POINT )
{
auto gnt2 = AddNode( GRAPH_NODE::POINT, gn2->m_parent, pc.a2 );
gnt2->m_connectDirectly = false;
connect2 = gnt2;
if( gn2->m_parent->IsConductive() )
{
if( auto gc = AddConnection( gn2, gnt2 ) )
gc->m_path.m_show = false;
}
}
AddConnection( connect1, connect2, pc );
}
return true;
};
@ -2374,15 +2372,16 @@ void CREEPAGE_GRAPH::GeneratePaths( double aMaxWeight, PCB_LAYER_ID aLayer )
// has already parallelized the work, so we can process all items in one go.
if( tp.get_tasks_total() >= tp.get_thread_count() - 4 )
{
processWorkItems( 0, work_items.size() );
for( size_t ii = 0; ii < work_items.size(); ii++ )
processWorkItems( ii );
}
else
{
auto ret = tp.parallelize_loop( work_items.size(), processWorkItems );
auto ret = tp.submit_loop( 0, work_items.size(), processWorkItems );
for( size_t ii = 0; ii < ret.size(); ii++ )
{
std::future<bool>& r = ret[ii];
auto& r = ret[ii];
if( !r.valid() )
continue;

View File

@ -505,7 +505,6 @@ bool DRC_TEST_PROVIDER_CONNECTION_WIDTH::Run()
}
thread_pool& tp = GetKiCadThreadPool();
std::vector<std::future<size_t>> returns;
size_t total_effort = 0;
for( const auto& [ netLayer, itemsPoly ] : dataset )
@ -513,14 +512,16 @@ bool DRC_TEST_PROVIDER_CONNECTION_WIDTH::Run()
total_effort += std::max( (size_t) 1, total_effort ) * distinctMinWidths.size();
std::vector<std::future<size_t>> returns;
returns.reserve( dataset.size() );
for( const auto& [ netLayer, itemsPoly ] : dataset )
{
returns.emplace_back( tp.submit( build_netlayer_polys, netLayer.Netcode, netLayer.Layer ) );
int netcode = netLayer.Netcode;
PCB_LAYER_ID layer = netLayer.Layer;
returns.emplace_back( tp.submit_task( [&, netcode, layer]() { return build_netlayer_polys( netcode, layer ); } ) );
}
for( std::future<size_t>& ret : returns )
for( auto& ret : returns )
{
std::future_status status = ret.wait_for( std::chrono::milliseconds( 250 ) );
@ -541,11 +542,13 @@ bool DRC_TEST_PROVIDER_CONNECTION_WIDTH::Run()
if( minWidth - epsilon <= 0 )
continue;
returns.emplace_back( tp.submit( min_checker, itemsPoly, netLayer.Layer, minWidth ) );
returns.emplace_back( tp.submit_task( [min_checker, &itemsPoly, &netLayer, minWidth]() {
return min_checker( itemsPoly, netLayer.Layer, minWidth );
} ) );
}
}
for( std::future<size_t>& ret : returns )
for( auto& ret : returns )
{
std::future_status status = ret.wait_for( std::chrono::milliseconds( 250 ) );

View File

@ -594,115 +594,112 @@ void DRC_TEST_PROVIDER_COPPER_CLEARANCE::testTrackClearances()
LSET boardCopperLayers = LSET::AllCuMask( m_board->GetCopperLayerCount() );
auto testTrack = [&]( const int start_idx, const int end_idx )
auto testTrack = [&]( const int trackIdx )
{
for( int trackIdx = start_idx; trackIdx < end_idx; ++trackIdx )
PCB_TRACK* track = m_board->Tracks()[trackIdx];
for( PCB_LAYER_ID layer : LSET( track->GetLayerSet() & boardCopperLayers ) )
{
PCB_TRACK* track = m_board->Tracks()[trackIdx];
std::shared_ptr<SHAPE> trackShape = track->GetEffectiveShape( layer );
for( PCB_LAYER_ID layer : LSET( track->GetLayerSet() & boardCopperLayers ) )
m_board->m_CopperItemRTreeCache->QueryColliding( track, layer, layer,
// Filter:
[&]( BOARD_ITEM* other ) -> bool
{
BOARD_CONNECTED_ITEM* otherCItem = dynamic_cast<BOARD_CONNECTED_ITEM*>( other );
if( otherCItem && otherCItem->GetNetCode() == track->GetNetCode() )
return false;
BOARD_ITEM* a = track;
BOARD_ITEM* b = other;
// store canonical order so we don't collide in both directions
// (a:b and b:a)
if( static_cast<void*>( a ) > static_cast<void*>( b ) )
std::swap( a, b );
std::lock_guard<std::mutex> lock( checkedPairsMutex );
auto it = checkedPairs.find( { a, b } );
if( it != checkedPairs.end()
&& ( it->second.layers.test( layer ) || ( it->second.has_error ) ) )
{
return false;
}
else
{
checkedPairs[ { a, b } ].layers.set( layer );
return true;
}
},
// Visitor:
[&]( BOARD_ITEM* other ) -> bool
{
if( m_drcEngine->IsCancelled() )
return false;
if( other->Type() == PCB_PAD_T && static_cast<PAD*>( other )->IsFreePad() )
{
if( other->GetEffectiveShape( layer )->Collide( trackShape.get() ) )
{
std::lock_guard<std::mutex> lock( freePadsUsageMapMutex );
auto it = freePadsUsageMap.find( other );
if( it == freePadsUsageMap.end() )
{
freePadsUsageMap[ other ] = track->GetNetCode();
return true; // Continue colliding tests
}
else if( it->second == track->GetNetCode() )
{
return true; // Continue colliding tests
}
}
}
// If we get an error, mark the pair as having a clearance error already
if( !testSingleLayerItemAgainstItem( track, trackShape.get(), layer, other ) )
{
if( !m_drcEngine->GetReportAllTrackErrors() )
{
BOARD_ITEM* a = track;
BOARD_ITEM* b = other;
// store canonical order so we don't collide in both directions
// (a:b and b:a)
if( static_cast<void*>( a ) > static_cast<void*>( b ) )
std::swap( a, b );
std::lock_guard<std::mutex> lock( checkedPairsMutex );
auto it = checkedPairs.find( { a, b } );
if( it != checkedPairs.end() )
it->second.has_error = true;
return false; // We're done with this track
}
}
return !m_drcEngine->IsCancelled();
},
m_board->m_DRCMaxClearance );
for( ZONE* zone : m_board->m_DRCCopperZones )
{
std::shared_ptr<SHAPE> trackShape = track->GetEffectiveShape( layer );
testItemAgainstZone( track, zone, layer );
m_board->m_CopperItemRTreeCache->QueryColliding( track, layer, layer,
// Filter:
[&]( BOARD_ITEM* other ) -> bool
{
BOARD_CONNECTED_ITEM* otherCItem = dynamic_cast<BOARD_CONNECTED_ITEM*>( other );
if( otherCItem && otherCItem->GetNetCode() == track->GetNetCode() )
return false;
BOARD_ITEM* a = track;
BOARD_ITEM* b = other;
// store canonical order so we don't collide in both directions
// (a:b and b:a)
if( static_cast<void*>( a ) > static_cast<void*>( b ) )
std::swap( a, b );
std::lock_guard<std::mutex> lock( checkedPairsMutex );
auto it = checkedPairs.find( { a, b } );
if( it != checkedPairs.end()
&& ( it->second.layers.test( layer ) || ( it->second.has_error ) ) )
{
return false;
}
else
{
checkedPairs[ { a, b } ].layers.set( layer );
return true;
}
},
// Visitor:
[&]( BOARD_ITEM* other ) -> bool
{
if( m_drcEngine->IsCancelled() )
return false;
if( other->Type() == PCB_PAD_T && static_cast<PAD*>( other )->IsFreePad() )
{
if( other->GetEffectiveShape( layer )->Collide( trackShape.get() ) )
{
std::lock_guard<std::mutex> lock( freePadsUsageMapMutex );
auto it = freePadsUsageMap.find( other );
if( it == freePadsUsageMap.end() )
{
freePadsUsageMap[ other ] = track->GetNetCode();
return true; // Continue colliding tests
}
else if( it->second == track->GetNetCode() )
{
return true; // Continue colliding tests
}
}
}
// If we get an error, mark the pair as having a clearance error already
if( !testSingleLayerItemAgainstItem( track, trackShape.get(), layer, other ) )
{
if( !m_drcEngine->GetReportAllTrackErrors() )
{
BOARD_ITEM* a = track;
BOARD_ITEM* b = other;
// store canonical order so we don't collide in both directions
// (a:b and b:a)
if( static_cast<void*>( a ) > static_cast<void*>( b ) )
std::swap( a, b );
std::lock_guard<std::mutex> lock( checkedPairsMutex );
auto it = checkedPairs.find( { a, b } );
if( it != checkedPairs.end() )
it->second.has_error = true;
return false; // We're done with this track
}
}
return !m_drcEngine->IsCancelled();
},
m_board->m_DRCMaxClearance );
for( ZONE* zone : m_board->m_DRCCopperZones )
{
testItemAgainstZone( track, zone, layer );
if( m_drcEngine->IsCancelled() )
break;
}
if( m_drcEngine->IsCancelled() )
break;
}
done.fetch_add( 1 );
}
done.fetch_add( 1 );
};
thread_pool& tp = GetKiCadThreadPool();
tp.push_loop( m_board->Tracks().size(), testTrack );
auto track_futures = tp.submit_loop( 0, m_board->Tracks().size(), testTrack );
while( done < count )
{
@ -710,7 +707,8 @@ void DRC_TEST_PROVIDER_COPPER_CLEARANCE::testTrackClearances()
if( m_drcEngine->IsCancelled() )
{
tp.wait_for_tasks();
// Wait for the submitted loop tasks to finish
track_futures.wait();
break;
}
@ -967,82 +965,79 @@ void DRC_TEST_PROVIDER_COPPER_CLEARANCE::testPadClearances( )
LSET boardCopperLayers = LSET::AllCuMask( m_board->GetCopperLayerCount() );
const auto fp_check = [&]( size_t aFromIdx, size_t aToIdx )
const auto fp_check = [&]( size_t ii )
{
for( size_t ii = aFromIdx; ii < aToIdx; ++ii )
FOOTPRINT* footprint = m_board->Footprints()[ ii ];
for( PAD* pad : footprint->Pads() )
{
FOOTPRINT* footprint = m_board->Footprints()[ ii ];
for( PAD* pad : footprint->Pads() )
for( PCB_LAYER_ID layer : LSET( pad->GetLayerSet() & boardCopperLayers ) )
{
for( PCB_LAYER_ID layer : LSET( pad->GetLayerSet() & boardCopperLayers ) )
{
if( m_drcEngine->IsCancelled() )
return;
if( m_drcEngine->IsCancelled() )
return;
std::shared_ptr<SHAPE> padShape = pad->GetEffectiveShape( layer );
std::shared_ptr<SHAPE> padShape = pad->GetEffectiveShape( layer );
m_board->m_CopperItemRTreeCache->QueryColliding( pad, layer, layer,
// Filter:
[&]( BOARD_ITEM* other ) -> bool
m_board->m_CopperItemRTreeCache->QueryColliding( pad, layer, layer,
// Filter:
[&]( BOARD_ITEM* other ) -> bool
{
BOARD_ITEM* a = pad;
BOARD_ITEM* b = other;
// store canonical order so we don't collide in both
// directions (a:b and b:a)
if( static_cast<void*>( a ) > static_cast<void*>( b ) )
std::swap( a, b );
std::lock_guard<std::mutex> lock( checkedPairsMutex );
auto it = checkedPairs.find( { a, b } );
if( it != checkedPairs.end()
&& ( it->second.layers.test( layer ) || it->second.has_error ) )
{
return false;
}
else
{
checkedPairs[ { a, b } ].layers.set( layer );
return true;
}
},
// Visitor
[&]( BOARD_ITEM* other ) -> bool
{
if( !testPadAgainstItem( pad, padShape.get(), layer, other ) )
{
BOARD_ITEM* a = pad;
BOARD_ITEM* b = other;
// store canonical order so we don't collide in both
// directions (a:b and b:a)
if( static_cast<void*>( a ) > static_cast<void*>( b ) )
std::swap( a, b );
std::lock_guard<std::mutex> lock( checkedPairsMutex );
auto it = checkedPairs.find( { a, b } );
if( it != checkedPairs.end()
&& ( it->second.layers.test( layer ) || it->second.has_error ) )
{
return false;
}
else
{
checkedPairs[ { a, b } ].layers.set( layer );
return true;
}
},
// Visitor
[&]( BOARD_ITEM* other ) -> bool
{
if( !testPadAgainstItem( pad, padShape.get(), layer, other ) )
{
BOARD_ITEM* a = pad;
BOARD_ITEM* b = other;
if( it != checkedPairs.end() )
it->second.has_error = true;
}
std::lock_guard<std::mutex> lock( checkedPairsMutex );
auto it = checkedPairs.find( { a, b } );
return !m_drcEngine->IsCancelled();
},
m_board->m_DRCMaxClearance );
if( it != checkedPairs.end() )
it->second.has_error = true;
}
for( ZONE* zone : m_board->m_DRCCopperZones )
{
testItemAgainstZone( pad, zone, layer );
return !m_drcEngine->IsCancelled();
},
m_board->m_DRCMaxClearance );
for( ZONE* zone : m_board->m_DRCCopperZones )
{
testItemAgainstZone( pad, zone, layer );
if( m_drcEngine->IsCancelled() )
return;
}
if( m_drcEngine->IsCancelled() )
return;
}
}
done.fetch_add( 1 );
}
done.fetch_add( 1 );
};
size_t numFootprints = m_board->Footprints().size();
auto returns = tp.parallelize_loop( numFootprints, fp_check );
auto returns = tp.submit_loop( 0, numFootprints, fp_check );
// Wait for all threads to finish
for( size_t ii = 0; ii < returns.size(); ++ii )
@ -1152,7 +1147,7 @@ void DRC_TEST_PROVIDER_COPPER_CLEARANCE::testGraphicClearances()
m_board->m_DRCMaxClearance );
};
std::future<void> retn = tp.submit(
std::future<void> retn = tp.submit_task(
[&]()
{
for( BOARD_ITEM* item : m_board->Drawings() )
@ -1370,7 +1365,7 @@ void DRC_TEST_PROVIDER_COPPER_CLEARANCE::testZonesToZones()
continue;
count++;
tp.push_task( checkZones, ia, ia2, sameNet, layer );
tp.submit_task( [checkZones, ia, ia2, sameNet, layer]() { checkZones(ia, ia2, sameNet, layer); } );
}
}
}
@ -1382,7 +1377,7 @@ void DRC_TEST_PROVIDER_COPPER_CLEARANCE::testZonesToZones()
if( m_drcEngine->IsCancelled() )
break;
if( tp.wait_for_tasks_duration( std::chrono::milliseconds( 250 ) ) )
if( tp.wait_for( std::chrono::milliseconds( 250 ) ) )
break;
}

View File

@ -110,11 +110,11 @@ bool DRC_TEST_PROVIDER_DISALLOW::Run()
}
auto query_areas =
[&]( std::pair<ZONE* /* rule area */, ZONE* /* copper zone */> areaZonePair ) -> size_t
[&]( const int idx ) -> size_t
{
if( m_drcEngine->IsCancelled() )
return 0;
const auto& areaZonePair = toCache[idx];
ZONE* ruleArea = areaZonePair.first;
ZONE* copperZone = areaZonePair.second;
BOX2I areaBBox = ruleArea->GetBoundingBox();
@ -169,14 +169,9 @@ bool DRC_TEST_PROVIDER_DISALLOW::Run()
};
thread_pool& tp = GetKiCadThreadPool();
std::vector<std::future<size_t>> returns;
auto futures = tp.submit_loop( 0, toCache.size(), query_areas );
returns.reserve( toCache.size() );
for( const std::pair<ZONE*, ZONE*>& areaZonePair : toCache )
returns.emplace_back( tp.submit( query_areas, areaZonePair ) );
for( const std::future<size_t>& ret : returns )
for( auto& ret : futures )
{
std::future_status status = ret.wait_for( std::chrono::milliseconds( 250 ) );

View File

@ -149,14 +149,10 @@ bool DRC_TEST_PROVIDER_SLIVER_CHECKER::Run()
};
thread_pool& tp = GetKiCadThreadPool();
std::vector<std::future<size_t>> returns;
returns.reserve( copperLayers.size() );
auto returns = tp.submit_loop( 0, copperLayers.size(), build_layer_polys );
for( size_t ii = 0; ii < copperLayers.size(); ++ii )
returns.emplace_back( tp.submit( build_layer_polys, ii ) );
for( const std::future<size_t>& ret : returns )
for( auto& ret : returns )
{
std::future_status status = ret.wait_for( std::chrono::milliseconds( 250 ) );

View File

@ -737,50 +737,47 @@ void DRC_TEST_PROVIDER_SOLDER_MASK::testMaskBridges()
thread_pool& tp = GetKiCadThreadPool();
auto returns = tp.parallelize_loop( test_items.size(), [&]( size_t a, size_t b ) -> bool
auto returns = tp.submit_loop( 0, test_items.size(), [&]( size_t i ) -> bool
{
BOARD_ITEM* item = test_items[ i ];
if( m_drcEngine->IsErrorLimitExceeded( DRCE_SOLDERMASK_BRIDGE ) )
return false;
BOX2I itemBBox = item->GetBoundingBox();
if( item->IsOnLayer( F_Mask ) && !isNullAperture( item ) )
{
for( size_t i = a; i < b; ++i )
{
BOARD_ITEM* item = test_items[ i ];
// Test for aperture-to-aperture collisions
testItemAgainstItems( item, itemBBox, F_Mask, F_Mask );
if( m_drcEngine->IsErrorLimitExceeded( DRCE_SOLDERMASK_BRIDGE ) )
return false;
// Test for aperture-to-zone collisions
testMaskItemAgainstZones( item, itemBBox, F_Mask, F_Cu );
}
else if( item->IsOnLayer( PADSTACK::ALL_LAYERS ) )
{
// Test for copper-item-to-aperture collisions
testItemAgainstItems( item, itemBBox, F_Cu, F_Mask );
}
BOX2I itemBBox = item->GetBoundingBox();
if( item->IsOnLayer( B_Mask ) && !isNullAperture( item ) )
{
// Test for aperture-to-aperture collisions
testItemAgainstItems( item, itemBBox, B_Mask, B_Mask );
if( item->IsOnLayer( F_Mask ) && !isNullAperture( item ) )
{
// Test for aperture-to-aperture collisions
testItemAgainstItems( item, itemBBox, F_Mask, F_Mask );
// Test for aperture-to-zone collisions
testMaskItemAgainstZones( item, itemBBox, B_Mask, B_Cu );
}
else if( item->IsOnLayer( B_Cu ) )
{
// Test for copper-item-to-aperture collisions
testItemAgainstItems( item, itemBBox, B_Cu, B_Mask );
}
// Test for aperture-to-zone collisions
testMaskItemAgainstZones( item, itemBBox, F_Mask, F_Cu );
}
else if( item->IsOnLayer( PADSTACK::ALL_LAYERS ) )
{
// Test for copper-item-to-aperture collisions
testItemAgainstItems( item, itemBBox, F_Cu, F_Mask );
}
++count;
if( item->IsOnLayer( B_Mask ) && !isNullAperture( item ) )
{
// Test for aperture-to-aperture collisions
testItemAgainstItems( item, itemBBox, B_Mask, B_Mask );
// Test for aperture-to-zone collisions
testMaskItemAgainstZones( item, itemBBox, B_Mask, B_Cu );
}
else if( item->IsOnLayer( B_Cu ) )
{
// Test for copper-item-to-aperture collisions
testItemAgainstItems( item, itemBBox, B_Cu, B_Mask );
}
++count;
}
return true;
} );
return true;
} );
for( size_t i = 0; i < returns.size(); ++i )
{

View File

@ -70,13 +70,15 @@ bool DRC_TEST_PROVIDER_TRACK_ANGLE::Run()
return false; // DRC cancelled
auto checkTrackAngle =
[&]( PCB_TRACK* item ) -> bool
[&]( const int ind ) -> bool
{
if( m_drcEngine->IsErrorLimitExceeded( DRCE_TRACK_ANGLE ) )
{
return false;
}
PCB_TRACK* item = m_drcEngine->GetBoard()->Tracks()[ind];
if( item->Type() != PCB_TRACE_T )
{
return true;
@ -195,17 +197,10 @@ bool DRC_TEST_PROVIDER_TRACK_ANGLE::Run()
const int progressDelta = 250;
int ii = 0;
thread_pool& tp = GetKiCadThreadPool();
std::vector<std::future<bool>> returns;
thread_pool& tp = GetKiCadThreadPool();
auto futures = tp.submit_loop( 0, m_drcEngine->GetBoard()->Tracks().size(), checkTrackAngle );
returns.reserve( m_drcEngine->GetBoard()->Tracks().size() );
for( PCB_TRACK* item : m_drcEngine->GetBoard()->Tracks() )
{
returns.emplace_back( tp.submit( checkTrackAngle, item ) );
}
for( std::future<bool>& ret : returns )
for( auto& ret : futures )
{
std::future_status status = ret.wait_for( std::chrono::milliseconds( 250 ) );

View File

@ -67,8 +67,9 @@ bool DRC_TEST_PROVIDER_TRACK_SEGMENT_LENGTH::Run()
return false; // DRC cancelled
auto checkTrackSegmentLength =
[&]( BOARD_ITEM* item ) -> bool
[&]( const int idx ) -> bool
{
BOARD_ITEM* item = m_drcEngine->GetBoard()->Tracks()[idx];
if( m_drcEngine->IsErrorLimitExceeded( DRCE_TRACK_SEGMENT_LENGTH ) )
return false;
@ -153,16 +154,9 @@ bool DRC_TEST_PROVIDER_TRACK_SEGMENT_LENGTH::Run()
int ii = 0;
thread_pool& tp = GetKiCadThreadPool();
std::vector<std::future<bool>> returns;
auto futures = tp.submit_loop( 0, m_drcEngine->GetBoard()->Tracks().size(), checkTrackSegmentLength );
returns.reserve( m_drcEngine->GetBoard()->Tracks().size() );
for( PCB_TRACK* item : m_drcEngine->GetBoard()->Tracks() )
{
returns.emplace_back( tp.submit( checkTrackSegmentLength, item ) );
}
for( std::future<bool>& ret : returns )
for( auto& ret : futures )
{
std::future_status status = ret.wait_for( std::chrono::milliseconds( 250 ) );

View File

@ -319,27 +319,17 @@ bool DRC_TEST_PROVIDER_ZONE_CONNECTIONS::Run()
total_effort = std::max( (size_t) 1, total_effort );
thread_pool& tp = GetKiCadThreadPool();
std::vector<std::future<int>> returns;
auto returns = tp.submit_loop( 0, zoneLayers.size(),
[&]( const int ii )
{
if( !m_drcEngine->IsCancelled() )
{
testZoneLayer( zoneLayers[ii].first, zoneLayers[ii].second );
done.fetch_add( zoneLayers[ii].first->GetFilledPolysList( zoneLayers[ii].second )->FullPointCount() );
}
} );
returns.reserve( zoneLayers.size() );
for( const std::pair<ZONE*, PCB_LAYER_ID>& zonelayer : zoneLayers )
{
returns.emplace_back( tp.submit(
[&]( ZONE* aZone, PCB_LAYER_ID aLayer ) -> int
{
if( !m_drcEngine->IsCancelled() )
{
testZoneLayer( aZone, aLayer );
done.fetch_add( aZone->GetFilledPolysList( aLayer )->FullPointCount() );
}
return 0;
},
zonelayer.first, zonelayer.second ) );
}
for( const std::future<int>& ret : returns )
for( auto& ret : returns )
{
std::future_status status = ret.wait_for( std::chrono::milliseconds( 250 ) );

View File

@ -2026,76 +2026,73 @@ bool STEP_PCB_MODEL::CreatePCB( SHAPE_POLY_SET& aOutline, const VECTOR2D& aOrigi
{
std::mutex mutex;
auto subtractLoopFn = [&]( const int a, const int b )
auto subtractLoopFn = [&]( const int shapeId )
{
for( int shapeId = a; shapeId < b; shapeId++ )
TopoDS_Shape& shape = vec[shapeId];
Bnd_Box shapeBbox;
BRepBndLib::Add( shape, shapeBbox );
TopTools_ListOfShape holelist;
{
TopoDS_Shape& shape = vec[shapeId];
std::unique_lock lock( mutex );
Bnd_Box shapeBbox;
BRepBndLib::Add( shape, shapeBbox );
const TColStd_ListOfInteger& indices = aBSBHoles.Compare( shapeBbox );
TopTools_ListOfShape holelist;
{
std::unique_lock lock( mutex );
const TColStd_ListOfInteger& indices = aBSBHoles.Compare( shapeBbox );
for( const Standard_Integer& index : indices )
holelist.Append( aHolesList[index] );
}
if( holelist.IsEmpty() )
continue;
TopTools_ListOfShape cutArgs;
cutArgs.Append( shape );
BRepAlgoAPI_Cut cut;
cut.SetRunParallel( true );
cut.SetToFillHistory( false );
cut.SetArguments( cutArgs );
cut.SetTools( holelist );
cut.Build();
if( cut.HasErrors() || cut.HasWarnings() )
{
m_reporter->Report( wxString::Format( _( "** Got problems while cutting "
"%s net '%s' **" ),
aWhat,
UnescapeString( netname ) ),
RPT_SEVERITY_ERROR );
shapeBbox.Dump();
if( cut.HasErrors() )
{
wxString msg = _( "Errors:\n" );
wxStringOutputStream os_stream( &msg );
wxStdOutputStream out( os_stream );
cut.DumpErrors( out );
m_reporter->Report( msg, RPT_SEVERITY_ERROR );
}
if( cut.HasWarnings() )
{
wxString msg = _( "Warnings:\n" );
wxStringOutputStream os_stream( &msg );
wxStdOutputStream out( os_stream );
cut.DumpWarnings( out );
m_reporter->Report( msg, RPT_SEVERITY_WARNING );
}
}
shape = cut.Shape();
for( const Standard_Integer& index : indices )
holelist.Append( aHolesList[index] );
}
if( holelist.IsEmpty() )
return; // nothing to cut for this shape
TopTools_ListOfShape cutArgs;
cutArgs.Append( shape );
BRepAlgoAPI_Cut cut;
cut.SetRunParallel( true );
cut.SetToFillHistory( false );
cut.SetArguments( cutArgs );
cut.SetTools( holelist );
cut.Build();
if( cut.HasErrors() || cut.HasWarnings() )
{
m_reporter->Report( wxString::Format( _( "** Got problems while cutting "
"%s net '%s' **" ),
aWhat,
UnescapeString( netname ) ),
RPT_SEVERITY_ERROR );
shapeBbox.Dump();
if( cut.HasErrors() )
{
wxString msg = _( "Errors:\n" );
wxStringOutputStream os_stream( &msg );
wxStdOutputStream out( os_stream );
cut.DumpErrors( out );
m_reporter->Report( msg, RPT_SEVERITY_ERROR );
}
if( cut.HasWarnings() )
{
wxString msg = _( "Warnings:\n" );
wxStringOutputStream os_stream( &msg );
wxStdOutputStream out( os_stream );
cut.DumpWarnings( out );
m_reporter->Report( msg, RPT_SEVERITY_WARNING );
}
}
shape = cut.Shape();
};
tp.parallelize_loop( vec.size(), subtractLoopFn ).wait();
tp.submit_loop( 0, vec.size(), subtractLoopFn ).wait();
}
};
@ -2172,7 +2169,7 @@ bool STEP_PCB_MODEL::CreatePCB( SHAPE_POLY_SET& aOutline, const VECTOR2D& aOrigi
BS::multi_future<void> mf;
for( const auto& [netname, _] : shapesToFuseMap )
mf.push_back( tp.submit( fuseLoopFn, netname ) );
mf.push_back( tp.submit_task( [&, netname]() { fuseLoopFn( netname ); } ) );
mf.wait();
}

View File

@ -207,7 +207,7 @@ void FOOTPRINT_LIST_IMPL::loadFootprints()
};
for( size_t ii = 0; ii < num_elements; ++ii )
returns[ii] = tp.submit( fp_thread );
returns[ii] = tp.submit_task( fp_thread );
for( const std::future<size_t>& ret : returns )
{

View File

@ -581,7 +581,7 @@ void TRACKS_CLEANER::cleanup( bool aDeleteDuplicateVias, bool aDeleteNullSegment
// and extract all of the pairs of segments that might be merged. Then, perform
// the actual merge in the main loop.
thread_pool& tp = GetKiCadThreadPool();
auto merge_returns = tp.parallelize_loop( 0, m_brd->Tracks().size(), track_loop );
auto merge_returns = tp.submit_blocks( 0, m_brd->Tracks().size(), track_loop );
bool retval = false;
for( size_t ii = 0; ii < merge_returns.size(); ++ii )

View File

@ -654,49 +654,46 @@ PCB_NET_INSPECTOR_PANEL::calculateNets( const std::vector<NETINFO_ITEM*>& aNetCo
std::mutex resultsMutex;
thread_pool& tp = GetKiCadThreadPool();
auto resultsFuture = tp.parallelize_loop(
0, foundNets.size(),
[&, this, calc]( const int start, const int end )
auto resultsFuture = tp.submit_loop(
0, foundNets.size(),
[&, this, calc]( const int i )
{
int netCode = foundNets[i]->GetNetCode();
constexpr PATH_OPTIMISATIONS opts = { .OptimiseViaLayers = true,
.MergeTracks = true,
.OptimiseTracesInPads = true,
.InferViaInPad = false };
LENGTH_DELAY_STATS lengthDetails = calc->CalculateLengthDetails(
netItemsMap[netCode],
opts,
nullptr,
nullptr,
LENGTH_DELAY_LAYER_OPT::WITH_LAYER_DETAIL,
m_showTimeDomainDetails ? LENGTH_DELAY_DOMAIN_OPT::WITH_DELAY_DETAIL
: LENGTH_DELAY_DOMAIN_OPT::NO_DELAY_DETAIL );
if( aIncludeZeroPadNets || lengthDetails.NumPads > 0 )
{
for( int i = start; i < end; ++i )
{
int netCode = foundNets[i]->GetNetCode();
std::unique_ptr<LIST_ITEM> new_item = std::make_unique<LIST_ITEM>( foundNets[i] );
constexpr PATH_OPTIMISATIONS opts = { .OptimiseViaLayers = true,
.MergeTracks = true,
.OptimiseTracesInPads = true,
.InferViaInPad = false };
new_item->SetPadCount( lengthDetails.NumPads );
new_item->SetLayerCount( m_board->GetCopperLayerCount() );
new_item->SetPadDieLength( lengthDetails.PadToDieLength );
new_item->SetPadDieDelay( lengthDetails.PadToDieDelay );
new_item->SetViaCount( lengthDetails.NumVias );
new_item->SetViaLength( lengthDetails.ViaLength );
new_item->SetViaDelay( lengthDetails.ViaDelay );
new_item->SetLayerWireLengths( *lengthDetails.LayerLengths );
LENGTH_DELAY_STATS lengthDetails = calc->CalculateLengthDetails(
netItemsMap[netCode],
opts,
nullptr,
nullptr,
LENGTH_DELAY_LAYER_OPT::WITH_LAYER_DETAIL,
m_showTimeDomainDetails ? LENGTH_DELAY_DOMAIN_OPT::WITH_DELAY_DETAIL
: LENGTH_DELAY_DOMAIN_OPT::NO_DELAY_DETAIL );
if( m_showTimeDomainDetails )
new_item->SetLayerWireDelays( *lengthDetails.LayerDelays );
if( aIncludeZeroPadNets || lengthDetails.NumPads > 0 )
{
std::unique_ptr<LIST_ITEM> new_item = std::make_unique<LIST_ITEM>( foundNets[i] );
new_item->SetPadCount( lengthDetails.NumPads );
new_item->SetLayerCount( m_board->GetCopperLayerCount() );
new_item->SetPadDieLength( lengthDetails.PadToDieLength );
new_item->SetPadDieDelay( lengthDetails.PadToDieDelay );
new_item->SetViaCount( lengthDetails.NumVias );
new_item->SetViaLength( lengthDetails.ViaLength );
new_item->SetViaDelay( lengthDetails.ViaDelay );
new_item->SetLayerWireLengths( *lengthDetails.LayerLengths );
if( m_showTimeDomainDetails )
new_item->SetLayerWireDelays( *lengthDetails.LayerDelays );
std::scoped_lock lock( resultsMutex );
results.emplace_back( std::move( new_item ) );
}
}
} );
std::scoped_lock lock( resultsMutex );
results.emplace_back( std::move( new_item ) );
}
} );
resultsFuture.get();

View File

@ -611,7 +611,7 @@ bool ZONE_FILLER::Fill( const std::vector<ZONE*>& aZones, bool aCheck, wxWindow*
thread_pool& tp = GetKiCadThreadPool();
for( const std::pair<ZONE*, PCB_LAYER_ID>& fillItem : toFill )
returns.emplace_back( std::make_pair( tp.submit( fill_lambda, fillItem ), 0 ) );
returns.emplace_back( std::make_pair( tp.submit_task( [&, fillItem]() { return fill_lambda( fillItem ); } ), 0 ) );
while( !cancelled && finished != 2 * toFill.size() )
{
@ -636,9 +636,9 @@ bool ZONE_FILLER::Fill( const std::vector<ZONE*>& aZones, bool aCheck, wxWindow*
{
// Queue the next step (will re-queue the existing step if it didn't complete)
if( ret.second == 0 )
returns[ii].first = tp.submit( fill_lambda, toFill[ii] );
returns[ii].first = tp.submit_task( [&, idx = ii]() { return fill_lambda( toFill[idx] ); } );
else if( ret.second == 1 )
returns[ii].first = tp.submit( tesselate_lambda, toFill[ii] );
returns[ii].first = tp.submit_task( [&, idx = ii]() { return tesselate_lambda( toFill[idx] ); } );
}
}
}
@ -827,7 +827,7 @@ bool ZONE_FILLER::Fill( const std::vector<ZONE*>& aZones, bool aCheck, wxWindow*
return retval;
};
auto island_returns = tp.parallelize_loop( 0, polys_to_check.size(), island_lambda );
auto island_returns = tp.submit_blocks( 0, polys_to_check.size(), island_lambda );
cancelled = false;
// Allow island removal threads to finish

View File

@ -97,7 +97,7 @@ long PYTHON_MANAGER::Execute( const std::vector<wxString>& aArgs,
PYTHON_PROCESS* process = new PYTHON_PROCESS( aCallback );
process->Redirect();
auto monitor =
auto monitor =
[]( PYTHON_PROCESS* aProcess )
{
wxInputStream* processOut = aProcess->GetInputStream();
@ -147,7 +147,7 @@ long PYTHON_MANAGER::Execute( const std::vector<wxString>& aArgs,
if( !aSaveOutput )
{
thread_pool& tp = GetKiCadThreadPool();
auto ret = tp.submit( monitor, process );
auto ret = tp.submit_task( [monitor, process] { monitor( process ); } );
}
}
@ -236,7 +236,7 @@ std::optional<wxString> PYTHON_MANAGER::GetVirtualPython( const wxString& aNames
return std::nullopt;
wxFileName python( *envPath, wxEmptyString );
#ifdef _WIN32
python.AppendDir( "Scripts" );
python.SetFullName( "pythonw.exe" );

View File

@ -5,21 +5,20 @@
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
#cmake_minimum_required(VERSION 3.4)
# Propagate this policy (FindPythonInterp removal) so it can be detected later
if(NOT CMAKE_VERSION VERSION_LESS "3.27")
cmake_policy(GET CMP0148 _pybind11_cmp0148)
endif()
# The `cmake_minimum_required(VERSION 3.4...3.22)` syntax does not work with
# some versions of VS that have a patched CMake 3.11. This forces us to emulate
# the behavior using the following workaround:
if(${CMAKE_VERSION} VERSION_LESS 3.22)
cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION})
else()
cmake_policy(VERSION 3.22)
cmake_minimum_required(VERSION 3.15...4.0)
if(_pybind11_cmp0148)
cmake_policy(SET CMP0148 ${_pybind11_cmp0148})
unset(_pybind11_cmp0148)
endif()
# Avoid infinite recursion if tests include this as a subdirectory
if(DEFINED PYBIND11_MASTER_PROJECT)
return()
endif()
include_guard(GLOBAL)
# Extract project version from source
file(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/include/pybind11/detail/common.h"
@ -64,16 +63,15 @@ if(CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR)
set(PYBIND11_MASTER_PROJECT ON)
if(OSX AND CMAKE_VERSION VERSION_LESS 3.7)
# Bug in macOS CMake < 3.7 is unable to download catch
message(WARNING "CMAKE 3.7+ needed on macOS to download catch, and newer HIGHLY recommended")
elseif(WINDOWS AND CMAKE_VERSION VERSION_LESS 3.8)
# Only tested with 3.8+ in CI.
message(WARNING "CMAKE 3.8+ tested on Windows, previous versions untested")
endif()
message(STATUS "CMake ${CMAKE_VERSION}")
if(DEFINED SKBUILD AND DEFINED ENV{PYBIND11_GLOBAL_SDIST})
message(
FATAL_ERROR
"PYBIND11_GLOBAL_SDIST is not supported, use nox -s build_global or a pybind11-global SDist instead."
)
endif()
if(CMAKE_CXX_STANDARD)
set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
@ -82,59 +80,160 @@ if(CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR)
set(pybind11_system "")
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
if(CMAKE_VERSION VERSION_LESS "3.18")
set(_pybind11_findpython_default OFF)
else()
set(_pybind11_findpython_default ON)
endif()
else()
set(PYBIND11_MASTER_PROJECT OFF)
set(pybind11_system SYSTEM)
set(_pybind11_findpython_default COMPAT)
endif()
# Options
option(PYBIND11_INSTALL "Install pybind11 header files?" ${PYBIND11_MASTER_PROJECT})
option(PYBIND11_TEST "Build pybind11 test suite?" ${PYBIND11_MASTER_PROJECT})
option(PYBIND11_NOPYTHON "Disable search for Python" OFF)
option(PYBIND11_DISABLE_HANDLE_TYPE_NAME_DEFAULT_IMPLEMENTATION
"To enforce that a handle_type_name<> specialization exists" OFF)
option(PYBIND11_SIMPLE_GIL_MANAGEMENT
"Use simpler GIL management logic that does not support disassociation" OFF)
set(PYBIND11_INTERNALS_VERSION
""
CACHE STRING "Override the ABI version, may be used to enable the unstable ABI.")
option(PYBIND11_USE_CROSSCOMPILING "Respect CMAKE_CROSSCOMPILING" OFF)
if(PYBIND11_DISABLE_HANDLE_TYPE_NAME_DEFAULT_IMPLEMENTATION)
add_compile_definitions(PYBIND11_DISABLE_HANDLE_TYPE_NAME_DEFAULT_IMPLEMENTATION)
endif()
if(PYBIND11_SIMPLE_GIL_MANAGEMENT)
add_compile_definitions(PYBIND11_SIMPLE_GIL_MANAGEMENT)
endif()
cmake_dependent_option(
USE_PYTHON_INCLUDE_DIR
"Install pybind11 headers in Python include directory instead of default installation prefix"
OFF "PYBIND11_INSTALL" OFF)
cmake_dependent_option(PYBIND11_FINDPYTHON "Force new FindPython" OFF
"NOT CMAKE_VERSION VERSION_LESS 3.12" OFF)
set(PYBIND11_FINDPYTHON
${_pybind11_findpython_default}
CACHE STRING "Force new FindPython - NEW, OLD, COMPAT")
if(PYBIND11_MASTER_PROJECT)
# Allow PYTHON_EXECUTABLE if in FINDPYTHON mode and building pybind11's tests
# (makes transition easier while we support both modes).
if(PYBIND11_FINDPYTHON
AND DEFINED PYTHON_EXECUTABLE
AND NOT DEFINED Python_EXECUTABLE)
set(Python_EXECUTABLE "${PYTHON_EXECUTABLE}")
endif()
# This is a shortcut that is primarily for the venv cmake preset,
# but can be used to quickly setup tests manually, too
set(PYBIND11_CREATE_WITH_UV
""
CACHE STRING "Create a virtualenv if it doesn't exist")
if(NOT PYBIND11_CREATE_WITH_UV STREQUAL "")
set(Python_ROOT_DIR "${CMAKE_CURRENT_BINARY_DIR}/.venv")
if(EXISTS "${Python_ROOT_DIR}")
if(EXISTS "${CMAKE_BINARY_DIR}/CMakeCache.txt")
message(STATUS "Using existing venv at ${Python_ROOT_DIR}, remove or --fresh to recreate")
else()
# --fresh used to remove the cache
file(REMOVE_RECURSE "${CMAKE_CURRENT_BINARY_DIR}/.venv")
endif()
endif()
if(NOT EXISTS "${Python_ROOT_DIR}")
find_program(UV uv REQUIRED)
# CMake 3.19+ would be able to use COMMAND_ERROR_IS_FATAL
message(
STATUS "Creating venv with ${UV} venv -p ${PYBIND11_CREATE_WITH_UV} '${Python_ROOT_DIR}'")
execute_process(COMMAND ${UV} venv -p ${PYBIND11_CREATE_WITH_UV} "${Python_ROOT_DIR}"
RESULT_VARIABLE _venv_result)
if(_venv_result AND NOT _venv_result EQUAL 0)
message(FATAL_ERROR "uv venv failed with '${_venv_result}'")
endif()
message(
STATUS
"Installing deps with ${UV} pip install -p '${Python_ROOT_DIR}' -r tests/requirements.txt"
)
execute_process(
COMMAND ${UV} pip install -p "${Python_ROOT_DIR}" -r
"${CMAKE_CURRENT_SOURCE_DIR}/tests/requirements.txt" RESULT_VARIABLE _pip_result)
if(_pip_result AND NOT _pip_result EQUAL 0)
message(FATAL_ERROR "uv pip install failed with '${_pip_result}'")
endif()
endif()
else()
if(NOT DEFINED Python3_EXECUTABLE
AND NOT DEFINED Python_EXECUTABLE
AND NOT DEFINED Python_ROOT_DIR
AND NOT DEFINED ENV{VIRTUALENV}
AND EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.venv")
message(STATUS "Autodetecting Python in virtual environment")
set(Python_ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/.venv")
endif()
endif()
endif()
# NB: when adding a header don't forget to also add it to setup.py
set(PYBIND11_HEADERS
include/pybind11/detail/class.h
include/pybind11/detail/common.h
include/pybind11/detail/cpp_conduit.h
include/pybind11/detail/descr.h
include/pybind11/detail/dynamic_raw_ptr_cast_if_possible.h
include/pybind11/detail/exception_translation.h
include/pybind11/detail/function_record_pyobject.h
include/pybind11/detail/init.h
include/pybind11/detail/internals.h
include/pybind11/detail/native_enum_data.h
include/pybind11/detail/pybind11_namespace_macros.h
include/pybind11/detail/struct_smart_holder.h
include/pybind11/detail/type_caster_base.h
include/pybind11/detail/typeid.h
include/pybind11/detail/using_smart_holder.h
include/pybind11/detail/value_and_holder.h
include/pybind11/attr.h
include/pybind11/buffer_info.h
include/pybind11/cast.h
include/pybind11/chrono.h
include/pybind11/common.h
include/pybind11/complex.h
include/pybind11/conduit/pybind11_conduit_v1.h
include/pybind11/conduit/pybind11_platform_abi_id.h
include/pybind11/conduit/wrap_include_python_h.h
include/pybind11/critical_section.h
include/pybind11/options.h
include/pybind11/eigen.h
include/pybind11/eigen/common.h
include/pybind11/eigen/matrix.h
include/pybind11/eigen/tensor.h
include/pybind11/embed.h
include/pybind11/eval.h
include/pybind11/gil.h
include/pybind11/gil_safe_call_once.h
include/pybind11/gil_simple.h
include/pybind11/iostream.h
include/pybind11/functional.h
include/pybind11/native_enum.h
include/pybind11/numpy.h
include/pybind11/operators.h
include/pybind11/pybind11.h
include/pybind11/pytypes.h
include/pybind11/subinterpreter.h
include/pybind11/stl.h
include/pybind11/stl_bind.h
include/pybind11/stl/filesystem.h)
include/pybind11/stl/filesystem.h
include/pybind11/trampoline_self_life_support.h
include/pybind11/type_caster_pyobject_ptr.h
include/pybind11/typing.h
include/pybind11/warnings.h)
# Compare with grep and warn if mismatched
if(PYBIND11_MASTER_PROJECT AND NOT CMAKE_VERSION VERSION_LESS 3.12)
if(PYBIND11_MASTER_PROJECT)
file(
GLOB_RECURSE _pybind11_header_check
LIST_DIRECTORIES false
@ -152,10 +251,7 @@ if(PYBIND11_MASTER_PROJECT AND NOT CMAKE_VERSION VERSION_LESS 3.12)
endif()
endif()
# CMake 3.12 added list(TRANSFORM <list> PREPEND
# But we can't use it yet
string(REPLACE "include/" "${CMAKE_CURRENT_SOURCE_DIR}/include/" PYBIND11_HEADERS
"${PYBIND11_HEADERS}")
list(TRANSFORM PYBIND11_HEADERS PREPEND "${CMAKE_CURRENT_SOURCE_DIR}/")
# Cache variable so this can be used in parent projects
set(pybind11_INCLUDE_DIR
@ -198,6 +294,9 @@ else()
endif()
include("${CMAKE_CURRENT_SOURCE_DIR}/tools/pybind11Common.cmake")
# https://github.com/jtojnar/cmake-snips/#concatenating-paths-when-building-pkg-config-files
# TODO: cmake 3.20 adds the cmake_path() function, which obsoletes this snippet
include("${CMAKE_CURRENT_SOURCE_DIR}/tools/JoinPaths.cmake")
# Relative directory setting
if(USE_PYTHON_INCLUDE_DIR AND DEFINED Python_INCLUDE_DIRS)
@ -207,6 +306,9 @@ elseif(USE_PYTHON_INCLUDE_DIR AND DEFINED PYTHON_INCLUDE_DIR)
endif()
if(PYBIND11_INSTALL)
if(DEFINED SKBUILD_PROJECT_NAME AND SKBUILD_PROJECT_NAME STREQUAL "pybind11_global")
install(DIRECTORY ${pybind11_INCLUDE_DIR}/pybind11 DESTINATION "${SKBUILD_HEADERS_DIR}")
endif()
install(DIRECTORY ${pybind11_INCLUDE_DIR}/pybind11 DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
set(PYBIND11_CMAKECONFIG_INSTALL_DIR
"${CMAKE_INSTALL_DATAROOTDIR}/cmake/${PROJECT_NAME}"
@ -222,25 +324,11 @@ if(PYBIND11_INSTALL)
tools/${PROJECT_NAME}Config.cmake.in "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake"
INSTALL_DESTINATION ${PYBIND11_CMAKECONFIG_INSTALL_DIR})
if(CMAKE_VERSION VERSION_LESS 3.14)
# Remove CMAKE_SIZEOF_VOID_P from ConfigVersion.cmake since the library does
# not depend on architecture specific settings or libraries.
set(_PYBIND11_CMAKE_SIZEOF_VOID_P ${CMAKE_SIZEOF_VOID_P})
unset(CMAKE_SIZEOF_VOID_P)
write_basic_package_version_file(
${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake
VERSION ${PROJECT_VERSION}
COMPATIBILITY AnyNewerVersion)
set(CMAKE_SIZEOF_VOID_P ${_PYBIND11_CMAKE_SIZEOF_VOID_P})
else()
# CMake 3.14+ natively supports header-only libraries
write_basic_package_version_file(
${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake
VERSION ${PROJECT_VERSION}
COMPATIBILITY AnyNewerVersion ARCH_INDEPENDENT)
endif()
# CMake natively supports header-only libraries
write_basic_package_version_file(
${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake
VERSION ${PROJECT_VERSION}
COMPATIBILITY AnyNewerVersion ARCH_INDEPENDENT)
install(
FILES ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake
@ -249,6 +337,7 @@ if(PYBIND11_INSTALL)
tools/pybind11Common.cmake
tools/pybind11Tools.cmake
tools/pybind11NewTools.cmake
tools/pybind11GuessPythonExtSuffix.cmake
DESTINATION ${PYBIND11_CMAKECONFIG_INSTALL_DIR})
if(NOT PYBIND11_EXPORT_NAME)
@ -262,6 +351,41 @@ if(PYBIND11_INSTALL)
NAMESPACE "pybind11::"
DESTINATION ${PYBIND11_CMAKECONFIG_INSTALL_DIR})
# pkg-config support
if(NOT prefix_for_pc_file)
if(IS_ABSOLUTE "${CMAKE_INSTALL_DATAROOTDIR}")
set(prefix_for_pc_file "${CMAKE_INSTALL_PREFIX}")
else()
set(pc_datarootdir "${CMAKE_INSTALL_DATAROOTDIR}")
if(CMAKE_VERSION VERSION_LESS 3.20)
set(prefix_for_pc_file "\${pcfiledir}/..")
while(pc_datarootdir)
get_filename_component(pc_datarootdir "${pc_datarootdir}" DIRECTORY)
string(APPEND prefix_for_pc_file "/..")
endwhile()
else()
cmake_path(RELATIVE_PATH CMAKE_INSTALL_PREFIX BASE_DIRECTORY CMAKE_INSTALL_DATAROOTDIR
OUTPUT_VARIABLE prefix_for_pc_file)
endif()
endif()
endif()
join_paths(includedir_for_pc_file "\${prefix}" "${CMAKE_INSTALL_INCLUDEDIR}")
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/tools/pybind11.pc.in"
"${CMAKE_CURRENT_BINARY_DIR}/pybind11.pc" @ONLY)
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/pybind11.pc"
DESTINATION "${CMAKE_INSTALL_DATAROOTDIR}/pkgconfig/")
# When building a wheel, include __init__.py's for modules
# (see https://github.com/pybind/pybind11/pull/5552)
if(DEFINED SKBUILD_PROJECT_NAME AND SKBUILD_PROJECT_NAME STREQUAL "pybind11")
file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/empty")
file(TOUCH "${CMAKE_CURRENT_BINARY_DIR}/empty/__init__.py")
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/empty/__init__.py"
DESTINATION "${CMAKE_INSTALL_DATAROOTDIR}/")
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/empty/__init__.py"
DESTINATION "${CMAKE_INSTALL_DATAROOTDIR}/pkgconfig/")
endif()
# Uninstall target
if(PYBIND11_MASTER_PROJECT)
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/tools/cmake_uninstall.cmake.in"

View File

@ -1,6 +0,0 @@
recursive-include pybind11/include/pybind11 *.h
recursive-include pybind11 *.py
recursive-include pybind11 py.typed
recursive-include pybind11 *.pyi
include pybind11/share/cmake/pybind11/*.cmake
include LICENSE README.rst pyproject.toml setup.py setup.cfg

View File

@ -1,9 +1,11 @@
.. figure:: https://github.com/pybind/pybind11/raw/master/docs/pybind11-logo.png
:alt: pybind11 logo
**pybind11 — Seamless operability between C++11 and Python**
**pybind11 (v3) — Seamless interoperability between C++ and Python**
|Latest Documentation Status| |Stable Documentation Status| |Gitter chat| |GitHub Discussions| |CI| |Build status|
|Latest Documentation Status| |Stable Documentation Status| |Gitter chat| |GitHub Discussions|
|CI| |Build status| |SPEC 4 — Using and Creating Nightly Wheels|
|Repology| |PyPI package| |Conda-forge| |Python Versions|
@ -32,14 +34,14 @@ this heavy machinery has become an excessively large and unnecessary
dependency.
Think of this library as a tiny self-contained version of Boost.Python
with everything stripped away that isn’t relevant for binding
with everything stripped away that isn't relevant for binding
generation. Without comments, the core header files only require ~4K
lines of code and depend on Python (2.7 or 3.5+, or PyPy) and the C++
standard library. This compact implementation was possible thanks to
some of the new C++11 language features (specifically: tuples, lambda
functions and variadic templates). Since its creation, this library has
grown beyond Boost.Python in many ways, leading to dramatically simpler
binding code in many common situations.
lines of code and depend on Python (CPython 3.8+, PyPy, or GraalPy) and the C++
standard library. This compact implementation was possible thanks to some C++11
language features (specifically: tuples, lambda functions and variadic
templates). Since its creation, this library has grown beyond Boost.Python in
many ways, leading to dramatically simpler binding code in many common
situations.
Tutorial and reference documentation is provided at
`pybind11.readthedocs.io <https://pybind11.readthedocs.io/en/latest>`_.
@ -71,6 +73,7 @@ pybind11 can map the following core C++ features to Python:
- Internal references with correct reference counting
- C++ classes with virtual (and pure virtual) methods can be extended
in Python
- Integrated NumPy support (NumPy 2 requires pybind11 2.12+)
Goodies
-------
@ -78,8 +81,9 @@ Goodies
In addition to the core functionality, pybind11 provides some extra
goodies:
- Python 2.7, 3.5+, and PyPy/PyPy3 7.3 are supported with an
implementation-agnostic interface.
- CPython 3.8+, PyPy3 7.3.17+, and GraalPy 24.1+ are supported with an
implementation-agnostic interface (see older versions for older CPython
and PyPy versions).
- It is possible to bind C++11 lambda functions with captured
variables. The lambda capture data is stored inside the resulting
@ -88,8 +92,8 @@ goodies:
- pybind11 uses C++11 move constructors and move assignment operators
whenever possible to efficiently transfer custom data types.
- It’s easy to expose the internal storage of custom data types through
Python’s buffer protocols. This is handy e.g. for fast conversion
- It's easy to expose the internal storage of custom data types through
Python's buffer protocols. This is handy e.g. for fast conversion
between C++ matrix classes like Eigen and NumPy without expensive
copy operations.
@ -119,25 +123,55 @@ goodies:
Supported compilers
-------------------
1. Clang/LLVM 3.3 or newer (for Apple Xcode’s clang, this is 5.0.0 or
1. Clang/LLVM 3.3 or newer (for Apple Xcode's clang, this is 5.0.0 or
newer)
2. GCC 4.8 or newer
3. Microsoft Visual Studio 2015 Update 3 or newer
3. Microsoft Visual Studio 2022 or newer (2019 probably works, but was dropped in CI)
4. Intel classic C++ compiler 18 or newer (ICC 20.2 tested in CI)
5. Cygwin/GCC (previously tested on 2.5.1)
6. NVCC (CUDA 11.0 tested in CI)
7. NVIDIA PGI (20.9 tested in CI)
Supported Platforms
-------------------
* Windows, Linux, macOS, and iOS
* CPython 3.8+, Pyodide, PyPy, and GraalPy
* C++11, C++14, C++17, C++20, and C++23
About
-----
This project was created by `Wenzel
Jakob <http://rgl.epfl.ch/people/wjakob>`_. Significant features and/or
improvements to the code were contributed by Jonas Adler, Lori A. Burns,
Sylvain Corlay, Eric Cousineau, Aaron Gokaslan, Ralf Grosse-Kunstleve, Trent Houliston, Axel
Huebl, @hulucc, Yannick Jadoul, Sergey Lyskov Johan Mabille, Tomasz Miąsko,
Dean Moldovan, Ben Pritchard, Jason Rhinelander, Boris Schäling, Pim
Schellart, Henry Schreiner, Ivan Smirnov, Boris Staletic, and Patrick Stewart.
improvements to the code were contributed by
Jonas Adler,
Lori A. Burns,
Sylvain Corlay,
Eric Cousineau,
Aaron Gokaslan,
Ralf Grosse-Kunstleve,
Trent Houliston,
Axel Huebl,
@hulucc,
Yannick Jadoul,
Sergey Lyskov,
Johan Mabille,
Tomasz Miąsko,
Dean Moldovan,
Ben Pritchard,
Jason Rhinelander,
Boris Schäling,
Pim Schellart,
Henry Schreiner,
Ivan Smirnov,
Dustin Spicuzza,
Boris Staletic,
Ethan Steinberg,
Patrick Stewart,
Ivor Wanders,
and
Xiaofei Wang.
We thank Google for a generous financial contribution to the continuous
integration infrastructure used by this project.
@ -178,3 +212,5 @@ to the terms and conditions of this license.
:target: https://pypi.org/project/pybind11/
.. |GitHub Discussions| image:: https://img.shields.io/static/v1?label=Discussions&message=Ask&color=blue&logo=github
:target: https://github.com/pybind/pybind11/discussions
.. |SPEC 4 — Using and Creating Nightly Wheels| image:: https://img.shields.io/badge/SPEC-4-green?labelColor=%23004811&color=%235CA038
:target: https://scientific-python.org/specs/spec-0004/

13
thirdparty/pybind11/SECURITY.md vendored Normal file
View File

@ -0,0 +1,13 @@
# Security Policy
## Supported Versions
Security updates are applied only to the latest release.
## Reporting a Vulnerability
If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
Please disclose it at [security advisory](https://github.com/pybind/pybind11/security/advisories/new).
This project is maintained by a team of volunteers on a reasonable-effort basis. As such, please give us at least 90 days to work on a fix before public exposure.

View File

@ -18,5 +18,4 @@ ALIASES += "endrst=\endverbatim"
QUIET = YES
WARNINGS = YES
WARN_IF_UNDOCUMENTED = NO
PREDEFINED = PY_MAJOR_VERSION=3 \
PYBIND11_NOINLINE
PREDEFINED = PYBIND11_NOINLINE

View File

@ -0,0 +1,3 @@
.highlight .go {
color: #707070;
}

View File

@ -1,11 +0,0 @@
.wy-table-responsive table td,
.wy-table-responsive table th {
white-space: initial !important;
}
.rst-content table.docutils td {
vertical-align: top !important;
}
div[class^='highlight'] pre {
white-space: pre;
white-space: pre-wrap;
}

View File

@ -1,35 +1,55 @@
.. _custom_type_caster:
Custom type casters
===================
In very rare cases, applications may require custom type casters that cannot be
expressed using the abstractions provided by pybind11, thus requiring raw
Python C API calls. This is fairly advanced usage and should only be pursued by
experts who are familiar with the intricacies of Python reference counting.
Some applications may prefer custom type casters that convert between existing
Python types and C++ types, similar to the ``list`` ↔ ``std::vector``
and ``dict`` ↔ ``std::map`` conversions which are built into pybind11.
Implementing custom type casters is fairly advanced usage.
While it is recommended to use the pybind11 API as much as possible, more complex examples may
require familiarity with the intricacies of the Python C API.
You can refer to the `Python/C API Reference Manual <https://docs.python.org/3/c-api/index.html>`_
for more information.
The following snippets demonstrate how this works for a very simple ``inty``
type that should be convertible from Python types that provide a
``__int__(self)`` method.
The following snippets demonstrate how this works for a very simple ``Point2D`` type.
We want this type to be convertible to C++ from Python types implementing the
``Sequence`` protocol and having two elements of type ``float``.
When returned from C++ to Python, it should be converted to a Python ``tuple[float, float]``.
For this type we could provide Python bindings for different arithmetic functions implemented
in C++ (here demonstrated by a simple ``negate`` function).
..
PLEASE KEEP THE CODE BLOCKS IN SYNC WITH
tests/test_docs_advanced_cast_custom.cpp
tests/test_docs_advanced_cast_custom.py
Ideally, change the test, run pre-commit (incl. clang-format),
then copy the changed code back here.
Also use TEST_SUBMODULE in tests, but PYBIND11_MODULE in docs.
.. code-block:: cpp
struct inty { long long_value; };
namespace user_space {
void print(inty s) {
std::cout << s.long_value << std::endl;
}
struct Point2D {
double x;
double y;
};
The following Python snippet demonstrates the intended usage from the Python side:
Point2D negate(const Point2D &point) { return Point2D{-point.x, -point.y}; }
} // namespace user_space
The following Python snippet demonstrates the intended usage of ``negate`` from the Python side:
.. code-block:: python
class A:
def __int__(self):
return 123
from my_math_module import docs_advanced_cast_custom as m
from example import print
print(A())
point1 = [1.0, -1.0]
point2 = m.negate(point1)
assert point2 == (-1.0, 1.0)
To register the necessary conversion routines, it is necessary to add an
instantiation of the ``pybind11::detail::type_caster<T>`` template.
@ -38,47 +58,57 @@ type is explicitly allowed.
.. code-block:: cpp
namespace pybind11 { namespace detail {
template <> struct type_caster<inty> {
public:
/**
* This macro establishes the name 'inty' in
* function signatures and declares a local variable
* 'value' of type inty
*/
PYBIND11_TYPE_CASTER(inty, const_name("inty"));
namespace pybind11 {
namespace detail {
/**
* Conversion part 1 (Python->C++): convert a PyObject into a inty
* instance or return false upon failure. The second argument
* indicates whether implicit conversions should be applied.
*/
bool load(handle src, bool) {
/* Extract PyObject from handle */
PyObject *source = src.ptr();
/* Try converting into a Python integer value */
PyObject *tmp = PyNumber_Long(source);
if (!tmp)
template <>
struct type_caster<user_space::Point2D> {
// This macro inserts a lot of boilerplate code and sets the type hint.
// `io_name` is used to specify different type hints for arguments and return values.
// The signature of our negate function would then look like:
// `negate(Sequence[float]) -> tuple[float, float]`
PYBIND11_TYPE_CASTER(user_space::Point2D, io_name("Sequence[float]", "tuple[float, float]"));
// C++ -> Python: convert `Point2D` to `tuple[float, float]`. The second and third arguments
// are used to indicate the return value policy and parent object (for
// return_value_policy::reference_internal) and are often ignored by custom casters.
// The return value should reflect the type hint specified by the second argument of `io_name`.
static handle
cast(const user_space::Point2D &number, return_value_policy /*policy*/, handle /*parent*/) {
return py::make_tuple(number.x, number.y).release();
}
// Python -> C++: convert a `PyObject` into a `Point2D` and return false upon failure. The
// second argument indicates whether implicit conversions should be allowed.
// The accepted types should reflect the type hint specified by the first argument of
// `io_name`.
bool load(handle src, bool /*convert*/) {
// Check if handle is a Sequence
if (!py::isinstance<py::sequence>(src)) {
return false;
}
auto seq = py::reinterpret_borrow<py::sequence>(src);
// Check if exactly two values are in the Sequence
if (seq.size() != 2) {
return false;
}
// Check if each element is either a float or an int
for (auto item : seq) {
if (!py::isinstance<py::float_>(item) && !py::isinstance<py::int_>(item)) {
return false;
/* Now try to convert into a C++ int */
value.long_value = PyLong_AsLong(tmp);
Py_DECREF(tmp);
/* Ensure return code was OK (to avoid out-of-range errors etc) */
return !(value.long_value == -1 && !PyErr_Occurred());
}
}
value.x = seq[0].cast<double>();
value.y = seq[1].cast<double>();
return true;
}
};
/**
* Conversion part 2 (C++ -> Python): convert an inty instance into
* a Python object. The second and third arguments are used to
* indicate the return value policy and parent object (for
* ``return_value_policy::reference_internal``) and are generally
* ignored by implicit casters.
*/
static handle cast(inty src, return_value_policy /* policy */, handle /* parent */) {
return PyLong_FromLong(src.long_value);
}
};
}} // namespace pybind11::detail
} // namespace detail
} // namespace pybind11
// Bind the negate function
PYBIND11_MODULE(docs_advanced_cast_custom, m, py::mod_gil_not_used()) { m.def("negate", user_space::negate); }
.. note::
@ -86,8 +116,22 @@ type is explicitly allowed.
that ``T`` is default-constructible (``value`` is first default constructed
and then ``load()`` assigns to it).
.. note::
For further information on the ``return_value_policy`` argument of ``cast`` refer to :ref:`return_value_policies`.
To learn about the ``convert`` argument of ``load`` see :ref:`nonconverting_arguments`.
.. warning::
When using custom type casters, it's important to declare them consistently
in every compilation unit of the Python extension module. Otherwise,
in every compilation unit of the Python extension module to satisfy the C++ One Definition Rule
(`ODR <https://en.cppreference.com/w/cpp/language/definition>`_). Otherwise,
undefined behavior can ensue.
.. note::
Using the type hint ``Sequence[float]`` signals to static type checkers that not only tuples may be
passed, but any type implementing the Sequence protocol, e.g., ``list[float]``.
Unfortunately, that loses the length information ``tuple[float, float]`` provides.
One way of still providing some length information in type hints is using ``typing.Annotated``, e.g.,
``Annotated[Sequence[float], 2]``, or further adding libraries like
`annotated-types <https://github.com/annotated-types/annotated-types>`_.
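For instance, the ``io_name`` call in the caster above could carry the length hint like this (a sketch; the exact hint string is a documentation choice, not something pybind11 prescribes):
.. code-block:: cpp
    // Alternative to io_name("Sequence[float]", "tuple[float, float]") shown earlier:
    PYBIND11_TYPE_CASTER(user_space::Point2D,
                         io_name("Annotated[Sequence[float], 2]", "tuple[float, float]"));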

View File

@ -259,7 +259,7 @@ copying to take place:
"small"_a // <- This one can be copied if needed
);
With the above binding code, attempting to call the the ``some_method(m)``
With the above binding code, attempting to call the ``some_method(m)``
method on a ``MyClass`` object, or attempting to call ``some_function(m, m2)``
will raise a ``RuntimeError`` rather than making a temporary copy of the array.
It will, however, allow the ``m2`` argument to be copied into a temporary if

View File

@ -56,7 +56,7 @@ trivial to generate binding code for all of these functions.
#include <pybind11/functional.h>
PYBIND11_MODULE(example, m) {
PYBIND11_MODULE(example, m, py::mod_gil_not_used()) {
m.def("func_arg", &func_arg);
m.def("func_ret", &func_ret);
m.def("func_cpp", &func_cpp);

View File

@ -151,7 +151,7 @@ as arguments and return values, refer to the section on binding :ref:`classes`.
+------------------------------------+---------------------------+-----------------------------------+
| ``std::variant<...>`` | Type-safe union (C++17) | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::filesystem::path<T>`` | STL path (C++17) [#]_ | :file:`pybind11/stl/filesystem.h` |
| ``std::filesystem::path`` | STL path (C++17) [#]_ | :file:`pybind11/stl/filesystem.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::function<...>`` | STL polymorphic function | :file:`pybind11/functional.h` |
+------------------------------------+---------------------------+-----------------------------------+
@ -167,5 +167,4 @@ as arguments and return values, refer to the section on binding :ref:`classes`.
+------------------------------------+---------------------------+-----------------------------------+
.. [#] ``std::filesystem::path`` is converted to ``pathlib.Path`` and
``os.PathLike`` is converted to ``std::filesystem::path``, but this requires
Python 3.6 (for ``__fspath__`` support).
can be loaded from ``os.PathLike``, ``str``, and ``bytes``.
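A minimal illustrative binding (a sketch, not taken from the diff; module and function names are hypothetical) showing the caster enabled by :file:`pybind11/stl/filesystem.h`:
.. code-block:: cpp
    #include <filesystem>
    #include <pybind11/pybind11.h>
    #include <pybind11/stl/filesystem.h>  // enables the std::filesystem::path caster
    namespace py = pybind11;
    PYBIND11_MODULE(example, m) {
        // Accepts pathlib.Path, str, or bytes from Python; the returned
        // std::filesystem::path is converted to a pathlib.Path.
        m.def("parent_dir",
              [](const std::filesystem::path &p) { return p.parent_path(); });
    }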

View File

@ -42,7 +42,7 @@ types:
.. code-block:: cpp
// `boost::optional` as an example -- can be any `std::optional`-like container
namespace pybind11 { namespace detail {
namespace PYBIND11_NAMESPACE { namespace detail {
template <typename T>
struct type_caster<boost::optional<T>> : optional_caster<boost::optional<T>> {};
}}
@ -54,7 +54,7 @@ for custom variant types:
.. code-block:: cpp
// `boost::variant` as an example -- can be any `std::variant`-like container
namespace pybind11 { namespace detail {
namespace PYBIND11_NAMESPACE { namespace detail {
template <typename... Ts>
struct type_caster<boost::variant<Ts...>> : variant_caster<boost::variant<Ts...>> {};
@ -66,7 +66,7 @@ for custom variant types:
return boost::apply_visitor(args...);
}
};
}} // namespace pybind11::detail
}} // namespace PYBIND11_NAMESPACE::detail
The ``visit_helper`` specialization is not required if your ``name::variant`` provides
a ``name::visit()`` function. For any other function name, the specialization must be
@ -87,8 +87,6 @@ included to tell pybind11 how to visit the variant.
pybind11 only supports the modern implementation of ``boost::variant``
which makes use of variadic templates. This requires Boost 1.56 or newer.
Additionally, on Windows, MSVC 2017 is required because ``boost::variant``
falls back to the old non-variadic implementation on MSVC 2015.
.. _opaque:
@ -164,15 +162,15 @@ the declaration
.. code-block:: cpp
PYBIND11_MAKE_OPAQUE(std::vector<int>);
PYBIND11_MAKE_OPAQUE(std::vector<int>)
before any binding code (e.g. invocations to ``class_::def()``, etc.). This
macro must be specified at the top level (and outside of any namespaces), since
it adds a template instantiation of ``type_caster``. If your binding code consists of
multiple compilation units, it must be present in every file (typically via a
common header) preceding any usage of ``std::vector<int>``. Opaque types must
also have a corresponding ``class_`` declaration to associate them with a name
in Python, and to define a set of available operations, e.g.:
also have a corresponding ``py::class_`` declaration to associate them with a
name in Python, and to define a set of available operations, e.g.:
.. code-block:: cpp
@ -209,8 +207,8 @@ The following example showcases usage of :file:`pybind11/stl_bind.h`:
// Don't forget this
#include <pybind11/stl_bind.h>
PYBIND11_MAKE_OPAQUE(std::vector<int>);
PYBIND11_MAKE_OPAQUE(std::map<std::string, double>);
PYBIND11_MAKE_OPAQUE(std::vector<int>)
PYBIND11_MAKE_OPAQUE(std::map<std::string, double>)
// ...

View File

@ -1,14 +1,6 @@
Strings, bytes and Unicode conversions
######################################
.. note::
This section discusses string handling in terms of Python 3 strings. For
Python 2.7, replace all occurrences of ``str`` with ``unicode`` and
``bytes`` with ``str``. Python 2.7 users may find it best to use ``from
__future__ import unicode_literals`` to avoid unintentionally using ``str``
instead of ``unicode``.
Passing Python strings to C++
=============================
@ -58,9 +50,9 @@ Passing bytes to C++
--------------------
A Python ``bytes`` object will be passed to C++ functions that accept
``std::string`` or ``char*`` *without* conversion. On Python 3, in order to
make a function *only* accept ``bytes`` (and not ``str``), declare it as taking
a ``py::bytes`` argument.
``std::string`` or ``char*`` *without* conversion. In order to make a function
*only* accept ``bytes`` (and not ``str``), declare it as taking a ``py::bytes``
argument.
Returning C++ strings to Python
@ -109,8 +101,11 @@ conversion has the same overhead as implicit conversion.
m.def("str_output",
[]() {
std::string s = "Send your r\xe9sum\xe9 to Alice in HR"; // Latin-1
py::str py_s = PyUnicode_DecodeLatin1(s.data(), s.length());
return py_s;
py::handle py_s = PyUnicode_DecodeLatin1(s.data(), s.length(), nullptr);
if (!py_s) {
throw py::error_already_set();
}
return py::reinterpret_steal<py::str>(py_s);
}
);
@ -121,7 +116,8 @@ conversion has the same overhead as implicit conversion.
The `Python C API
<https://docs.python.org/3/c-api/unicode.html#built-in-codecs>`_ provides
several built-in codecs.
several built-in codecs. Note that these all return *new* references, so
use :cpp:func:`reinterpret_steal` when converting them to a :cpp:class:`str`.
One could also use a third party encoding library such as libiconv to transcode
@ -204,11 +200,6 @@ decoded to Python ``str``.
}
);
.. warning::
Wide character strings may not work as described on Python 2.7 or Python
3.3 compiled with ``--enable-unicode=ucs2``.
Strings in multibyte encodings such as Shift-JIS must be transcoded to
UTF-8/16/32 before being returned to Python.

View File

@ -45,7 +45,7 @@ Normally, the binding code for these classes would look as follows:
.. code-block:: cpp
PYBIND11_MODULE(example, m) {
PYBIND11_MODULE(example, m, py::mod_gil_not_used()) {
py::class_<Animal>(m, "Animal")
.def("go", &Animal::go);
@ -64,7 +64,7 @@ helper class that is defined as follows:
.. code-block:: cpp
class PyAnimal : public Animal {
class PyAnimal : public Animal, public py::trampoline_self_life_support {
public:
/* Inherit the constructors */
using Animal::Animal;
@ -80,6 +80,24 @@ helper class that is defined as follows:
}
};
The ``py::trampoline_self_life_support`` base class is needed to ensure
that a ``std::unique_ptr`` can safely be passed between Python and C++. To
help you steer clear of notorious pitfalls (e.g. inheritance slicing),
pybind11 enforces that trampoline classes inherit from
``py::trampoline_self_life_support`` if used in combination with
``py::smart_holder``.
.. note::
For completeness, the base class has no effect if a holder other than
``py::smart_holder`` is used, including the default ``std::unique_ptr<T>``.
To avoid confusion, pybind11 will fail to compile bindings that combine
``py::trampoline_self_life_support`` with a holder other than
``py::smart_holder``.
Please think twice, though, before deciding to not use the safer
``py::smart_holder``. The pitfalls associated with avoiding it are very
real, and the overhead for using it is very likely in the noise.
The macro :c:macro:`PYBIND11_OVERRIDE_PURE` should be used for pure virtual
functions, and :c:macro:`PYBIND11_OVERRIDE` should be used for functions which have
a default implementation. There are also two alternate macros
@ -94,19 +112,19 @@ The binding code also needs a few minor adaptations (highlighted):
.. code-block:: cpp
:emphasize-lines: 2,3
PYBIND11_MODULE(example, m) {
py::class_<Animal, PyAnimal /* <--- trampoline*/>(m, "Animal")
PYBIND11_MODULE(example, m, py::mod_gil_not_used()) {
py::class_<Animal, PyAnimal /* <--- trampoline */, py::smart_holder>(m, "Animal")
.def(py::init<>())
.def("go", &Animal::go);
py::class_<Dog, Animal>(m, "Dog")
py::class_<Dog, Animal, py::smart_holder>(m, "Dog")
.def(py::init<>());
m.def("call_go", &call_go);
}
Importantly, pybind11 is made aware of the trampoline helper class by
specifying it as an extra template argument to :class:`class_`. (This can also
specifying it as an extra template argument to ``py::class_``. (This can also
be combined with other template arguments such as a custom holder type; the
order of template types does not matter). Following this, we are able to
define a constructor as usual.
@ -116,9 +134,9 @@ Bindings should be made against the actual class, not the trampoline helper clas
.. code-block:: cpp
:emphasize-lines: 3
py::class_<Animal, PyAnimal /* <--- trampoline*/>(m, "Animal");
py::class_<Animal, PyAnimal /* <--- trampoline */, py::smart_holder>(m, "Animal");
.def(py::init<>())
.def("go", &PyAnimal::go); /* <--- THIS IS WRONG, use &Animal::go */
.def("go", &Animal::go); /* <--- DO NOT USE &PyAnimal::go HERE */
Note, however, that the above is sufficient for allowing python classes to
extend ``Animal``, but not ``Dog``: see :ref:`virtual_and_inheritance` for the
@ -133,14 +151,14 @@ a virtual method call.
>>> from example import *
>>> d = Dog()
>>> call_go(d)
u'woof! woof! woof! '
'woof! woof! woof! '
>>> class Cat(Animal):
... def go(self, n_times):
... return "meow! " * n_times
...
>>> c = Cat()
>>> call_go(c)
u'meow! meow! meow! '
'meow! meow! meow! '
If you are defining a custom constructor in a derived Python class, you *must*
ensure that you explicitly call the bound C++ constructor using ``__init__``,
@ -244,13 +262,13 @@ override the ``name()`` method):
.. code-block:: cpp
class PyAnimal : public Animal {
class PyAnimal : public Animal, public py::trampoline_self_life_support {
public:
using Animal::Animal; // Inherit constructors
std::string go(int n_times) override { PYBIND11_OVERRIDE_PURE(std::string, Animal, go, n_times); }
std::string name() override { PYBIND11_OVERRIDE(std::string, Animal, name, ); }
};
class PyDog : public Dog {
class PyDog : public Dog, public py::trampoline_self_life_support {
public:
using Dog::Dog; // Inherit constructors
std::string go(int n_times) override { PYBIND11_OVERRIDE(std::string, Dog, go, n_times); }
@ -272,7 +290,7 @@ declare or override any virtual methods itself:
.. code-block:: cpp
class Husky : public Dog {};
class PyHusky : public Husky {
class PyHusky : public Husky, public py::trampoline_self_life_support {
public:
using Husky::Husky; // Inherit constructors
std::string go(int n_times) override { PYBIND11_OVERRIDE_PURE(std::string, Husky, go, n_times); }
@ -287,13 +305,15 @@ follows:
.. code-block:: cpp
template <class AnimalBase = Animal> class PyAnimal : public AnimalBase {
template <class AnimalBase = Animal>
class PyAnimal : public AnimalBase, public py::trampoline_self_life_support {
public:
using AnimalBase::AnimalBase; // Inherit constructors
std::string go(int n_times) override { PYBIND11_OVERRIDE_PURE(std::string, AnimalBase, go, n_times); }
std::string name() override { PYBIND11_OVERRIDE(std::string, AnimalBase, name, ); }
};
template <class DogBase = Dog> class PyDog : public PyAnimal<DogBase> {
template <class DogBase = Dog>
class PyDog : public PyAnimal<DogBase>, public py::trampoline_self_life_support {
public:
using PyAnimal<DogBase>::PyAnimal; // Inherit constructors
// Override PyAnimal's pure virtual go() with a non-pure one:
@ -311,9 +331,9 @@ The classes are then registered with pybind11 using:
.. code-block:: cpp
py::class_<Animal, PyAnimal<>> animal(m, "Animal");
py::class_<Dog, Animal, PyDog<>> dog(m, "Dog");
py::class_<Husky, Dog, PyDog<Husky>> husky(m, "Husky");
py::class_<Animal, PyAnimal<>, py::smart_holder> animal(m, "Animal");
py::class_<Dog, Animal, PyDog<>, py::smart_holder> dog(m, "Dog");
py::class_<Husky, Dog, PyDog<Husky>, py::smart_holder> husky(m, "Husky");
// ... add animal, dog, husky definitions
Note that ``Husky`` did not require a dedicated trampoline template class at
@ -408,6 +428,51 @@ Python side by allowing the Python function to return ``None`` or an ``int``:
return false; // Alternatively return MyClass::myMethod(value);
}
Avoiding Inheritance Slicing and ``std::weak_ptr`` surprises
------------------------------------------------------------
When working with classes that use virtual functions and are subclassed
in Python, special care must be taken when converting Python objects to
``std::shared_ptr<T>``. Depending on whether the class uses a plain
``std::shared_ptr`` holder or ``py::smart_holder``, the resulting
``shared_ptr`` may either allow inheritance slicing or lead to potentially
surprising behavior when constructing ``std::weak_ptr`` instances.
This section explains how ``std::shared_ptr`` and ``py::smart_holder`` manage
object lifetimes differently, how these differences affect trampoline-derived
objects, and what options are available to achieve the situation-specific
desired behavior.
When using ``std::shared_ptr`` as the holder type, converting a Python object
to a ``std::shared_ptr<T>`` (e.g., ``obj.cast<std::shared_ptr<T>>()``, or simply
passing the Python object as an argument to a ``.def()``-ed function) returns
a ``shared_ptr`` that shares ownership with the original ``class_`` holder,
usually preserving object lifetime. However, for Python classes that derive from
a trampoline, if the Python object is destroyed, only the base C++ object may
remain alive, leading to inheritance slicing
(see `#1333 <https://github.com/pybind/pybind11/issues/1333>`_).
In contrast, with ``py::smart_holder``, converting a Python object to
a ``std::shared_ptr<T>`` returns a new ``shared_ptr`` with an independent
control block that keeps the derived Python object alive. This avoids
inheritance slicing but can lead to unintended behavior when creating
``std::weak_ptr`` instances
(see `#5623 <https://github.com/pybind/pybind11/issues/5623>`_).
If it is necessary to obtain a ``std::weak_ptr`` that shares the control block
with the ``smart_holder``—at the cost of reintroducing potential inheritance
slicing—you can use ``py::potentially_slicing_weak_ptr<T>(obj)``.
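For illustration, a minimal sketch (``Node`` is a hypothetical class bound with ``py::smart_holder``; the helper is the one named above):
.. code-block:: cpp
    // `obj` is a Python instance of Node, e.g. an argument of a .def()-ed function.
    void observe(py::handle obj) {
        // Independent control block: keeps the derived Python object alive.
        std::shared_ptr<Node> strong = obj.cast<std::shared_ptr<Node>>();
        // Shares the smart_holder's control block, at the cost of potential slicing.
        std::weak_ptr<Node> weak = py::potentially_slicing_weak_ptr<Node>(obj);
    }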
When precise lifetime management of derived Python objects is important,
using a Python-side ``weakref`` is the most reliable approach, as it avoids
both inheritance slicing and unintended interactions with ``std::weak_ptr``
semantics in C++.
.. seealso::
* :func:`potentially_slicing_weak_ptr` C++ documentation
* :file:`tests/test_potentially_slicing_weak_ptr.cpp`
.. _custom_constructors:
@ -499,12 +564,12 @@ an alias:
// ...
virtual ~Example() = default;
};
class PyExample : public Example {
class PyExample : public Example, public py::trampoline_self_life_support {
public:
using Example::Example;
PyExample(Example &&base) : Example(std::move(base)) {}
};
py::class_<Example, PyExample>(m, "Example")
py::class_<Example, PyExample, py::smart_holder>(m, "Example")
// Returns an Example pointer. If a PyExample is needed, the Example
// instance will be moved via the extra constructor in PyExample, above.
.def(py::init([]() { return new Example(); }))
@ -550,9 +615,10 @@ pybind11. The underlying issue is that the ``std::unique_ptr`` holder type that
is responsible for managing the lifetime of instances will reference the
destructor even if no deallocations ever take place. In order to expose classes
with private or protected destructors, it is possible to override the holder
type via a holder type argument to ``class_``. Pybind11 provides a helper class
``py::nodelete`` that disables any destructor invocations. In this case, it is
crucial that instances are deallocated on the C++ side to avoid memory leaks.
type via a holder type argument to ``py::class_``. Pybind11 provides a helper
class ``py::nodelete`` that disables any destructor invocations. In this case,
it is crucial that instances are deallocated on the C++ side to avoid memory
leaks.
.. code-block:: cpp
@ -708,7 +774,7 @@ to Python.
#include <pybind11/operators.h>
PYBIND11_MODULE(example, m) {
PYBIND11_MODULE(example, m, py::mod_gil_not_used()) {
py::class_<Vector2>(m, "Vector2")
.def(py::init<float, float>())
.def(py::self + py::self)
@ -813,26 +879,20 @@ An instance can now be pickled as follows:
.. code-block:: python
try:
import cPickle as pickle # Use cPickle on Python 2.7
except ImportError:
import pickle
import pickle
p = Pickleable("test_value")
p.setExtra(15)
data = pickle.dumps(p, 2)
data = pickle.dumps(p)
.. note::
Note that only the cPickle module is supported on Python 2.7.
The second argument to ``dumps`` is also crucial: it selects the pickle
protocol version 2, since the older version 1 is not supported. Newer
versions are also fine—for instance, specify ``-1`` to always use the
latest available version. Beware: failure to follow these instructions
will cause important pybind11 memory allocation routines to be skipped
during unpickling, which will likely lead to memory corruption and/or
segmentation faults.
If given, the second argument to ``dumps`` must be 2 or larger - 0 and 1 are
not supported. Newer versions are also fine; for instance, specify ``-1`` to
always use the latest available version. Beware: failure to follow these
instructions will cause important pybind11 memory allocation routines to be
skipped during unpickling, which will likely lead to memory corruption
and/or segmentation faults.
.. seealso::
@ -849,11 +909,9 @@ Python normally uses references in assignments. Sometimes a real copy is needed
to prevent changing all copies. The ``copy`` module [#f5]_ provides these
capabilities.
On Python 3, a class with pickle support is automatically also (deep)copy
A class with pickle support is automatically also (deep)copy
compatible. However, performance can be improved by adding custom
``__copy__`` and ``__deepcopy__`` methods. With Python 2.7, these custom methods
are mandatory for (deep)copy compatibility, because pybind11 only supports
cPickle.
``__copy__`` and ``__deepcopy__`` methods.
For simple classes (deep)copy can be enabled by using the copy constructor,
which should look as follows:
@ -879,7 +937,7 @@ Multiple Inheritance
pybind11 can create bindings for types that derive from multiple base types
(aka. *multiple inheritance*). To do so, specify all bases in the template
arguments of the ``class_`` declaration:
arguments of the ``py::class_`` declaration:
.. code-block:: cpp
@ -954,11 +1012,11 @@ because of conflicting definitions on the external type:
// dogs.cpp
// Binding for external library class:
py::class<pets::Pet>(m, "Pet")
py::class_<pets::Pet>(m, "Pet")
.def("name", &pets::Pet::name);
// Binding for local extension class:
py::class<Dog, pets::Pet>(m, "Dog")
py::class_<Dog, pets::Pet>(m, "Dog")
.def(py::init<std::string>());
.. code-block:: cpp
@ -966,11 +1024,11 @@ because of conflicting definitions on the external type:
// cats.cpp, in a completely separate project from the above dogs.cpp.
// Binding for external library class:
py::class<pets::Pet>(m, "Pet")
py::class_<pets::Pet>(m, "Pet")
.def("get_name", &pets::Pet::name);
// Binding for local extending class:
py::class<Cat, pets::Pet>(m, "Cat")
py::class_<Cat, pets::Pet>(m, "Cat")
.def(py::init<std::string>());
.. code-block:: pycon
@ -988,13 +1046,13 @@ the ``py::class_`` constructor:
.. code-block:: cpp
// Pet binding in dogs.cpp:
py::class<pets::Pet>(m, "Pet", py::module_local())
py::class_<pets::Pet>(m, "Pet", py::module_local())
.def("name", &pets::Pet::name);
.. code-block:: cpp
// Pet binding in cats.cpp:
py::class<pets::Pet>(m, "Pet", py::module_local())
py::class_<pets::Pet>(m, "Pet", py::module_local())
.def("get_name", &pets::Pet::name);
This makes the Python-side ``dogs.Pet`` and ``cats.Pet`` into distinct classes,
@ -1112,7 +1170,7 @@ described trampoline:
virtual int foo() const { return 42; }
};
class Trampoline : public A {
class Trampoline : public A, public py::trampoline_self_life_support {
public:
int foo() const override { PYBIND11_OVERRIDE(int, A, foo, ); }
};
@ -1122,16 +1180,9 @@ described trampoline:
using A::foo;
};
py::class_<A, Trampoline>(m, "A") // <-- `Trampoline` here
py::class_<A, Trampoline, py::smart_holder>(m, "A") // <-- `Trampoline` here
.def("foo", &Publicist::foo); // <-- `Publicist` here, not `Trampoline`!
.. note::
MSVC 2015 has a compiler bug (fixed in version 2017) which
requires a more explicit function binding in the form of
``.def("foo", static_cast<int (A::*)() const>(&Publicist::foo));``
where ``int (A::*)() const`` is the type of ``A::foo``.
Binding final classes
=====================
@ -1210,7 +1261,7 @@ but once again each instantiation must be explicitly specified:
T fn(V v);
};
py::class<MyClass<int>>(m, "MyClassT")
py::class_<MyClass<int>>(m, "MyClassT")
.def("fn", &MyClass<int>::fn<std::string>);
Custom automatic downcasters
@ -1242,7 +1293,7 @@ whether a downcast is safe, you can proceed by specializing the
std::string bark() const { return sound; }
};
namespace pybind11 {
namespace PYBIND11_NAMESPACE {
template<> struct polymorphic_type_hook<Pet> {
static const void *get(const Pet *src, const std::type_info*& type) {
// note that src may be nullptr
@ -1253,7 +1304,7 @@ whether a downcast is safe, you can proceed by specializing the
return src;
}
};
} // namespace pybind11
} // namespace PYBIND11_NAMESPACE
When pybind11 wants to convert a C++ pointer of type ``Base*`` to a
Python object, it calls ``polymorphic_type_hook<Base>::get()`` to
@ -1333,13 +1384,21 @@ You can do that using ``py::custom_type_setup``:
auto *type = &heap_type->ht_type;
type->tp_flags |= Py_TPFLAGS_HAVE_GC;
type->tp_traverse = [](PyObject *self_base, visitproc visit, void *arg) {
auto &self = py::cast<OwnsPythonObjects&>(py::handle(self_base));
Py_VISIT(self.value.ptr());
// https://docs.python.org/3/c-api/typeobj.html#c.PyTypeObject.tp_traverse
#if PY_VERSION_HEX >= 0x03090000
Py_VISIT(Py_TYPE(self_base));
#endif
if (py::detail::is_holder_constructed(self_base)) {
auto &self = py::cast<OwnsPythonObjects&>(py::handle(self_base));
Py_VISIT(self.value.ptr());
}
return 0;
};
type->tp_clear = [](PyObject *self_base) {
auto &self = py::cast<OwnsPythonObjects&>(py::handle(self_base));
self.value = py::none();
if (py::detail::is_holder_constructed(self_base)) {
auto &self = py::cast<OwnsPythonObjects&>(py::handle(self_base));
self.value = py::none();
}
return 0;
};
}));

View File

@ -0,0 +1,391 @@
# Double locking, deadlocking, GIL
[TOC]
## Introduction
### Overview
In concurrent programming with locks, *deadlocks* can arise when more than one
mutex is locked at the same time, and careful attention has to be paid to lock
ordering to avoid this. Here we will look at a common situation that occurs in
native extensions for CPython written in C++.
### Deadlocks
A deadlock can occur when more than one thread attempts to lock more than one
mutex, and two of the threads lock two of the mutexes in different orders. For
example, consider mutexes `mu1` and `mu2`, and threads T1 and T2, executing:
| | T1 | T2 |
|--- | ------------------- | -------------------|
|1 | `mu1.lock()`{.good} | `mu2.lock()`{.good}|
|2 | `mu2.lock()`{.bad} | `mu1.lock()`{.bad} |
|3 | `/* work */` | `/* work */` |
|4 | `mu2.unlock()` | `mu1.unlock()` |
|5 | `mu1.unlock()` | `mu2.unlock()` |
Now if T1 manages to lock `mu1` and T2 manages to lock `mu2` (as indicated in
green), then both threads will block while trying to lock the respective other
mutex (as indicated in red), but they are also unable to release the mutex that
they have locked (step 5).
**The problem** is that it is possible for one thread to attempt to lock `mu1`
and then `mu2`, and for another thread to attempt to lock `mu2` and then `mu1`.
Note that it does not matter if either mutex is unlocked at any intermediate
point; what matters is only the order of any attempt to *lock* the mutexes. For
example, the following, more complex series of operations is just as prone to
deadlock:
| | T1 | T2 |
|--- | ------------------- | -------------------|
|1 | `mu1.lock()`{.good} | `mu1.lock()`{.good}|
|2 | waiting for T2 | `mu2.lock()`{.good}|
|3 | waiting for T2 | `/* work */` |
|4 | waiting for T2 | `mu1.unlock()` |
|5 | `mu2.lock()`{.bad} | `/* work */` |
|6 | `/* work */` | `mu1.lock()`{.bad} |
|7 | `/* work */` | `/* work */` |
|8 | `mu2.unlock()` | `mu1.unlock()` |
|9 | `mu1.unlock()` | `mu2.unlock()` |
When the mutexes involved in a locking sequence are known at compile-time, then
avoiding deadlocks is &ldquo;merely&rdquo; a matter of arranging the lock
operations carefully so as to only occur in one single, fixed order. However, it
is also possible for mutexes to only be determined at runtime. A typical example
of this is a database where each row has its own mutex. An operation that
modifies two rows in a single transaction (e.g. &ldquo;transferring an amount
from one account to another&rdquo;) must lock two row mutexes, but the locking
order cannot be established at compile time. In this case, a dynamic
&ldquo;deadlock avoidance algorithm&rdquo; is needed. (In C++, `std::lock`
provides such an algorithm. An algorithm might use a non-blocking `try_lock`
operation on a mutex, which can either succeed or fail to lock the mutex, but
returns without blocking.)
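For a concrete flavor, here is a rough sketch of the "two account rows" case using
C++17 `std::scoped_lock`, which applies such a deadlock-avoidance algorithm when it
locks both mutexes (the `Account`/`Transfer` names are made up for illustration):
```c++
#include <mutex>

struct Account {
  std::mutex mu;
  long balance = 0;
};

// The lock order of the two row mutexes is unknown at compile time;
// std::scoped_lock locks both without risking the deadlock shown above,
// regardless of the order in which callers pass the accounts.
void Transfer(Account& from, Account& to, long amount) {
  std::scoped_lock lock(from.mu, to.mu);
  from.balance -= amount;
  to.balance += amount;
}
```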
Conceptually, one could also consider it a deadlock if _the same_ thread
attempts to lock a mutex that it has already locked (e.g. when some locked
operation accidentally recurses into itself): `mu.lock();`{.good}
`mu.lock();`{.bad} However, this is a slightly separate issue: Typical mutexes
are either of _recursive_ or _non-recursive_ kind. A recursive mutex allows
repeated locking and requires balanced unlocking. A non-recursive mutex can be
implemented more efficiently, and/but for efficiency reasons does not actually
guarantee a deadlock on second lock. Instead, the API simply forbids such use,
making it a precondition that the thread not already hold the mutex, with
undefined behaviour on violation.
### &ldquo;Once&rdquo; initialization
A common programming problem is to have an operation happen precisely once, even
if requested concurrently. While it is clear that we need to track in some
shared state somewhere whether the operation has already happened, it is worth
noting that this state only ever transitions, once, from `false` to `true`. This
is considerably simpler than a general shared state that can change values
arbitrarily. Next, we also need a mechanism for all but one thread to block
until the initialization has completed, which we can provide with a mutex. The
simplest solution just always locks the mutex:
```c++
// The "once" mechanism:
constinit absl::Mutex mu(absl::kConstInit);
constinit bool init_done = false;
// The operation of interest:
void f();
void InitOnceNaive() {
absl::MutexLock lock(&mu);
if (!init_done) {
f();
init_done = true;
}
}
```
This works, but the efficiency-minded reader will observe that once the
operation has completed, all future lock contention on the mutex is
unnecessary. This leads to the (in)famous &ldquo;double-locking&rdquo;
algorithm, which was historically hard to write correctly. The idea is to check
the boolean *before* locking the mutex, and avoid locking if the operation has
already completed. However, accessing shared state concurrently when at least
one access is a write is prone to causing a data race and needs to be done
according to an appropriate concurrent programming model. In C++ we use atomic
variables:
```c++
// The "once" mechanism:
constinit absl::Mutex mu(absl::kConstInit);
constinit std::atomic<bool> init_done = false;
// The operation of interest:
void f();
void InitOnceWithFastPath() {
if (!init_done.load(std::memory_order_acquire)) {
absl::MutexLock lock(&mu);
if (!init_done.load(std::memory_order_relaxed)) {
f();
init_done.store(true, std::memory_order_release);
}
}
}
```
Checking the flag now happens without holding the mutex lock, and if the
operation has already completed, we return immediately. After locking the mutex,
we need to check the flag again, since multiple threads can reach this point.
*Atomic details.* Since the atomic flag variable is accessed concurrently, we
have to think about the memory order of the accesses. There are two separate
cases: The first, outer check outside the mutex lock, and the second, inner
check under the lock. The outer check and the flag update form an
acquire/release pair: *if* the load sees the value `true` (which must have been
written by the store operation), then it also sees everything that happened
before the store, namely the operation `f()`. By contrast, the inner check can
use relaxed memory ordering, since in that case the mutex operations provide the
necessary ordering: if the inner load sees the value `true`, it happened after
the `lock()`, which happened after the `unlock()`, which happened after the
store.
The C++ standard library, and Abseil, provide a ready-made solution of this
algorithm called `std::call_once`/`absl::call_once`. (The interface is the same,
but the Abseil implementation is possibly better.)
```c++
// The "once" mechanism:
constinit absl::once_flag init_flag;
// The operation of interest:
void f();
void InitOnceWithCallOnce() {
absl::call_once(init_flag, f);
}
```
Even though conceptually this is performing the same algorithm, this
implementation has some considerable advantages: The `once_flag` type is a small
and trivial, integer-like type and is trivially destructible. Not only does it
take up less space than a mutex, it also generates less code since it does not
have to run a destructor, which would need to be added to the program's global
destructor list.
The final clou comes with the C++ semantics of a `static` variable declared at
block scope: According to [[stmt.dcl]](https://eel.is/c++draft/stmt.dcl#3):
> Dynamic initialization of a block variable with static storage duration or
> thread storage duration is performed the first time control passes through its
> declaration; such a variable is considered initialized upon the completion of
> its initialization. [...] If control enters the declaration concurrently while
> the variable is being initialized, the concurrent execution shall wait for
> completion of the initialization.
This is saying that the initialization of a local, `static` variable precisely
has the &ldquo;once&rdquo; semantics that we have been discussing. We can
therefore write the above example as follows:
```c++
// The operation of interest:
void f();
void InitOnceWithStatic() {
static int unused = (f(), 0);
}
```
This approach is by far the simplest and easiest, but the big difference is that
the mutex (or mutex-like object) in this implementation is no longer visible or
in the user&rsquo;s control. This is perfectly fine if the initializer is
simple, but if the initializer itself attempts to lock any other mutex
(including by initializing another static variable!), then we have no control
over the lock ordering!
Finally, you may have noticed the `constinit`s around the earlier code. Both
`constinit` and `constexpr` specifiers on a declaration mean that the variable
is *constant-initialized*, which means that no initialization is performed at
runtime (the initial value is already known at compile time). This in turn means
that a static variable guard mutex may not be needed, and static initialization
never blocks. The difference between the two is that a `constexpr`-specified
variable is also `const`, and a variable cannot be `constexpr` if it has a
non-trivial destructor. Such a destructor also means that the guard mutex is
needed after all, since the destructor must be registered to run at exit,
conditionally on initialization having happened.
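To make the distinction concrete, here is a small sketch contrasting the three cases
for block-scope statics (names are illustrative only):
```c++
#include <atomic>
#include <string>

int NextRequestId() {
  // constexpr: constant-initialized and const; no guard mutex, no destructor.
  static constexpr int kOffset = 1000;

  // constinit: constant-initialized but mutable; still no guard mutex, and
  // std::atomic<int> is trivially destructible, so nothing runs at exit.
  static constinit std::atomic<int> counter{0};

  // Neither: dynamic initialization behind a guard mutex, plus registration
  // of an exit-time destructor, because std::string is not trivial.
  static std::string label = "request";
  (void)label;

  return kOffset + counter.fetch_add(1);
}
```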
## Python, CPython, GIL
With CPython, a Python program can call into native code. To this end, the
native code registers callback functions with the Python runtime via the CPython
API. In order to ensure that the internal state of the Python runtime remains
consistent, there is a single, shared mutex called the &ldquo;global interpreter
lock&rdquo;, or GIL for short. Upon entry of one of the user-provided callback
functions, the GIL is locked (or &ldquo;held&rdquo;), so that no other mutations
of the Python runtime state can occur until the native callback returns.
Many native extensions do not interact with the Python runtime for at least some
part of them, and so it is common for native extensions to _release_ the GIL, do
some work, and then reacquire the GIL before returning. Similarly, when code is
generally not holding the GIL but needs to interact with the runtime briefly, it
will first reacquire the GIL. The GIL is reentrant, and constructions to acquire
and subsequently release the GIL are common, and often don't worry about whether
the GIL is already held.
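As a rough sketch of that common pattern (the `ExpensiveComputation` helper is
hypothetical and must not touch Python state):
```c++
#include <Python.h>

double ExpensiveComputation();  // hypothetical, pure C++

// CPython callback: the GIL is held on entry and on exit, but is released
// around the pure-C++ work so that other Python threads can run.
PyObject* Crunch(PyObject* self, PyObject* args) {
  double result = 0.0;
  Py_BEGIN_ALLOW_THREADS  // releases the GIL
  result = ExpensiveComputation();
  Py_END_ALLOW_THREADS    // reacquires the GIL
  return PyFloat_FromDouble(result);
}
```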
If the native code is written in C++ and contains local, `static` variables,
then we are now dealing with at least _two_ mutexes: the static variable guard
mutex, and the GIL from CPython.
A common problem in such code is an operation with &ldquo;only once&rdquo;
semantics that also ends up requiring the GIL to be held at some point. As per
the above description of &ldquo;once&rdquo;-style techniques, one might find a
static variable:
```c++
// CPython callback, assumes that the GIL is held on entry.
PyObject* InvokeWidget(PyObject* self) {
static PyObject* impl = CreateWidget();
return PyObject_CallOneArg(impl, self);
}
```
This seems reasonable, but bear in mind that there are two mutexes (the "guard
mutex" and "the GIL"), and we must think about the lock order. Otherwise, if the
callback is called from multiple threads, a deadlock may ensue.
Let us consider what we can see here: On entry, the GIL is already locked, and
we are locking the guard mutex. This is one lock order. Inside the initializer
`CreateWidget`, with both mutexes already locked, the function can freely access
the Python runtime.
However, it is entirely possible that `CreateWidget` will want to release the
GIL at one point and reacquire it later:
```c++
// Assumes that the GIL is held on entry.
// Ensures that the GIL is held on exit.
PyObject* CreateWidget() {
// ...
Py_BEGIN_ALLOW_THREADS // releases GIL
// expensive work, not accessing the Python runtime
Py_END_ALLOW_THREADS // acquires GIL, #!
// ...
return result;
}
```
Now we have a second lock order: the guard mutex is locked, and then the GIL is
locked (at `#!`). To see how this deadlocks, consider threads T1 and T2 both
having the runtime attempt to call `InvokeWidget`. T1 locks the GIL and
proceeds, locking the guard mutex and calling `CreateWidget`; T2 is blocked
waiting for the GIL. Then T1 releases the GIL to do &ldquo;expensive
work&rdquo;, and T2 awakes and locks the GIL. Now T2 is blocked trying to
acquire the guard mutex, but T1 is blocked reacquiring the GIL (at `#!`).
In other words: if we want to support &ldquo;once-called&rdquo; functions that
can arbitrarily release and reacquire the GIL, as is very common, then the only
lock order that we can ensure is: guard mutex first, GIL second.
To implement this, we must rewrite our code. Naively, we could always release
the GIL before a `static` variable with blocking initializer:
```c++
// CPython callback, assumes that the GIL is held on entry.
PyObject* InvokeWidget(PyObject* self) {
Py_BEGIN_ALLOW_THREADS // releases GIL
static PyObject* impl = CreateWidget();
Py_END_ALLOW_THREADS // acquires GIL
return PyObject_CallOneArg(impl, self);
}
```
But similar to the `InitOnceNaive` example above, this code cycles the GIL
(possibly descheduling the thread) even when the static variable has already
been initialized. If we want to avoid this, we need to abandon the use of a
static variable, since we do not control the guard mutex well enough. Instead,
we use an operation whose mutex locking is under our control, such as
`call_once`. For example:
```c++
// CPython callback, assumes that the GIL is held on entry.
PyObject* InvokeWidget(PyObject* self) {
static constinit PyObject* impl = nullptr;
static constinit std::atomic<bool> init_done = false;
static constinit absl::once_flag init_flag;
if (!init_done.load(std::memory_order_acquire)) {
Py_BEGIN_ALLOW_THREADS // releases GIL
absl::call_once(init_flag, [&]() {
PyGILState_STATE s = PyGILState_Ensure(); // acquires GIL
impl = CreateWidget();
PyGILState_Release(s); // releases GIL
init_done.store(true, std::memory_order_release);
});
Py_END_ALLOW_THREADS // acquires GIL
}
return PyObject_CallOneArg(impl, self);
}
```
The lock order is now always guard mutex first, GIL second. Unfortunately we
have to duplicate the &ldquo;double-checked done flag&rdquo;, effectively
leading to triple checking, because the flag state inside the `absl::once_flag`
is not accessible to the user. In other words, we cannot ask `init_flag` whether
it has been used yet.
However, we can perform one last, minor optimisation: since we assume that the
GIL is held on entry, and again when the initializing operation returns, the GIL
actually serializes access to our done flag variable, which therefore does not
need to be atomic. (The difference to the previous, atomic code may be small,
depending on the architecture. For example, on x86-64, acquire/release on a bool
is nearly free ([demo](https://godbolt.org/z/P9vYWf4fE)).)
```c++
// CPython callback, assumes that the GIL is held on entry, and indeed anywhere
// directly in this function (i.e. the GIL can be released inside CreateWidget,
// but must be reacquired when that call returns).
PyObject* InvokeWidget(PyObject* self) {
static constinit PyObject* impl = nullptr;
static constinit bool init_done = false; // guarded by GIL
static constinit absl::once_flag init_flag;
if (!init_done) {
Py_BEGIN_ALLOW_THREADS // releases GIL
// (multiple threads may enter here)
absl::call_once(init_flag, [&]() {
// (only one thread enters here)
PyGILState_STATE s = PyGILState_Ensure(); // acquires GIL
impl = CreateWidget();
init_done = true; // (GIL is held)
PyGILState_Release(s); // releases GIL
});
Py_END_ALLOW_THREADS // acquires GIL
}
return PyObject_CallOneArg(impl, self);
}
```
## Debugging tips
* Build with symbols.
* <kbd>Ctrl</kbd>-<kbd>C</kbd> sends `SIGINT`, <kbd>Ctrl</kbd>-<kbd>\\</kbd>
sends `SIGQUIT`. Both have their uses.
* Useful `gdb` commands:
* `py-bt` prints a Python backtrace if you are in a Python frame.
* `thread apply all bt 10` prints the top-10 frames for each thread. A
full backtrace can be prohibitively expensive, and the top few frames
are often good enough.
* `p PyGILState_Check()` shows whether a thread is holding the GIL. For
all threads, run `thread apply all p PyGILState_Check()` to find out
which thread is holding the GIL.
* The `static` variable guard mutex is accessed with functions like
`cxa_guard_acquire` (though this depends on ABI details and can vary).
The guard mutex itself contains information about which thread is
currently holding it.
## Links
* Article on
[double-checked locking](https://preshing.com/20130930/double-checked-locking-is-fixed-in-cpp11/)
* [The Deadlock Empire](https://deadlockempire.github.io/), hands-on exercises
to construct deadlocks

View File

@ -0,0 +1,179 @@
.. _deprecated:
Deprecated
##########
Support for Python 3.8 is deprecated and will be removed in 3.1.
Support for C++11 is deprecated and will be removed in a future version. Please
use at least C++14.
Support for FindPythonLibs (not available in CMake 3.26+ mode) is deprecated
and will be removed in a future version. The default mode is also going to
change to ``"new"`` from ``"compat"`` in the future.
The following features were deprecated before pybind11 3.0, and may be removed
in minor releases of pybind11 3.x.
.. list-table:: Deprecated Features
:header-rows: 1
:widths: 30 15 10
* - Feature
- Deprecated Version
- Year
* - ``py::metaclass()``
- 2.1
- 2017
* - ``PYBIND11_PLUGIN``
- 2.2
- 2017
* - ``py::set_error()`` replacing ``operator()``
- 2.12
- 2024
* - ``get_type_overload``
- 2.6
- 2020
* - ``call()``
- 2.0
- 2016
* - ``.str()``
- ?
-
* - ``.get_type()``
- 2.6
-
* - ``==`` and ``!=``
- 2.2
- 2017
* - ``.check()``
- ?
-
* - ``object(handle, bool)``
- ?
-
* - ``error_already_set.clear()``
- 2.2
- 2017
* - ``obj.attr(…)`` as ``bool``
- ?
-
* - ``.contains``
- ? (maybe 2.4)
-
* - ``py::capsule`` two-argument with destructor
- ?
-
.. _deprecated_enum:
``py::enum_``
=============
This is the original documentation for ``py::enum_``, which is deprecated
because it is not `PEP 435 compatible <https://peps.python.org/pep-0435/>`_
(see also `#2332 <https://github.com/pybind/pybind11/issues/2332>`_).
Please prefer ``py::native_enum`` (added with pybind11v3) when writing
new bindings. See :ref:`native_enum` for more information.
Let's suppose that we have an example class that contains internal types
like enumerations, e.g.:
.. code-block:: cpp
struct Pet {
enum Kind {
Dog = 0,
Cat
};
struct Attributes {
float age = 0;
};
Pet(const std::string &name, Kind type) : name(name), type(type) { }
std::string name;
Kind type;
Attributes attr;
};
The binding code for this example looks as follows:
.. code-block:: cpp
py::class_<Pet> pet(m, "Pet");
pet.def(py::init<const std::string &, Pet::Kind>())
.def_readwrite("name", &Pet::name)
.def_readwrite("type", &Pet::type)
.def_readwrite("attr", &Pet::attr);
py::enum_<Pet::Kind>(pet, "Kind")
.value("Dog", Pet::Kind::Dog)
.value("Cat", Pet::Kind::Cat)
.export_values();
py::class_<Pet::Attributes>(pet, "Attributes")
.def(py::init<>())
.def_readwrite("age", &Pet::Attributes::age);
To ensure that the nested types ``Kind`` and ``Attributes`` are created within the scope of ``Pet``, the
``pet`` ``py::class_`` instance must be supplied to the :class:`enum_` and ``py::class_``
constructor. The :func:`enum_::export_values` function exports the enum entries
into the parent scope, which should be skipped for newer C++11-style strongly
typed enums.
.. code-block:: pycon
>>> p = Pet("Lucy", Pet.Cat)
>>> p.type
Kind.Cat
>>> int(p.type)
1
The entries defined by the enumeration type are exposed in the ``__members__`` property:
.. code-block:: pycon
>>> Pet.Kind.__members__
{'Dog': Kind.Dog, 'Cat': Kind.Cat}
The ``name`` property returns the name of the enum value as a unicode string.
.. note::
It is also possible to use ``str(enum)``, however these accomplish different
goals. The following shows how these two approaches differ.
.. code-block:: pycon
>>> p = Pet("Lucy", Pet.Cat)
>>> pet_type = p.type
>>> pet_type
Pet.Cat
>>> str(pet_type)
'Pet.Cat'
>>> pet_type.name
'Cat'
.. note::
When the special tag ``py::arithmetic()`` is specified to the ``enum_``
constructor, pybind11 creates an enumeration that also supports rudimentary
arithmetic and bit-level operations like comparisons, and, or, xor, negation,
etc.
.. code-block:: cpp
py::enum_<Pet::Kind>(pet, "Kind", py::arithmetic())
...
By default, these are omitted to conserve space.
.. warning::
Contrary to Python customs, enum values from the wrappers should not be compared using ``is``, but with ``==`` (see `#1177 <https://github.com/pybind/pybind11/issues/1177>`_ for background).

View File

@ -18,7 +18,7 @@ information, see :doc:`/compiling`.
.. code-block:: cmake
cmake_minimum_required(VERSION 3.4)
cmake_minimum_required(VERSION 3.15...4.0)
project(example)
find_package(pybind11 REQUIRED) # or `add_subdirectory(pybind11)`
@ -212,6 +212,11 @@ naturally:
assert(locals["message"].cast<std::string>() == "1 + 2 = 3");
}
``PYBIND11_EMBEDDED_MODULE`` also accepts
:func:`py::mod_gil_not_used()`,
:func:`py::multiple_interpreters::per_interpreter_gil()`, and
:func:`py::multiple_interpreters::shared_gil()` tags just like ``PYBIND11_MODULE``.
See :ref:`misc_subinterp` and :ref:`misc_free_threading` for more information.
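For example, a minimal sketch of an embedded module that declares sub-interpreter
support could look like this (the module name is arbitrary):

.. code-block:: cpp

    PYBIND11_EMBEDDED_MODULE(fast_calc, m, py::multiple_interpreters::per_interpreter_gil()) {
        m.def("add", [](int i, int j) { return i + j; });
    }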
Interpreter lifetime
====================
@ -232,31 +237,259 @@ global data. All the details can be found in the CPython documentation.
Creating two concurrent ``scoped_interpreter`` guards is a fatal error. So is
calling ``initialize_interpreter`` for a second time after the interpreter
has already been initialized.
has already been initialized. Use :class:`scoped_subinterpreter` to create
a sub-interpreter. See :ref:`subinterp` for important details on sub-interpreters.
Do not use the raw CPython API functions ``Py_Initialize`` and
``Py_Finalize`` as these do not properly handle the lifetime of
pybind11's internal data.
Sub-interpreter support
=======================
.. _subinterp:
Creating multiple copies of ``scoped_interpreter`` is not possible because it
represents the main Python interpreter. Sub-interpreters are something different
and they do permit the existence of multiple interpreters. This is an advanced
feature of the CPython API and should be handled with care. pybind11 does not
currently offer a C++ interface for sub-interpreters, so refer to the CPython
documentation for all the details regarding this feature.
Embedding Sub-interpreters
==========================
We'll just mention a couple of caveats the sub-interpreters support in pybind11:
A sub-interpreter is a separate interpreter instance which provides a
separate, isolated interpreter environment within the same process as the main
interpreter. Sub-interpreters are created and managed with a separate API from
the main interpreter. Beginning in Python 3.12, sub-interpreters each have
their own Global Interpreter Lock (GIL), which means that running a
sub-interpreter in a separate thread from the main interpreter can achieve true
concurrency.
1. Sub-interpreters will not receive independent copies of embedded modules.
Instead, these are shared and modifications in one interpreter may be
reflected in another.
pybind11's sub-interpreter API can be found in ``pybind11/subinterpreter.h``.
2. Managing multiple threads, multiple interpreters and the GIL can be
challenging and there are several caveats here, even within the pure
CPython API (please refer to the Python docs for details). As for
pybind11, keep in mind that ``gil_scoped_release`` and ``gil_scoped_acquire``
do not take sub-interpreters into account.
pybind11 :class:`subinterpreter` instances can be safely moved and shared between
threads as needed. However, managing multiple threads and the lifetimes of multiple
interpreters and their GILs can be challenging.
Proceed with caution (and lots of testing)!
The main interpreter must be initialized before creating a sub-interpreter, and
the main interpreter must outlive all sub-interpreters. Sub-interpreters are
managed through a different API than the main interpreter.
The :class:`subinterpreter` class manages the lifetime of sub-interpreters.
Instances are movable, but not copyable. Default constructing this class does
*not* create a sub-interpreter (it creates an empty holder). To create a
sub-interpreter, call :func:`subinterpreter::create()`.
.. warning::
Sub-interpreter creation acquires (and subsequently releases) the main
interpreter GIL. If another thread holds the main GIL, the function will
block until the main GIL can be acquired.
Sub-interpreter destruction temporarily activates the sub-interpreter. The
sub-interpreter must not be active (on any threads) at the time the
:class:`subinterpreter` destructor is called.
Both actions will re-acquire any interpreter's GIL that was held prior to
the call before returning (or return to no active interpreter if none was
active at the time of the call).
Each sub-interpreter will import a separate copy of each ``PYBIND11_EMBEDDED_MODULE``
when those modules specify a ``multiple_interpreters`` tag. If a module does not
specify a ``multiple_interpreters`` tag, then Python will report an ``ImportError``
if it is imported in a sub-interpreter.
pybind11 also has a :class:`scoped_subinterpreter` class, which creates and
activates a sub-interpreter when it is constructed, and deactivates and deletes
it when it goes out of scope.
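As a brief sketch of the scoped variant (assuming the main interpreter is already
running):

.. code-block:: cpp

    py::scoped_interpreter main_interp;
    {
        py::scoped_subinterpreter ssi;  // creates and activates a sub-interpreter
        py::exec("x = 40 + 2");         // runs inside the sub-interpreter
    }   // the sub-interpreter is deactivated and deleted here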
Activating a Sub-interpreter
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Once a sub-interpreter is created, you can "activate" it on a thread (and
acquire its GIL) by creating a :class:`subinterpreter_scoped_activate`
instance and passing it the sub-interpreter to be activated. The constructor
will acquire the sub-interpreter's GIL and make the sub-interpreter the
current active interpreter on the current thread for the lifetime of the
instance. When the :class:`subinterpreter_scoped_activate` instance goes out
of scope, the sub-interpreter's GIL is released and the prior interpreter that
was active on the thread (if any) is reactivated and its GIL is re-acquired.
When using ``subinterpreter_scoped_activate``:
1. If the thread holds any interpreter's GIL:
- That GIL is released
2. The new sub-interpreter's GIL is acquired
3. The new sub-interpreter is made active.
4. When the scope ends:
- The sub-interpreter's GIL is released
- If there was a previous interpreter:
- The old interpreter's GIL is re-acquired
- The old interpreter is made active
- Otherwise, no interpreter is currently active and no GIL is held.
Example:
.. code-block:: cpp
py::initialize_interpreter();
// Main GIL is held
{
py::subinterpreter sub = py::subinterpreter::create();
// Main interpreter is still active, main GIL re-acquired
{
py::subinterpreter_scoped_activate guard(sub);
// Sub-interpreter active, thread holds sub's GIL
{
py::subinterpreter_scoped_activate main_guard(py::subinterpreter::main());
// Sub's GIL was automatically released
// Main interpreter active, thread holds main's GIL
}
// Back to sub-interpreter, thread holds sub's GIL again
}
// Main interpreter is active, main's GIL is held
}
GIL API for sub-interpreters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
:class:`gil_scoped_release` and :class:`gil_scoped_acquire` can be used to
manage the GIL of a sub-interpreter just as they do for the main interpreter.
They both manage the GIL of the currently active interpreter, without the
programmer having to do anything special or different. There is one important
caveat:
.. note::
When no interpreter is active through a
:class:`subinterpreter_scoped_activate` instance (such as on a new thread),
:class:`gil_scoped_acquire` will acquire the **main** GIL and
activate the **main** interpreter.
Full Sub-interpreter example
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Here is an example showing how to create and activate sub-interpreters:
.. code-block:: cpp
#include <iostream>
#include <pybind11/embed.h>
#include <pybind11/subinterpreter.h>
namespace py = pybind11;
PYBIND11_EMBEDDED_MODULE(printer, m, py::multiple_interpreters::per_interpreter_gil()) {
m.def("which", [](const std::string& when) {
std::cout << when << "; Current Interpreter is "
<< py::subinterpreter::current().id()
<< std::endl;
});
}
int main() {
py::scoped_interpreter main_interp;
py::module_::import("printer").attr("which")("First init");
{
py::subinterpreter sub = py::subinterpreter::create();
py::module_::import("printer").attr("which")("Created sub");
{
py::subinterpreter_scoped_activate guard(sub);
try {
py::module_::import("printer").attr("which")("Activated sub");
}
catch (py::error_already_set &e) {
std::cerr << "EXCEPTION " << e.what() << std::endl;
return 1;
}
}
py::module_::import("printer").attr("which")("Deactivated sub");
{
py::gil_scoped_release nogil;
{
py::subinterpreter_scoped_activate guard(sub);
try {
{
py::subinterpreter_scoped_activate main_guard(py::subinterpreter::main());
try {
py::module_::import("printer").attr("which")("Main within sub");
}
catch (py::error_already_set &e) {
std::cerr << "EXCEPTION " << e.what() << std::endl;
return 1;
}
}
py::module_::import("printer").attr("which")("After Main, still within sub");
}
catch (py::error_already_set &e) {
std::cerr << "EXCEPTION " << e.what() << std::endl;
return 1;
}
}
}
}
py::module_::import("printer").attr("which")("At end");
return 0;
}
Expected output:
.. code-block:: text
First init; Current Interpreter is 0
Created sub; Current Interpreter is 0
Activated sub; Current Interpreter is 1
Deactivated sub; Current Interpreter is 0
Main within sub; Current Interpreter is 0
After Main, still within sub; Current Interpreter is 1
At end; Current Interpreter is 0
.. warning::
In Python 3.12 sub-interpreters must be destroyed in the same OS thread
that created them. Failure to follow this rule may result in deadlocks
or crashes when destroying the sub-interpreter on the wrong thread.
This constraint is not present in Python 3.13+.
Best Practices for sub-interpreter safety
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Never share Python objects across different interpreters.
- :class:`error_already_set` objects contain a reference to the Python exception type,
and :func:`error_already_set::what()` acquires the GIL. So Python exceptions must
**never** be allowed to propagate past the enclosing
:class:`subinterpreter_scoped_activate` instance!
(So your try/catch should be *just inside* the scope covered by the
:class:`subinterpreter_scoped_activate`.)
- Avoid global/static state whenever possible. Instead, keep state within each interpreter,
such as within the interpreter state dict, which can be accessed via
``subinterpreter::current().state_dict()``, or within instance members and tied to
Python objects.
- Avoid trying to "cache" Python objects in C++ variables across function calls (this is an easy
way to accidentally introduce sub-interpreter bugs). In the code example above, note that we
did not save the result of :func:`module_::import`, in order to avoid accidentally using the
resulting Python object when the wrong interpreter was active.
- Avoid moving or disarming RAII objects managing GIL and sub-interpreter lifetimes. Doing so can
lead to confusion about lifetimes. (For example, accidentally extending a
:class:`subinterpreter_scoped_activate` past the lifetime of its :class:`subinterpreter`.)
- While sub-interpreters each have their own GIL, there can now be multiple independent GILs in one
program so you need to consider the possibility of deadlocks caused by multiple GILs and/or the
interactions of the GIL(s) and your C++ code's own locking.
- When using multiple threads to run independent sub-interpreters, the independent GILs allow
concurrent calls from different interpreters into the same C++ code from different threads.
So you must still consider the thread safety of your C++ code. Remember, in Python 3.12
sub-interpreters must be destroyed on the same thread that they were created on.
- Familiarize yourself with :ref:`misc_concurrency`.

View File

@ -127,8 +127,7 @@ before a global translator is tried.
Inside the translator, ``std::rethrow_exception`` should be used within
a try block to re-throw the exception. One or more catch clauses to catch
the appropriate exceptions should then be used with each clause using
``PyErr_SetString`` to set a Python exception or ``ex(string)`` to set
the python exception to a custom exception type (see below).
``py::set_error()`` (see below).
To declare a custom Python exception type, declare a ``py::exception`` variable
and use this in the associated exception translator (note: it is often useful
@ -142,14 +141,16 @@ standard python RuntimeError:
.. code-block:: cpp
static py::exception<MyCustomException> exc(m, "MyCustomError");
PYBIND11_CONSTINIT static py::gil_safe_call_once_and_store<py::object> exc_storage;
exc_storage.call_once_and_store_result(
[&]() { return py::exception<MyCustomException>(m, "MyCustomError"); });
py::register_exception_translator([](std::exception_ptr p) {
try {
if (p) std::rethrow_exception(p);
} catch (const MyCustomException &e) {
exc(e.what());
py::set_error(exc_storage.get_stored(), e.what());
} catch (const OtherException &e) {
PyErr_SetString(PyExc_RuntimeError, e.what());
py::set_error(PyExc_RuntimeError, e.what());
}
});
@ -168,8 +169,7 @@ section.
.. note::
Call either ``PyErr_SetString`` or a custom exception's call
operator (``exc(string)``) for every exception caught in a custom exception
Call ``py::set_error()`` for every exception caught in a custom exception
translator. Failure to do so will cause Python to crash with ``SystemError:
error return without exception set``.
@ -177,9 +177,12 @@ section.
may be explicitly (re-)thrown to delegate it to the other,
previously-declared existing exception translators.
Note that ``libc++`` and ``libstdc++`` `behave differently <https://stackoverflow.com/questions/19496643/using-clang-fvisibility-hidden-and-typeinfo-and-type-erasure/28827430>`_
with ``-fvisibility=hidden``. Therefore exceptions that are used across ABI boundaries need to be explicitly exported, as exercised in ``tests/test_exceptions.h``.
See also: "Problems with C++ exceptions" under `GCC Wiki <https://gcc.gnu.org/wiki/Visibility>`_.
Note that ``libc++`` and ``libstdc++`` `behave differently under macOS
<https://stackoverflow.com/questions/19496643/using-clang-fvisibility-hidden-and-typeinfo-and-type-erasure/28827430>`_
with ``-fvisibility=hidden``. Therefore exceptions that are used across ABI
boundaries need to be explicitly exported, as exercised in
``tests/test_exceptions.h``. See also:
"Problems with C++ exceptions" under `GCC Wiki <https://gcc.gnu.org/wiki/Visibility>`_.
Local vs Global Exception Translators
@ -197,7 +200,7 @@ If module1 has the following translator:
try {
if (p) std::rethrow_exception(p);
} catch (const std::invalid_argument &e) {
PyErr_SetString("module1 handled this")
py::set_error(PyExc_ArgumentError, "module1 handled this");
}
}
@ -209,7 +212,7 @@ and module2 has the following similar translator:
try {
if (p) std::rethrow_exception(p);
} catch (const std::invalid_argument &e) {
PyErr_SetString("module2 handled this")
py::set_error(PyExc_ArgumentError, "module2 handled this");
}
}
@ -309,11 +312,11 @@ error protocol, which is outlined here.
After calling the Python C API, if Python returns an error,
``throw py::error_already_set();``, which allows pybind11 to deal with the
exception and pass it back to the Python interpreter. This includes calls to
the error setting functions such as ``PyErr_SetString``.
the error setting functions such as ``py::set_error()``.
.. code-block:: cpp
PyErr_SetString(PyExc_TypeError, "C API type error demo");
py::set_error(PyExc_TypeError, "C API type error demo");
throw py::error_already_set();
// But it would be easier to simply...
@ -325,11 +328,33 @@ Alternately, to ignore the error, call `PyErr_Clear
Any Python error must be thrown or cleared, or Python/pybind11 will be left in
an invalid state.
Handling warnings from the Python C API
=======================================
Wrappers for handling Python warnings are provided in ``pybind11/warnings.h``.
This header must be included explicitly; it is not transitively included via
``pybind11/pybind11.h``.
Warnings can be raised with the ``warn`` function:
.. code-block:: cpp
py::warnings::warn("This is a warning!", PyExc_Warning);
// Optionally, a `stack_level` can be specified.
py::warnings::warn("Another one!", PyExc_DeprecationWarning, 3);
New warning types can be registered at the module level using ``new_warning_type``:
.. code-block:: cpp
py::warnings::new_warning_type(m, "CustomWarning", PyExc_RuntimeWarning);
Chaining exceptions ('raise from')
==================================
In Python 3.3 a mechanism for indicating that exceptions were caused by other
exceptions was introduced:
Python has a mechanism for indicating that exceptions were caused by other
exceptions:
.. code-block:: py
@ -340,7 +365,7 @@ exceptions was introduced:
To do a similar thing in pybind11, you can use the ``py::raise_from`` function. It
sets the current python error indicator, so to continue propagating the exception
you should ``throw py::error_already_set()`` (Python 3 only).
you should ``throw py::error_already_set()``.
.. code-block:: cpp
@ -365,8 +390,7 @@ Should they throw or fail to catch any exceptions in their call graph,
the C++ runtime calls ``std::terminate()`` to abort immediately.
Similarly, Python exceptions raised in a class's ``__del__`` method do not
propagate, but are logged by Python as an unraisable error. In Python 3.8+, a
`system hook is triggered
propagate, but ``sys.unraisablehook()`` `is triggered
<https://docs.python.org/3/library/sys.html#sys.unraisablehook>`_
and an auditing event is logged.

View File

@ -16,7 +16,7 @@ lifetime of objects managed by them. This can lead to issues when creating
bindings for functions that return a non-trivial type. Just by looking at the
type information, it is not clear whether Python should take charge of the
returned value and eventually free its resources, or if this is handled on the
C++ side. For this reason, pybind11 provides a several *return value policy*
C++ side. For this reason, pybind11 provides several *return value policy*
annotations that can be passed to the :func:`module_::def` and
:func:`class_::def` functions. The default policy is
:enum:`return_value_policy::automatic`.
@ -81,9 +81,11 @@ The following table provides an overview of available policies:
| | it is no longer used. Warning: undefined behavior will ensue when the C++ |
| | side deletes an object that is still referenced and used by Python. |
+--------------------------------------------------+----------------------------------------------------------------------------+
| :enum:`return_value_policy::reference_internal` | Indicates that the lifetime of the return value is tied to the lifetime |
| | of a parent object, namely the implicit ``this``, or ``self`` argument of |
| | the called method or property. Internally, this policy works just like |
| :enum:`return_value_policy::reference_internal` | If the return value is an lvalue reference or a pointer, the parent object |
| | (the implicit ``this``, or ``self`` argument of the called method or |
| | property) is kept alive for at least the lifespan of the return value. |
| | **Otherwise this policy falls back to :enum:`return_value_policy::move` |
| | (see #5528).** Internally, this policy works just like |
| | :enum:`return_value_policy::reference` but additionally applies a |
| | ``keep_alive<0, 1>`` *call policy* (described in the next section) that |
| | prevents the parent object from being garbage collected as long as the |
@ -372,7 +374,7 @@ like so:
Keyword-only arguments
======================
Python 3 introduced keyword-only arguments by specifying an unnamed ``*``
Python implements keyword-only arguments by specifying an unnamed ``*``
argument in a function definition:
.. code-block:: python
@ -395,19 +397,18 @@ argument annotations when registering the function:
m.def("f", [](int a, int b) { /* ... */ },
py::arg("a"), py::kw_only(), py::arg("b"));
Note that you currently cannot combine this with a ``py::args`` argument. This
feature does *not* require Python 3 to work.
.. versionadded:: 2.6
As of pybind11 2.9, a ``py::args`` argument implies that any following arguments
are keyword-only, as if ``py::kw_only()`` had been specified in the same
relative location of the argument list as the ``py::args`` argument. The
``py::kw_only()`` may be included to be explicit about this, but is not
required. (Prior to 2.9 ``py::args`` may only occur at the end of the argument
list, or immediately before a ``py::kwargs`` argument at the end).
A ``py::args`` argument implies that any following arguments are keyword-only,
as if ``py::kw_only()`` had been specified in the same relative location of the
argument list as the ``py::args`` argument. The ``py::kw_only()`` may be
included to be explicit about this, but is not required.
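For instance, a minimal sketch (function and argument names are made up for
illustration):

.. code-block:: cpp

    // "scale" follows the py::args parameter, so from Python it can only be
    // passed as a keyword argument, e.g. f(1, 2, 3, scale=10).
    m.def("f", [](int first, py::args rest, int scale) {
        int total = first * scale;
        for (auto item : rest)
            total += item.cast<int>() * scale;
        return total;
    }, py::arg("first"), py::arg("scale") = 1);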
.. versionchanged:: 2.9
This can now be combined with ``py::args``. Before, ``py::args`` could only
occur at the end of the argument list, or immediately before a ``py::kwargs``
argument at the end.
.. versionadded:: 2.9
Positional-only arguments
=========================

View File

@ -39,26 +39,55 @@ The ``PYBIND11_MAKE_OPAQUE`` macro does *not* require the above workarounds.
Global Interpreter Lock (GIL)
=============================
When calling a C++ function from Python, the GIL is always held.
The Python C API dictates that the Global Interpreter Lock (GIL) must always
be held by the current thread to safely access Python objects. As a result,
when Python calls into C++ via pybind11 the GIL must be held, and pybind11
will never implicitly release the GIL.
.. code-block:: cpp
void my_function() {
/* GIL is held when this function is called from Python */
}
PYBIND11_MODULE(example, m) {
m.def("my_function", &my_function);
}
pybind11 will ensure that the GIL is held when it knows that it is calling
Python code. For example, if a Python callback is passed to C++ code via
``std::function``, when C++ code calls the function the built-in wrapper
will acquire the GIL before calling the Python callback. Similarly, the
``PYBIND11_OVERRIDE`` family of macros will acquire the GIL before calling
back into Python.
When writing C++ code that is called from other C++ code, if that code accesses
Python state, it must explicitly acquire and release the GIL. A separate
document on deadlocks [#f8]_ elaborates on a particularly subtle interaction
with C++'s block-scope static variable initializer guard mutexes.
.. [#f8] See docs/advanced/deadlock.md
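For example, a rough sketch of C++ code that is reached without the GIL held and
needs to touch Python state (the logging call is purely illustrative):

.. code-block:: cpp

    // Called from plain C++ (e.g. a worker thread) with no GIL held.
    void log_to_python(const std::string &msg) {
        py::gil_scoped_acquire gil;  // acquire the GIL before touching Python
        py::module_::import("logging").attr("warning")(msg);
    }   // the GIL is released again when `gil` goes out of scope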
The classes :class:`gil_scoped_release` and :class:`gil_scoped_acquire` can be
used to acquire and release the global interpreter lock in the body of a C++
function call. In this way, long-running C++ code can be parallelized using
multiple Python threads. Taking :ref:`overriding_virtuals` as an example, this
multiple Python threads, **but great care must be taken** when any
:class:`gil_scoped_release` appear: if there is any way that the C++ code
can access Python objects, :class:`gil_scoped_acquire` should be used to
reacquire the GIL. Taking :ref:`overriding_virtuals` as an example, this
could be realized as follows (important changes highlighted):
.. code-block:: cpp
:emphasize-lines: 8,9,31,32
:emphasize-lines: 8,30,31
class PyAnimal : public Animal {
class PyAnimal : public Animal, public py::trampoline_self_life_support {
public:
/* Inherit the constructors */
using Animal::Animal;
/* Trampoline (need one for each virtual function) */
std::string go(int n_times) {
/* Acquire GIL before calling Python code */
py::gil_scoped_acquire acquire;
/* PYBIND11_OVERRIDE_PURE will acquire the GIL before accessing Python state */
PYBIND11_OVERRIDE_PURE(
std::string, /* Return type */
Animal, /* Parent class */
@ -69,16 +98,17 @@ could be realized as follows (important changes highlighted):
};
PYBIND11_MODULE(example, m) {
py::class_<Animal, PyAnimal> animal(m, "Animal");
py::class_<Animal, PyAnimal, py::smart_holder> animal(m, "Animal");
animal
.def(py::init<>())
.def("go", &Animal::go);
py::class_<Dog>(m, "Dog", animal)
py::class_<Dog, py::smart_holder>(m, "Dog", animal)
.def(py::init<>());
m.def("call_go", [](Animal *animal) -> std::string {
/* Release GIL before calling into (potentially long-running) C++ code */
// GIL is held when called from Python code. Release GIL before
// calling into (potentially long-running) C++ code
py::gil_scoped_release release;
return call_go(animal);
});
@ -92,6 +122,216 @@ The ``call_go`` wrapper can also be simplified using the ``call_guard`` policy
m.def("call_go", &call_go, py::call_guard<py::gil_scoped_release>());
.. _commongilproblems:
Common Sources Of Global Interpreter Lock Errors
==================================================================
Failing to properly hold the Global Interpreter Lock (GIL) is one of the
more common sources of bugs within code that uses pybind11. If you are
running into GIL related errors, we highly recommend you consult the
following checklist.
- Do you have any global variables that are pybind11 objects or invoke
pybind11 functions in either their constructor or destructor? You are generally
not allowed to invoke any Python function in a global static context. We recommend
using lazy initialization and then intentionally leaking at the end of the program (see the sketch after this checklist).
- Do you have any pybind11 objects that are members of other C++ structures? One
commonly overlooked requirement is that pybind11 objects have to increase their reference count
whenever their copy constructor is called. Thus, you need to be holding the GIL to invoke
the copy constructor of any C++ class that has a pybind11 member. This can sometimes be very
tricky to track in complicated programs. Think carefully before making a pybind11 object
a member of another struct.
- C++ destructors that invoke Python functions can be particularly troublesome as
destructors can sometimes get invoked in weird and unexpected circumstances as a result
of exceptions.
- C++ static block-scope variable initialization that calls back into Python can
cause deadlocks; see [#f8]_ for a detailed discussion.
- You should try running your code in a debug build. That will enable additional assertions
within pybind11 that will throw exceptions on certain GIL handling errors
(reference counting operations).
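Following the first checklist item, one possible sketch of lazy initialization with
an intentional leak (the function name and cached object are hypothetical):

.. code-block:: cpp

    // Returns a cached Python object, created on first use. The pointer is
    // intentionally leaked so that no Python API runs during static
    // destruction at program exit. The caller must hold the GIL.
    const py::object &cached_config() {
        static py::object *config = new py::object(py::dict());
        return *config;
    }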
.. _misc_free_threading:
Free-threading support
==================================================================
pybind11 supports the experimental free-threaded builds of Python versions 3.13+.
pybind11's internal data structures are thread safe. To enable your modules to be used with
free-threading, pass the :class:`mod_gil_not_used` tag as the third argument to
``PYBIND11_MODULE``.
For example:
.. code-block:: cpp
:emphasize-lines: 1
PYBIND11_MODULE(example, m, py::mod_gil_not_used()) {
py::class_<Animal> animal(m, "Animal");
// etc
}
Importantly, enabling your module to be used with free-threading is also your promise that
your code is thread safe. Modules must still be built against the Python free-threading branch to
enable free-threading, even if they specify this tag. Adding this tag does not break
compatibility with non-free-threaded Python.
.. _misc_subinterp:
Sub-interpreter support
==================================================================
pybind11 supports isolated sub-interpreters, which are stable in Python 3.12+. pybind11's
internal data structures are sub-interpreter safe. To enable your modules to be imported in
isolated sub-interpreters, pass the :func:`multiple_interpreters::per_interpreter_gil()`
tag as the third or later argument to ``PYBIND11_MODULE``.
For example:
.. code-block:: cpp
:emphasize-lines: 1
PYBIND11_MODULE(example, m, py::multiple_interpreters::per_interpreter_gil()) {
py::class_<Animal> animal(m, "Animal");
// etc
}
Best Practices for Sub-interpreter Safety:
- Your initialization function will run for each interpreter that imports your module.
- Never share Python objects across different sub-interpreters.
- Avoid global/static state whenever possible. Instead, keep state within each interpreter,
such as in instance members tied to Python objects, :func:`globals()`, and the interpreter
state dict.
- Modules without any global/static state in their C++ code may already be sub-interpreter safe
without any additional work!
- Avoid trying to "cache" Python objects in C++ variables across function calls (this is an easy
way to accidentally introduce sub-interpreter bugs).
- While sub-interpreters each have their own GIL, there can now be multiple independent GILs in one
program, so concurrent calls into a module from two different sub-interpreters are still
possible. Therefore, your module still needs to consider thread safety.
pybind11 also supports "legacy" sub-interpreters which shared a single global GIL. You can enable
legacy-only behavior by using the :func:`multiple_interpreters::shared_gil()` tag in
``PYBIND11_MODULE``.
You can explicitly disable sub-interpreter support in your module by using the
:func:`multiple_interpreters::not_supported()` tag. This is the default behavior if you do not
specify a multiple_interpreters tag.
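For example, a minimal sketch of explicitly opting into the legacy behavior (the
module name is arbitrary):

.. code-block:: cpp

    PYBIND11_MODULE(legacy_example, m, py::multiple_interpreters::shared_gil()) {
        // Bindings here may rely on a single GIL shared by all sub-interpreters.
        m.def("answer", []() { return 42; });
    }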
.. _misc_concurrency:
Concurrency and Parallelism in Python with pybind11
===================================================
Sub-interpreter support does not imply free-threading support or vice versa. Free-threading safe
modules can still have global/static state (as long as access to them is thread-safe), but
sub-interpreter safe modules cannot. Likewise, sub-interpreter safe modules can still rely on the
GIL, but free-threading safe modules cannot.
Here is a simple example module which has a function that calculates a value and returns the result
of the previous calculation.
.. code-block:: cpp
PYBIND11_MODULE(example, m) {
static size_t seed = 0;
m.def("calc_next", []() {
auto old = seed;
seed = (seed + 1) * 10;
return old;
});
}
This module is not free-threading safe because there is no synchronization on the number variable.
It is relatively easy to make this free-threading safe. One way is by using atomics, like this:
.. code-block:: cpp
:emphasize-lines: 1,2
PYBIND11_MODULE(example, m, py::mod_gil_not_used()) {
static std::atomic<size_t> seed(0);
m.def("calc_next", []() {
size_t old, next;
do {
old = seed.load();
next = (old + 1) * 10;
} while (!seed.compare_exchange_weak(old, next));
return old;
});
}
The atomic variable and the compare-exchange loop guarantee consistent behavior from this
function even when it is called concurrently from multiple threads.
However, the global/static integer is not sub-interpreter safe, because the calls in one
sub-interpreter will change what is seen in another. To fix it, the state needs to be specific to
each interpreter. One way to do that is by storing the state on another Python object, such as a
member of a class. For this simple example, we will store it in :func:`globals`.
.. code-block:: cpp
:emphasize-lines: 1,6
PYBIND11_MODULE(example, m, py::multiple_interpreters::per_interpreter_gil()) {
m.def("calc_next", []() {
if (!py::globals().contains("myseed"))
py::globals()["myseed"] = 0;
size_t old = py::globals()["myseed"];
py::globals()["myseed"] = (old + 1) * 10;
return old;
});
}
This module is sub-interpreter safe, for both ``shared_gil`` ("legacy") and
``per_interpreter_gil`` ("default") varieties. Multiple sub-interpreters could each call this same
function concurrently from different threads. This is safe because each sub-interpreter's GIL
protects its own Python objects from concurrent access.
However, the module is no longer free-threading safe, for the same reason as
before: the calculation itself is not synchronized. We can synchronize it
using a Python critical section, which does nothing on non-free-threaded
builds of Python. A critical section can lock one or two Python objects, and
it must not be nested.
.. warning::
When using a ``py::scoped_critical_section``, make sure it is not nested and
that no other synchronization primitives (such as a ``std::mutex``) are
held, which could lead to deadlocks. In 3.13, taking the same lock causes it
to release then reacquire, which means you can't use it to, for example, read
and write to a dictionary, because the dictionary uses a critical section
internally in CPython. Use a ``std::mutex`` instead if you need this on
Python 3.13. In 3.14, taking a lock on a locked object no longer releases
and relocks as an optimization, which also fixes this case.
.. code-block:: cpp
:emphasize-lines: 1,4,8
#include <pybind11/critical_section.h>
// ...
PYBIND11_MODULE(example, m, py::multiple_interpreters::per_interpreter_gil(), py::mod_gil_not_used()) {
m.def("calc_next", []() {
size_t old;
py::dict g = py::globals();
py::scoped_critical_section guard(g);
if (!g.contains("myseed"))
g["myseed"] = 0;
old = g["myseed"];
g["myseed"] = (old + 1) * 10;
return old;
});
}
The module is now both sub-interpreter safe and free-threading safe.
Binding sequence data types, iterators, the slicing protocol, etc.
==================================================================
@ -127,7 +367,7 @@ from Section :ref:`inheritance`.
Suppose now that ``Pet`` bindings are defined in a module named ``basic``,
whereas the ``Dog`` bindings are defined somewhere else. The challenge is of
course that the variable ``pet`` is not available anymore though it is needed
to indicate the inheritance relationship to the constructor of ``class_<Dog>``.
to indicate the inheritance relationship to the constructor of ``py::class_<Dog>``.
However, it can be acquired as follows:
.. code-block:: cpp
@ -139,7 +379,7 @@ However, it can be acquired as follows:
.def("bark", &Dog::bark);
Alternatively, you can specify the base class as a template parameter option to
``class_``, which performs an automated lookup of the corresponding Python
``py::class_``, which performs an automated lookup of the corresponding Python
type. Like the above code, however, this also requires invoking the ``import``
function once to ensure that the pybind11 binding code of the module ``basic``
has been executed:
@ -298,6 +538,15 @@ The class ``options`` allows you to selectively suppress auto-generated signatur
m.def("add", [](int a, int b) { return a + b; }, "A function which adds two numbers");
}
pybind11 also appends all members of an enum to the resulting enum docstring.
This default behavior can be disabled by using the ``disable_enum_members_docstring()``
function of the ``options`` class.
With ``disable_user_defined_docstrings()`` all user defined docstrings of
``module_::def()``, ``class_::def()`` and ``enum_()`` are disabled, but the
function signatures and enum members are included in the docstring, unless they
are disabled separately.
Note that changes to the settings affect only function bindings created during the
lifetime of the ``options`` instance. When it goes out of scope at the end of the module's init function,
the default settings are restored to prevent unwanted side effects.
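As a short sketch of scoping such settings (the ``Color`` enum is hypothetical):

.. code-block:: cpp

    enum class Color { Red, Green };

    PYBIND11_MODULE(example, m) {
        py::options options;
        options.disable_enum_members_docstring();

        // Enum bindings created while `options` is alive omit the member list
        // from the generated docstring.
        py::enum_<Color>(m, "Color")
            .value("Red", Color::Red)
            .value("Green", Color::Green);
    }   // defaults are restored when `options` goes out of scope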
@ -335,3 +584,32 @@ before they are used as a parameter or return type of a function:
pyFoo.def(py::init<const ns::Bar&>());
pyBar.def(py::init<const ns::Foo&>());
}
Setting inner type hints in docstrings
======================================
When you use pybind11 wrappers for ``list``, ``dict``, and other generic python
types, the docstring will just display the generic type. You can convey the
inner types in the docstring by using a special 'typed' version of the generic
type.
.. code-block:: cpp
PYBIND11_MODULE(example, m) {
m.def("pass_list_of_str", [](py::typing::List<py::str> arg) {
// arg can be used just like py::list
});
}
The resulting docstring will be ``pass_list_of_str(arg0: list[str]) -> None``.
The following special types are available in ``pybind11/typing.h``:
* ``py::typing::Tuple<Args...>``
* ``py::typing::Dict<K, V>``
* ``py::typing::List<V>``
* ``py::typing::Set<V>``
* ``py::typing::Callable<Signature>``
.. warning:: Just like in python, these are merely hints. They don't actually
enforce the types of their contents at runtime or compile time.

View File

@ -87,8 +87,8 @@ buffer objects (e.g. a NumPy matrix).
/* Request a buffer descriptor from Python */
py::buffer_info info = b.request();
/* Some sanity checks ... */
if (info.format != py::format_descriptor<Scalar>::format())
/* Some basic validation checks ... */
if (!info.item_type_is_equivalent_to<Scalar>())
throw std::runtime_error("Incompatible format: expected a double array!");
if (info.ndim != 2)
@ -217,7 +217,7 @@ expects the type followed by field names:
};
// ...
PYBIND11_MODULE(test, m) {
PYBIND11_MODULE(test, m, py::mod_gil_not_used()) {
// ...
PYBIND11_NUMPY_DTYPE(A, x, y);
@ -232,6 +232,46 @@ prevent many types of unsupported structures, it is still the user's
responsibility to use only "plain" structures that can be safely manipulated as
raw memory without violating invariants.
Scalar types
============
In some cases we may want to accept or return NumPy scalar values such as
``np.float32`` or ``np.float64``, and handle single precision and double
precision separately on the C++ side. By default, however, both are bound to
Python's double-precision builtin ``float``, so they cannot be distinguished.
Working around this with the ``py::buffer`` machinery is possible, but it
makes the code significantly harder to read.
Luckily, there's a helper type for this occasion - ``py::numpy_scalar``:
.. code-block:: cpp
m.def("add", [](py::numpy_scalar<float> a, py::numpy_scalar<float> b) {
return py::make_scalar(a + b);
});
m.def("add", [](py::numpy_scalar<double> a, py::numpy_scalar<double> b) {
return py::make_scalar(a + b);
});
This type is trivially convertible to and from the type it wraps; currently
supported scalar types are NumPy arithmetic types: ``bool_``, ``int8``,
``int16``, ``int32``, ``int64``, ``uint8``, ``uint16``, ``uint32``,
``uint64``, ``float32``, ``float64``, ``complex64``, ``complex128``, all of
them mapping to respective C++ counterparts.
.. note::
``py::numpy_scalar<T>`` strictly matches NumPy scalar types. For example,
``py::numpy_scalar<int64_t>`` will accept ``np.int64(123)``,
but **not** a regular Python ``int`` like ``123``.
.. note::
Native C types are mapped to NumPy types in a platform-specific way: for
instance, ``char`` may be mapped to either ``np.int8`` or ``np.uint8``, and
``long`` may use 4 or 8 bytes depending on the platform. Unless you fully
understand these differences and they match your needs, prefer the
fixed-width types from ``<cstdint>``.
Vectorizing functions
=====================
@ -311,7 +351,7 @@ simply using ``vectorize``).
return result;
}
PYBIND11_MODULE(test, m) {
PYBIND11_MODULE(test, m, py::mod_gil_not_used()) {
m.def("add_arrays", &add_arrays, "Add two NumPy arrays");
}
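The alternative alluded to in the surrounding text ("simply using ``vectorize``")
can be sketched as follows; ``scaled_sum`` is a made-up example function:

.. code-block:: cpp

    #include <pybind11/numpy.h>

    namespace py = pybind11;

    double scaled_sum(double x, double y, float z) { return x + y * z; }

    PYBIND11_MODULE(example_vec, m) {
        // py::vectorize wraps the scalar function so that it broadcasts over
        // NumPy arrays (and still accepts plain scalars) without a hand-written loop.
        m.def("scaled_sum", py::vectorize(scaled_sum));
    }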
@ -378,8 +418,6 @@ uses of ``py::array``:
- ``.itemsize()`` returns the size of an item in bytes, i.e. ``sizeof(T)``.
- ``.ndim()`` returns the number of dimensions.
- ``.shape(n)`` returns the size of dimension ``n``
- ``.size()`` returns the total number of elements (i.e. the product of the shapes).
@ -395,11 +433,9 @@ uses of ``py::array``:
Ellipsis
========
Python 3 provides a convenient ``...`` ellipsis notation that is often used to
Python provides a convenient ``...`` ellipsis notation that is often used to
slice multidimensional arrays. For instance, the following snippet extracts the
middle dimensions of a tensor with the first and last index set to zero.
In Python 2, the syntactic sugar ``...`` is not available, but the singleton
``Ellipsis`` (of type ``ellipsis``) can still be used directly.
.. code-block:: python
@ -414,8 +450,6 @@ operation on the C++ side:
py::array a = /* A NumPy array */;
py::array b = a[py::make_tuple(0, py::ellipsis(), 0)];
.. versionchanged:: 2.6
``py::ellipsis()`` is now also available in Python 2.
Memory view
===========
@ -437,7 +471,7 @@ following:
{ 2, 4 }, // shape (rows, cols)
{ sizeof(uint8_t) * 4, sizeof(uint8_t) } // strides in bytes
);
})
});
This approach is meant for providing a ``memoryview`` for a C/C++ buffer not
managed by Python. The user is responsible for managing the lifetime of the
@ -453,11 +487,7 @@ We can also use ``memoryview::from_memory`` for a simple 1D contiguous buffer:
buffer, // buffer pointer
sizeof(uint8_t) * 8 // buffer size
);
})
.. note::
``memoryview::from_memory`` is not available in Python 2.
});
.. versionchanged:: 2.6
``memoryview::from_memory`` added.

View File

@ -1,11 +1,72 @@
Smart pointers
##############
.. _py_class_holder:
std::unique_ptr
===============
Smart pointers & ``py::class_``
###############################
Given a class ``Example`` with Python bindings, it's possible to return
instances wrapped in C++11 unique pointers, like so
The binding generator for classes, ``py::class_``, can be passed a template
type that denotes a special *holder* type that is used to manage references to
the object. If no such holder type template argument is given, the default for
a type ``T`` is ``std::unique_ptr<T>``.
.. note::
A ``py::class_`` for a given C++ type ``T`` — and all its derived types —
can only use a single holder type.
.. _smart_holder:
``py::smart_holder``
====================
Starting with pybind11v3, ``py::smart_holder`` is built into pybind11. It is
the recommended ``py::class_`` holder for most situations. However, for
backward compatibility it is **not** the default holder, and there are no
plans to make it the default holder in the future.
It is extremely easy to use the safer and more versatile ``py::smart_holder``:
simply add ``py::smart_holder`` to ``py::class_``:
* ``py::class_<T>`` to
* ``py::class_<T, py::smart_holder>``.
.. note::
A shorthand, ``py::classh<T>``, is provided for
``py::class_<T, py::smart_holder>``. The ``h`` in ``py::classh`` stands
for **smart_holder** but is shortened for brevity, ensuring it has the
same number of characters as ``py::class_``. This design choice facilitates
easy experimentation with ``py::smart_holder`` without introducing
distracting whitespace noise in diffs.
The ``py::smart_holder`` functionality includes the following (see the sketch after this list):
* Support for **two-way** Python/C++ conversions for both
``std::unique_ptr<T>`` and ``std::shared_ptr<T>`` **simultaneously**.
* Passing a Python object back to C++ via ``std::unique_ptr<T>``, safely
**disowning** the Python object.
* Safely passing "trampoline" objects (objects with C++ virtual function
overrides implemented in Python, see :ref:`overriding_virtuals`) via
``std::unique_ptr<T>`` or ``std::shared_ptr<T>`` back to C++:
associated Python objects are automatically kept alive for the lifetime
of the smart-pointer.
* Full support for ``std::enable_shared_from_this`` (`cppreference
<http://en.cppreference.com/w/cpp/memory/enable_shared_from_this>`_).
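A minimal sketch of the two-way ``std::unique_ptr`` support; the
``create_example``/``reclaim`` functions are hypothetical:

.. code-block:: cpp

    #include <pybind11/pybind11.h>

    #include <memory>

    namespace py = pybind11;

    class Example { /* ... */ };

    std::unique_ptr<Example> create_example() { return std::make_unique<Example>(); }

    void reclaim(std::unique_ptr<Example> obj) {
        // C++ takes ownership here; the Python object is safely disowned.
    }

    PYBIND11_MODULE(example, m) {
        py::classh<Example>(m, "Example");  // same as py::class_<Example, py::smart_holder>

        m.def("create_example", &create_example);
        m.def("reclaim", &reclaim);  // passing a unique_ptr *into* C++ needs py::smart_holder
    }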
``std::unique_ptr``
===================
This is the default ``py::class_`` holder and works as expected in
most situations. However, handling base-and-derived classes involves a
``reinterpret_cast``, which is, strictly speaking, undefined behavior.
Also note that the ``std::unique_ptr`` holder only supports passing a
``std::unique_ptr`` from C++ to Python, but not the other way around.
For example, the following code works as expected with ``py::class_<Example>``:
.. code-block:: cpp
@ -15,116 +76,54 @@ instances wrapped in C++11 unique pointers, like so
m.def("create_example", &create_example);
In other words, there is nothing special that needs to be done. While returning
unique pointers in this way is allowed, it is *illegal* to use them as function
arguments. For instance, the following function signature cannot be processed
by pybind11.
However, this will fail with ``py::class_<Example>`` (but works with
``py::class_<Example, py::smart_holder>``):
.. code-block:: cpp
void do_something_with_example(std::unique_ptr<Example> ex) { ... }
The above signature would imply that Python needs to give up ownership of an
object that is passed to this function, which is generally not possible (for
instance, the object might be referenced elsewhere).
.. note::
std::shared_ptr
===============
The ``reinterpret_cast`` mentioned above is `here
<https://github.com/pybind/pybind11/blob/30eb39ed79d1e2eeff15219ac00773034300a5e6/include/pybind11/cast.h#L235>`_.
For completeness: The same cast is also applied to ``py::smart_holder``,
but that is safe, because ``py::smart_holder`` is not templated.
The binding generator for classes, :class:`class_`, can be passed a template
type that denotes a special *holder* type that is used to manage references to
the object. If no such holder type template argument is given, the default for
a type named ``Type`` is ``std::unique_ptr<Type>``, which means that the object
is deallocated when Python's reference count goes to zero.
It is possible to switch to other types of reference counting wrappers or smart
pointers, which is useful in codebases that rely on them. For instance, the
following snippet causes ``std::shared_ptr`` to be used instead.
``std::shared_ptr``
===================
It is possible to use ``std::shared_ptr`` as the holder, for example:
.. code-block:: cpp
py::class_<Example, std::shared_ptr<Example> /* <- holder type */> obj(m, "Example");
py::class_<Example, std::shared_ptr<Example> /* <- holder type */>(m, "Example");
Note that any particular class can only be associated with a single holder type.
Compared to using ``py::class_<Example, py::smart_holder>``, there are two noteworthy disadvantages:
One potential stumbling block when using holder types is that they need to be
applied consistently. Can you guess what's broken about the following binding
code?
* Because a ``py::class_`` for a given C++ type ``T`` can only use a
single holder type, ``std::unique_ptr<T>`` cannot even be passed from C++
to Python. This will become apparent only at runtime, often through a
segmentation fault.
.. code-block:: cpp
* Similar to the ``std::unique_ptr`` holder, the handling of base-and-derived
classes involves a ``reinterpret_cast`` that has strictly speaking undefined
behavior, although it works as expected in most situations.
class Child { };
class Parent {
public:
Parent() : child(std::make_shared<Child>()) { }
Child *get_child() { return child.get(); } /* Hint: ** DON'T DO THIS ** */
private:
std::shared_ptr<Child> child;
};
PYBIND11_MODULE(example, m) {
py::class_<Child, std::shared_ptr<Child>>(m, "Child");
py::class_<Parent, std::shared_ptr<Parent>>(m, "Parent")
.def(py::init<>())
.def("get_child", &Parent::get_child);
}
The following Python code will cause undefined behavior (and likely a
segmentation fault).
.. code-block:: python
from example import Parent
print(Parent().get_child())
The problem is that ``Parent::get_child()`` returns a pointer to an instance of
``Child``, but the fact that this instance is already managed by
``std::shared_ptr<...>`` is lost when passing raw pointers. In this case,
pybind11 will create a second independent ``std::shared_ptr<...>`` that also
claims ownership of the pointer. In the end, the object will be freed **twice**
since these shared pointers have no way of knowing about each other.
There are two ways to resolve this issue:
1. For types that are managed by a smart pointer class, never use raw pointers
in function arguments or return values. In other words: always consistently
wrap pointers into their designated holder types (such as
``std::shared_ptr<...>``). In this case, the signature of ``get_child()``
should be modified as follows:
.. code-block:: cpp
std::shared_ptr<Child> get_child() { return child; }
2. Adjust the definition of ``Child`` by specifying
``std::enable_shared_from_this<T>`` (see cppreference_ for details) as a
base class. This adds a small bit of information to ``Child`` that allows
pybind11 to realize that there is already an existing
``std::shared_ptr<...>`` and communicate with it. In this case, the
declaration of ``Child`` should look as follows:
.. _cppreference: http://en.cppreference.com/w/cpp/memory/enable_shared_from_this
.. code-block:: cpp
class Child : public std::enable_shared_from_this<Child> { };
.. _smart_pointers:
Custom smart pointers
=====================
pybind11 supports ``std::unique_ptr`` and ``std::shared_ptr`` right out of the
box. For any other custom smart pointer, transparent conversions can be enabled
using a macro invocation similar to the following. It must be declared at the
top namespace level before any binding code:
For custom smart pointers (e.g. ``c10::intrusive_ptr`` in pytorch), transparent
conversions can be enabled using a macro invocation similar to the following.
It must be declared at the top namespace level before any binding code:
.. code-block:: cpp
PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr<T>);
PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr<T>)
The first argument of :func:`PYBIND11_DECLARE_HOLDER_TYPE` should be a
placeholder name that is used as a template parameter of the second argument.
@ -136,7 +135,7 @@ by default. Specify
.. code-block:: cpp
PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr<T>, true);
PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr<T>, true)
if ``SmartPtr<T>`` can always be initialized from a ``T*`` pointer without the
risk of inconsistencies (such as multiple independent ``SmartPtr`` instances
@ -154,10 +153,10 @@ specialized:
.. code-block:: cpp
// Always needed for custom holder types
PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr<T>);
PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr<T>)
// Only needed if the type's `.get()` goes by another name
namespace pybind11 { namespace detail {
namespace PYBIND11_NAMESPACE { namespace detail {
template <typename T>
struct holder_helper<SmartPtr<T>> { // <-- specialization
static const T *get(const SmartPtr<T> &p) { return p.getPointer(); }
@ -167,6 +166,12 @@ specialized:
The above specialization informs pybind11 that the custom ``SmartPtr`` class
provides ``.get()`` functionality via ``.getPointer()``.
.. note::
The two noteworthy disadvantages mentioned under the ``std::shared_ptr``
section apply similarly to custom smart pointer holders, but there is no
established safe alternative in this case.
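Once declared (and, where necessary, specialized as above), the custom smart
pointer is used as the holder just like the built-in ones. A sketch, assuming a
bound class ``Example`` and that ``SmartPtr<Example>`` can be constructed from an
``Example*``:

.. code-block:: cpp

    // SmartPtr<Example> now manages the lifetime of bound Example instances.
    py::class_<Example, SmartPtr<Example>>(m, "Example")
        .def(py::init<>());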
.. seealso::
The file :file:`tests/test_smart_ptr.cpp` contains a complete example

View File

@ -32,8 +32,7 @@ The last line will both compile and run the tests.
Windows
-------
On Windows, only **Visual Studio 2015** and newer are supported since pybind11 relies
on various C++11 language features that break older versions of Visual Studio.
On Windows, only **Visual Studio 2017** and newer are supported.
.. Note::
@ -79,6 +78,13 @@ For brevity, all code examples assume that the following two lines are present:
namespace py = pybind11;
.. note::
``pybind11/pybind11.h`` includes ``Python.h``; as such, it must be the first file
included in any source file or header for `the same reasons as Python.h`_.
.. _`the same reasons as Python.h`: https://docs.python.org/3/extending/extending.html#a-simple-example
Some features may require additional headers, but those will be specified as needed.
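In practice, this include-order requirement just means keeping the pybind11 header
first in each translation unit; a minimal sketch:

.. code-block:: cpp

    #include <pybind11/pybind11.h>  // must come first: it pulls in Python.h

    #include <string>               // all other headers follow afterwards
    #include <vector>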
.. _simple_example:
@ -102,11 +108,13 @@ a file named :file:`example.cpp` with the following contents:
#include <pybind11/pybind11.h>
namespace py = pybind11;
int add(int i, int j) {
return i + j;
}
PYBIND11_MODULE(example, m) {
PYBIND11_MODULE(example, m, py::mod_gil_not_used()) {
m.doc() = "pybind11 example plugin"; // optional module docstring
m.def("add", &add, "A function that adds two numbers");
@ -136,7 +144,7 @@ On Linux, the above example can be compiled using the following command:
.. code-block:: bash
$ c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) example.cpp -o example$(python3-config --extension-suffix)
$ c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) example.cpp -o example$(python3 -m pybind11 --extension-suffix)
.. note::
@ -166,12 +174,12 @@ load and execute the example:
.. code-block:: pycon
$ python
Python 2.7.10 (default, Aug 22 2015, 20:33:39)
[GCC 4.2.1 Compatible Apple LLVM 7.0.0 (clang-700.0.59.1)] on darwin
Python 3.9.10 (main, Jan 15 2022, 11:48:04)
[Clang 13.0.0 (clang-1300.0.29.3)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import example
>>> example.add(1, 2)
3L
3
>>>
.. _keyword_args:
@ -282,7 +290,7 @@ converted using the function ``py::cast``.
.. code-block:: cpp
PYBIND11_MODULE(example, m) {
PYBIND11_MODULE(example, m, py::mod_gil_not_used()) {
m.attr("the_answer") = 42;
py::object world = py::cast("World");
m.attr("what") = world;

View File

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
import datetime as dt
import os
import random
@ -12,27 +13,27 @@ def generate_dummy_code_pybind11(nclasses=10):
bindings = ""
for cl in range(nclasses):
decl += "class cl%03i;\n" % cl
decl += f"class cl{cl:03};\n"
decl += "\n"
for cl in range(nclasses):
decl += "class cl%03i {\n" % cl
decl += f"class {cl:03} {{\n"
decl += "public:\n"
bindings += ' py::class_<cl%03i>(m, "cl%03i")\n' % (cl, cl)
bindings += f' py::class_<cl{cl:03}>(m, "cl{cl:03}")\n'
for fn in range(nfns):
ret = random.randint(0, nclasses - 1)
params = [random.randint(0, nclasses - 1) for i in range(nargs)]
decl += " cl%03i *fn_%03i(" % (ret, fn)
decl += ", ".join("cl%03i *" % p for p in params)
decl += f" cl{ret:03} *fn_{fn:03}("
decl += ", ".join(f"cl{p:03} *" for p in params)
decl += ");\n"
bindings += ' .def("fn_%03i", &cl%03i::fn_%03i)\n' % (fn, cl, fn)
bindings += f' .def("fn_{fn:03}", &cl{cl:03}::fn_{fn:03})\n'
decl += "};\n\n"
bindings += " ;\n"
result = "#include <pybind11/pybind11.h>\n\n"
result += "namespace py = pybind11;\n\n"
result += decl + "\n"
result += "PYBIND11_MODULE(example, m) {\n"
result += "PYBIND11_MODULE(example, m, py::mod_gil_not_used()) {\n"
result += bindings
result += "}"
return result
@ -43,23 +44,20 @@ def generate_dummy_code_boost(nclasses=10):
bindings = ""
for cl in range(nclasses):
decl += "class cl%03i;\n" % cl
decl += f"class cl{cl:03};\n"
decl += "\n"
for cl in range(nclasses):
decl += "class cl%03i {\n" % cl
decl += f"class cl{cl:03} {{\n"
decl += "public:\n"
bindings += ' py::class_<cl%03i>("cl%03i")\n' % (cl, cl)
bindings += f' py::class_<cl{cl:03}>("cl{cl:03}")\n'
for fn in range(nfns):
ret = random.randint(0, nclasses - 1)
params = [random.randint(0, nclasses - 1) for i in range(nargs)]
decl += " cl%03i *fn_%03i(" % (ret, fn)
decl += ", ".join("cl%03i *" % p for p in params)
decl += f" cl{ret:03} *fn_{fn:03}("
decl += ", ".join(f"cl{p:03} *" for p in params)
decl += ");\n"
bindings += (
' .def("fn_%03i", &cl%03i::fn_%03i, py::return_value_policy<py::manage_new_object>())\n'
% (fn, cl, fn)
)
bindings += f' .def("fn_{fn:03}", &cl{cl:03}::fn_{fn:03}, py::return_value_policy<py::manage_new_object>())\n'
decl += "};\n\n"
bindings += " ;\n"
@ -74,8 +72,8 @@ def generate_dummy_code_boost(nclasses=10):
for codegen in [generate_dummy_code_pybind11, generate_dummy_code_boost]:
print("{")
for i in range(0, 10):
nclasses = 2 ** i
for i in range(10):
nclasses = 2**i
with open("test.cpp", "w") as f:
f.write(codegen(nclasses))
n1 = dt.datetime.now()
@ -87,5 +85,5 @@ for codegen in [generate_dummy_code_pybind11, generate_dummy_code_boost]:
n2 = dt.datetime.now()
elapsed = (n2 - n1).total_seconds()
size = os.stat("test.so").st_size
print(" {%i, %f, %i}," % (nclasses * nfns, elapsed, size))
print(f" {{{nclasses * nfns}, {elapsed:.6f}, {size}}},")
print("}")

View File

@ -31,7 +31,7 @@ Here is an example of the binding code for one class:
};
...
PYBIND11_MODULE(example, m) {
PYBIND11_MODULE(example, m, py::mod_gil_not_used()) {
...
py::class_<cl034>(m, "cl034")
.def("fn_000", &cl034::fn_000)

3234
thirdparty/pybind11/docs/changelog.md vendored Normal file

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -27,18 +27,25 @@ The binding code for ``Pet`` looks as follows:
namespace py = pybind11;
PYBIND11_MODULE(example, m) {
PYBIND11_MODULE(example, m, py::mod_gil_not_used()) {
py::class_<Pet>(m, "Pet")
.def(py::init<const std::string &>())
.def("setName", &Pet::setName)
.def("getName", &Pet::getName);
}
:class:`class_` creates bindings for a C++ *class* or *struct*-style data
``py::class_`` creates bindings for a C++ *class* or *struct*-style data
structure. :func:`init` is a convenience function that takes the types of a
constructor's parameters as template arguments and wraps the corresponding
constructor (see the :ref:`custom_constructors` section for details). An
interactive Python session demonstrating this example is shown below:
constructor (see the :ref:`custom_constructors` section for details).
.. note::
Starting with pybind11v3, it is recommended to include ``py::smart_holder``
in most situations for safety, especially if you plan to support conversions
to C++ smart pointers. See :ref:`smart_holder` for more information.
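Following that recommendation, a sketch of the same ``Pet`` binding with the
safer holder (nothing else changes) could look like:

.. code-block:: cpp

    py::classh<Pet>(m, "Pet")  // shorthand for py::class_<Pet, py::smart_holder>
        .def(py::init<const std::string &>())
        .def("setName", &Pet::setName)
        .def("getName", &Pet::getName);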
An interactive Python session demonstrating this example is shown below:
.. code-block:: pycon
@ -48,16 +55,26 @@ interactive Python session demonstrating this example is shown below:
>>> print(p)
<example.Pet object at 0x10cd98060>
>>> p.getName()
u'Molly'
'Molly'
>>> p.setName("Charly")
>>> p.getName()
u'Charly'
'Charly'
.. seealso::
Static member functions can be bound in the same way using
:func:`class_::def_static`.
.. note::
Binding C++ types in unnamed namespaces (also known as anonymous namespaces)
works reliably on many platforms, but not all. The `XFAIL_CONDITION` in
tests/test_unnamed_namespace_a.py encodes the currently known conditions.
For background see `#4319 <https://github.com/pybind/pybind11/pull/4319>`_.
If portability is a concern, it is therefore not recommended to bind C++
types in unnamed namespaces. It will be safest to manually pick unique
namespace names.
Keyword and default arguments
=============================
It is possible to specify keyword and default arguments using the syntax
@ -124,10 +141,10 @@ This makes it possible to write
>>> p = example.Pet("Molly")
>>> p.name
u'Molly'
'Molly'
>>> p.name = "Charly"
>>> p.name
u'Charly'
'Charly'
Now suppose that ``Pet::name`` was a private internal variable
that can only be accessed via setters and getters.
@ -248,7 +265,7 @@ inheritance relationship:
There are two different ways of indicating a hierarchical relationship to
pybind11: the first specifies the C++ base class as an extra template
parameter of the :class:`class_`:
parameter of the ``py::class_``:
.. code-block:: cpp
@ -262,7 +279,7 @@ parameter of the :class:`class_`:
.def("bark", &Dog::bark);
Alternatively, we can also assign a name to the previously bound ``Pet``
:class:`class_` object and reference it when binding the ``Dog`` class:
``py::class_`` object and reference it when binding the ``Dog`` class:
.. code-block:: cpp
@ -282,9 +299,9 @@ expose fields and methods of both types:
>>> p = example.Dog("Molly")
>>> p.name
u'Molly'
'Molly'
>>> p.bark()
u'woof!'
'woof!'
The C++ classes defined above are regular non-polymorphic types with an
inheritance relationship. This is reflected in Python:
@ -332,7 +349,7 @@ will automatically recognize this:
>>> type(p)
PolymorphicDog # automatically downcast
>>> p.bark()
u'woof!'
'woof!'
Given a pointer to a polymorphic base, pybind11 performs automatic downcasting
to the actual derived type. Note that this goes beyond the usual situation in
@ -434,8 +451,7 @@ you can use ``py::detail::overload_cast_impl`` with an additional set of parenth
.def("set", overload_cast_<int>()(&Pet::set), "Set the pet's age")
.def("set", overload_cast_<const std::string &>()(&Pet::set), "Set the pet's name");
.. [#cpp14] A compiler which supports the ``-std=c++14`` flag
or Visual Studio 2015 Update 2 and newer.
.. [#cpp14] A compiler which supports the ``-std=c++14`` flag.
.. note::
@ -443,6 +459,66 @@ you can use ``py::detail::overload_cast_impl`` with an additional set of parenth
other using the ``.def(py::init<...>())`` syntax. The existing machinery
for specifying keyword and default arguments also works.
☝️ Pitfalls with raw pointers and shared ownership
==================================================
``py::class_``-wrapped objects automatically manage the lifetime of the
wrapped C++ object, in collaboration with the chosen holder type (see
:ref:`py_class_holder`). When wrapping C++ functions involving raw pointers,
care needs to be taken to not accidentally undermine automatic lifetime
management. For example, ownership is inadvertently transferred here:
.. code-block:: cpp
class Child { };
class Parent {
public:
Parent() : child(std::make_shared<Child>()) { }
Child *get_child() { return child.get(); } /* DANGER */
private:
std::shared_ptr<Child> child;
};
PYBIND11_MODULE(example, m, py::mod_gil_not_used()) {
py::class_<Child, std::shared_ptr<Child>>(m, "Child");
py::class_<Parent, std::shared_ptr<Parent>>(m, "Parent")
.def(py::init<>())
.def("get_child", &Parent::get_child); /* PROBLEM */
}
The following Python code leads to undefined behavior, likely resulting in
a segmentation fault.
.. code-block:: python
from example import Parent
print(Parent().get_child())
Part of the ``/* PROBLEM */`` here is that pybind11 falls back to using
``return_value_policy::take_ownership`` as the default (see
:ref:`return_value_policies`). The fact that the ``Child`` instance is
already managed by ``std::shared_ptr<Child>`` is lost. Therefore pybind11
will create a second independent ``std::shared_ptr<Child>`` that also
claims ownership of the pointer, eventually leading to heap-use-after-free
or double-free errors.
There are various ways to resolve this issue, either by changing
the ``Child`` or ``Parent`` C++ implementations (e.g. using
``std::enable_shared_from_this<Child>`` as a base class for
``Child``, or adding a member function to ``Parent`` that returns
``std::shared_ptr<Child>``), or if that is not feasible, by using
``return_value_policy::reference_internal``. Which approach is best
depends on the exact situation.
A highly effective way to stay in the clear — even in pure C++, but
especially when binding C++ code to Python — is to consistently prefer
``std::shared_ptr`` or ``std::unique_ptr`` over passing raw pointers.
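As one concrete sketch of the first option mentioned above, exposing the
``std::shared_ptr`` directly removes the problem:

.. code-block:: cpp

    class Child { };

    class Parent {
    public:
        Parent() : child(std::make_shared<Child>()) { }
        std::shared_ptr<Child> get_child() { return child; }  // shared ownership is preserved
    private:
        std::shared_ptr<Child> child;
    };

    PYBIND11_MODULE(example, m, py::mod_gil_not_used()) {
        py::class_<Child, std::shared_ptr<Child>>(m, "Child");

        py::class_<Parent, std::shared_ptr<Parent>>(m, "Parent")
            .def(py::init<>())
            .def("get_child", &Parent::get_child);  // safe: Python sees the same shared_ptr
    }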
.. _native_enum:
Enumerations and internal types
===============================
@ -471,6 +547,8 @@ The binding code for this example looks as follows:
.. code-block:: cpp
#include <pybind11/native_enum.h> // Not already included with pybind11.h
py::class_<Pet> pet(m, "Pet");
pet.def(py::init<const std::string &, Pet::Kind>())
@ -478,65 +556,97 @@ The binding code for this example looks as follows:
.def_readwrite("type", &Pet::type)
.def_readwrite("attr", &Pet::attr);
py::enum_<Pet::Kind>(pet, "Kind")
py::native_enum<Pet::Kind>(pet, "Kind", "enum.Enum")
.value("Dog", Pet::Kind::Dog)
.value("Cat", Pet::Kind::Cat)
.export_values();
.export_values()
.finalize();
py::class_<Pet::Attributes> attributes(pet, "Attributes")
py::class_<Pet::Attributes>(pet, "Attributes")
.def(py::init<>())
.def_readwrite("age", &Pet::Attributes::age);
To ensure that the nested types ``Kind`` and ``Attributes`` are created within the scope of ``Pet``, the
``pet`` :class:`class_` instance must be supplied to the :class:`enum_` and :class:`class_`
constructor. The :func:`enum_::export_values` function exports the enum entries
into the parent scope, which should be skipped for newer C++11-style strongly
typed enums.
To ensure that the nested types ``Kind`` and ``Attributes`` are created
within the scope of ``Pet``, the ``pet`` ``py::class_`` instance must be
supplied to the ``py::native_enum`` and ``py::class_`` constructors. The
``.export_values()`` function is available for exporting the enum entries
into the parent scope, if desired.
.. code-block:: pycon
>>> p = Pet("Lucy", Pet.Cat)
>>> p.type
Kind.Cat
>>> int(p.type)
1L
The entries defined by the enumeration type are exposed in the ``__members__`` property:
.. code-block:: pycon
>>> Pet.Kind.__members__
{'Dog': Kind.Dog, 'Cat': Kind.Cat}
The ``name`` property returns the name of the enum value as a unicode string.
``py::native_enum`` was introduced with pybind11v3. It binds C++ enum types
to native Python enum types, typically types in Python's
`stdlib enum <https://docs.python.org/3/library/enum.html>`_ module,
which are `PEP 435 compatible <https://peps.python.org/pep-0435/>`_.
This is the recommended way to bind C++ enums.
The older ``py::enum_`` is not PEP 435 compatible
(see `issue #2332 <https://github.com/pybind/pybind11/issues/2332>`_)
but remains supported indefinitely for backward compatibility.
New bindings should prefer ``py::native_enum``.
.. note::
It is also possible to use ``str(enum)``, however these accomplish different
goals. The following shows how these two approaches differ.
The deprecated ``py::enum_`` is :ref:`documented here <deprecated_enum>`.
.. code-block:: pycon
The ``.finalize()`` call above is needed because Python's native enums
cannot be built incrementally — all name/value pairs need to be passed at
once. To achieve this, ``py::native_enum`` acts as a buffer to collect the
name/value pairs. The ``.finalize()`` call uses the accumulated name/value
pairs to build the arguments for constructing a native Python enum type.
>>> p = Pet("Lucy", Pet.Cat)
>>> pet_type = p.type
>>> pet_type
Pet.Cat
>>> str(pet_type)
'Pet.Cat'
>>> pet_type.name
'Cat'
The ``py::native_enum`` constructor takes a third argument,
``native_type_name``, which specifies the fully qualified name of the Python
base class to use — e.g., ``"enum.Enum"`` or ``"enum.IntEnum"``. A fourth
optional argument, ``class_doc``, provides the docstring for the generated
class.
For example:
.. code-block:: cpp
py::native_enum<Pet::Kind>(pet, "Kind", "enum.IntEnum", "Constant specifying the kind of pet")
You may use any fully qualified Python name for ``native_type_name``.
The only requirement is that the named type is similar to
`enum.Enum <https://docs.python.org/3/library/enum.html#enum.Enum>`_
in these ways:
* Has a `constructor similar to that of enum.Enum
<https://docs.python.org/3/howto/enum.html#functional-api>`_::
Colors = enum.Enum("Colors", (("Red", 0), ("Green", 1)))
* A `C++ underlying <https://en.cppreference.com/w/cpp/types/underlying_type>`_
enum value can be passed to the constructor for the Python enum value::
red = Colors(0)
* The enum values have a ``.value`` property yielding a value that
can be cast to the C++ underlying type::
underlying = red.value
As of Python 3.13, the compatible `types in the stdlib enum module
<https://docs.python.org/3/library/enum.html#module-contents>`_ are:
``Enum``, ``IntEnum``, ``Flag``, ``IntFlag``.
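For a module-level enum the pattern is the same; here is a sketch with a
hypothetical ``Access`` flag enum bound to ``enum.IntFlag``:

.. code-block:: cpp

    #include <pybind11/native_enum.h>

    enum class Access : int { Read = 1, Write = 2, Execute = 4 };

    PYBIND11_MODULE(example, m, py::mod_gil_not_used()) {
        // All name/value pairs are collected first; .finalize() builds the Python enum.
        py::native_enum<Access>(m, "Access", "enum.IntFlag", "File access bits")
            .value("Read", Access::Read)
            .value("Write", Access::Write)
            .value("Execute", Access::Execute)
            .finalize();
    }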
.. note::
When the special tag ``py::arithmetic()`` is specified to the ``enum_``
constructor, pybind11 creates an enumeration that also supports rudimentary
arithmetic and bit-level operations like comparisons, and, or, xor, negation,
etc.
In rare cases, a C++ enum may be bound to Python via a
:ref:`custom type caster <custom_type_caster>`. In such cases, a
template specialization like this may be required:
.. code-block:: cpp
py::enum_<Pet::Kind>(pet, "Kind", py::arithmetic())
...
#if defined(PYBIND11_HAS_NATIVE_ENUM)
namespace pybind11::detail {
template <typename FancyEnum>
struct type_caster_enum_type_enabled<
FancyEnum,
enable_if_t<is_fancy_enum<FancyEnum>::value>> : std::false_type {};
}
#endif
By default, these are omitted to conserve space.
This specialization is needed only if the custom type caster is templated.
The ``PYBIND11_HAS_NATIVE_ENUM`` guard is needed only if backward
compatibility with pybind11v2 is required.

View File

@ -3,15 +3,123 @@
Build systems
#############
For an overview of Python packaging including compiled packaging with a pybind11
example, along with a cookiecutter that includes several pybind11 options, see
the `Scientific Python Development Guide`_.
.. _Scientific Python Development Guide: https://learn.scientific-python.org/development/guides/packaging-compiled/
.. scikit-build-core:
Modules with CMake
==================
A Python extension module can be created with just a few lines of code:
.. code-block:: cmake
cmake_minimum_required(VERSION 3.15...4.0)
project(example LANGUAGES CXX)
set(PYBIND11_FINDPYTHON ON)
find_package(pybind11 CONFIG REQUIRED)
pybind11_add_module(example example.cpp)
install(TARGETS example DESTINATION .)
(You can use ``add_subdirectory`` instead; see the example in :ref:`cmake`.) In
this example, the code is located in a file named :file:`example.cpp`. Either
method will import the pybind11 project which provides the
``pybind11_add_module`` function. It will take care of all the details needed
to build a Python extension module on any platform.
To build with pip, build, cibuildwheel, uv, or other Python tools, you can
add a ``pyproject.toml`` file like this:
.. code-block:: toml
[build-system]
requires = ["scikit-build-core", "pybind11"]
build-backend = "scikit_build_core.build"
[project]
name = "example"
version = "0.1.0"
You don't need setuptools files like ``MANIFEST.in``, ``setup.py``, or
``setup.cfg``, as this is not setuptools. See `scikit-build-core`_ for details.
For projects you plan to upload to PyPI, be sure to fill out the ``[project]``
table with other important metadata as well (see `Writing pyproject.toml`_).
A working sample project can be found in the [scikit_build_example]_
repository. An older and harder-to-maintain method is in [cmake_example]_. More
details about our cmake support can be found below in :ref:`cmake`.
.. _scikit-build-core: https://scikit-build-core.readthedocs.io
.. [scikit_build_example] https://github.com/pybind/scikit_build_example
.. [cmake_example] https://github.com/pybind/cmake_example
.. _modules-meson-python:
Modules with meson-python
=========================
You can also build a package with `Meson`_ using `meson-python`_, if you prefer
that. Your ``meson.build`` file would look something like this:
.. _meson-example:
.. code-block:: meson
project(
'example',
'cpp',
version: '0.1.0',
default_options: [
'cpp_std=c++11',
],
)
py = import('python').find_installation(pure: false)
pybind11_dep = dependency('pybind11')
py.extension_module('example',
'example.cpp',
install: true,
dependencies : [pybind11_dep],
)
And you would need a ``pyproject.toml`` file like this:
.. code-block:: toml
[build-system]
requires = ["meson-python", "pybind11"]
build-backend = "mesonpy"
Meson-python *requires* your project to be in git (or mercurial) as it uses it
for the SDist creation. For projects you plan to upload to PyPI, be sure to fill out the
``[project]`` table as well (see `Writing pyproject.toml`_).
.. _Writing pyproject.toml: https://packaging.python.org/en/latest/guides/writing-pyproject-toml
.. _meson: https://mesonbuild.com
.. _meson-python: https://meson-python.readthedocs.io/en/latest
.. _build-setuptools:
Building with setuptools
========================
Modules with setuptools
=======================
For projects on PyPI, building with setuptools is the way to go. Sylvain Corlay
has kindly provided an example project which shows how to set up everything,
including automatic generation of documentation using Sphinx. Please refer to
the [python_example]_ repository.
For projects on PyPI, a historically popular option is setuptools. Sylvain
Corlay has kindly provided an example project which shows how to set up
everything, including automatic generation of documentation using Sphinx.
Please refer to the [python_example]_ repository.
.. [python_example] https://github.com/pybind/python_example
@ -21,11 +129,11 @@ To use pybind11 inside your ``setup.py``, you have to have some system to
ensure that ``pybind11`` is installed when you build your package. There are
four possible ways to do this, and pybind11 supports all four: You can ask all
users to install pybind11 beforehand (bad), you can use
:ref:`setup_helpers-pep518` (good, but very new and requires Pip 10),
:ref:`setup_helpers-setup_requires` (discouraged by Python packagers now that
PEP 518 is available, but it still works everywhere), or you can
:ref:`setup_helpers-copy-manually` (always works but you have to manually sync
your copy to get updates).
:ref:`setup_helpers-pep518` (good), ``setup_requires=`` (discouraged), or you
can :ref:`setup_helpers-copy-manually` (works but you have to manually sync
your copy to get updates). Third party packagers like conda-forge generally
strongly prefer the ``pyproject.toml`` method, as it gives them control over
the ``pybind11`` version, and they may apply patches, etc.
An example of a ``setup.py`` using pybind11's helpers:
@ -122,70 +230,41 @@ version number that includes the number of commits since your last tag and a
hash for a dirty directory. Another way to force a rebuild is to purge your cache
or use Pip's ``--no-cache-dir`` option.
You also need a ``MANIFEST.in`` file to include all relevant files so that you
can make an SDist. If you use `pypa-build`_, that will build an SDist then a
wheel from that SDist by default, so you can look inside those files (wheels
are just zip files with a ``.whl`` extension) to make sure you aren't missing
files. `check-manifest`_ (setuptools specific) or `check-sdist`_ (general) are
CLI tools that can compare the SDist contents with your source control.
.. [Ccache] https://ccache.dev
.. [setuptools_scm] https://github.com/pypa/setuptools_scm
.. _setup_helpers-pep518:
PEP 518 requirements (Pip 10+ required)
---------------------------------------
Build requirements
------------------
If you use `PEP 518's <https://www.python.org/dev/peps/pep-0518/>`_
``pyproject.toml`` file, you can ensure that ``pybind11`` is available during
the compilation of your project. When this file exists, Pip will make a new
virtual environment, download just the packages listed here in ``requires=``,
and build a wheel (binary Python package). It will then throw away the
environment, and install your wheel.
With a ``pyproject.toml`` file, you can ensure that ``pybind11`` is available
during the compilation of your project. When this file exists, Pip will make a
new virtual environment, download just the packages listed here in
``requires=``, and build a wheel (binary Python package). It will then throw
away the environment, and install your wheel.
Your ``pyproject.toml`` file will likely look something like this:
.. code-block:: toml
[build-system]
requires = ["setuptools>=42", "wheel", "pybind11~=2.6.1"]
requires = ["setuptools", "pybind11"]
build-backend = "setuptools.build_meta"
.. note::
The main drawback to this method is that a `PEP 517`_ compliant build tool,
such as Pip 10+, is required for this approach to work; older versions of
Pip completely ignore this file. If you distribute binaries (called wheels
in Python) using something like `cibuildwheel`_, remember that ``setup.py``
and ``pyproject.toml`` are not even contained in the wheel, so this high
Pip requirement is only for source builds, and will not affect users of
your binary wheels. If you are building SDists and wheels, then
`pypa-build`_ is the recommended official tool.
.. _PEP 517: https://www.python.org/dev/peps/pep-0517/
.. _cibuildwheel: https://cibuildwheel.readthedocs.io
.. _pypa-build: https://pypa-build.readthedocs.io/en/latest/
.. _setup_helpers-setup_requires:
Classic ``setup_requires``
--------------------------
If you want to support old versions of Pip with the classic
``setup_requires=["pybind11"]`` keyword argument to setup, which triggers a
two-phase ``setup.py`` run, then you will need to use something like this to
ensure the first pass works (which has not yet installed the ``setup_requires``
packages, since it can't install something it does not know about):
.. code-block:: python
try:
from pybind11.setup_helpers import Pybind11Extension
except ImportError:
from setuptools import Extension as Pybind11Extension
It doesn't matter that the Extension class is not the enhanced subclass for the
first pass run; and the second pass will have the ``setup_requires``
requirements.
This is obviously more of a hack than the PEP 518 method, but it supports
ancient versions of Pip.
.. _cibuildwheel: https://cibuildwheel.pypa.io
.. _pypa-build: https://build.pypa.io/en/latest/
.. _check-manifest: https://pypi.io/project/check-manifest
.. _check-sdist: https://pypi.io/project/check-sdist
.. _setup_helpers-copy-manually:
@ -231,36 +310,33 @@ the C++ source file. Python is then able to find the module and load it.
.. [cppimport] https://github.com/tbenthompson/cppimport
.. _cmake:
Building with CMake
===================
For C++ codebases that have an existing CMake-based build system, a Python
extension module can be created with just a few lines of code:
extension module can be created with just a few lines of code, as seen above in
the module section. Pybind11 currently defaults to the old FindPythonInterp/FindPythonLibs
mechanism, but be aware that CMake 3.27 removed that mechanism, so pybind11 will
automatically switch to the new FindPython mechanism when the old one is not
available. Please opt into the new mechanism (``PYBIND11_FINDPYTHON=ON``) if at
all possible; our default may change in future versions. This is the minimum
required:
.. code-block:: cmake
cmake_minimum_required(VERSION 3.4...3.18)
project(example LANGUAGES CXX)
add_subdirectory(pybind11)
pybind11_add_module(example example.cpp)
This assumes that the pybind11 repository is located in a subdirectory named
:file:`pybind11` and that the code is located in a file named :file:`example.cpp`.
The CMake command ``add_subdirectory`` will import the pybind11 project which
provides the ``pybind11_add_module`` function. It will take care of all the
details needed to build a Python extension module on any platform.
A working sample project, including a way to invoke CMake from :file:`setup.py` for
PyPI integration, can be found in the [cmake_example]_ repository.
.. [cmake_example] https://github.com/pybind/cmake_example
.. versionchanged:: 2.6
CMake 3.4+ is required.
.. versionchanged:: 2.11
CMake 3.5+ is required.
.. versionchanged:: 2.14
CMake 3.15+ is required.
Further information can be found at :doc:`cmake/index`.
pybind11_add_module
@ -315,7 +391,7 @@ that will be respected instead of the built-in flag search.
The ``OPT_SIZE`` flag enables size-based optimization equivalent to the
standard ``/Os`` or ``-Os`` compiler flags and the ``MinSizeRel`` build type,
which avoid optimizations that that can substantially increase the size of the
which avoid optimizations that can substantially increase the size of the
resulting binary. This flag is particularly useful in projects that are split
into performance-critical parts and associated bindings. In this case, we can
compile the project in release mode (and hence, optimize performance globally),
@ -353,7 +429,7 @@ with ``PYTHON_EXECUTABLE``. For example:
.. code-block:: bash
cmake -DPYBIND11_PYTHON_VERSION=3.6 ..
cmake -DPYBIND11_PYTHON_VERSION=3.8 ..
# Another method:
cmake -DPYTHON_EXECUTABLE=/path/to/python ..
@ -371,7 +447,7 @@ See the `Config file`_ docstring for details of relevant CMake variables.
.. code-block:: cmake
cmake_minimum_required(VERSION 3.4...3.18)
cmake_minimum_required(VERSION 3.15...4.0)
project(example LANGUAGES CXX)
find_package(pybind11 REQUIRED)
@ -410,17 +486,16 @@ can refer to the same [cmake_example]_ repository for a full sample project
FindPython mode
---------------
CMake 3.12+ (3.15+ recommended, 3.18.2+ ideal) added a new module called
FindPython that had a highly improved search algorithm and modern targets
and tools. If you use FindPython, pybind11 will detect this and use the
existing targets instead:
Modern CMake (3.18.2+ ideal) added a new module called FindPython that had a
highly improved search algorithm and modern targets and tools. If you use
FindPython, pybind11 will detect this and use the existing targets instead:
.. code-block:: cmake
cmake_minimum_required(VERSION 3.15...3.19)
cmake_minimum_required(VERSION 3.15...4.0)
project(example LANGUAGES CXX)
find_package(Python COMPONENTS Interpreter Development REQUIRED)
find_package(Python 3.8 COMPONENTS Interpreter Development REQUIRED)
find_package(pybind11 CONFIG REQUIRED)
# or add_subdirectory(pybind11)
@ -433,9 +508,8 @@ algorithms from the CMake invocation, with ``-DPYBIND11_FINDPYTHON=ON``.
.. warning::
If you use FindPython2 and FindPython3 to dual-target Python, use the
individual targets listed below, and avoid targets that directly include
Python parts.
If you use FindPython to multi-target Python versions, use the individual
targets listed below, and avoid targets that directly include Python parts.
There are `many ways to hint or force a discovery of a specific Python
installation <https://cmake.org/cmake/help/latest/module/FindPython.html>`_),
@ -462,17 +536,14 @@ available in all modes. The targets provided are:
``pybind11::headers``
Just the pybind11 headers and minimum compile requirements
``pybind11::python2_no_register``
Quiets the warning/error when mixing C++14 or higher and Python 2
``pybind11::pybind11``
Python headers + ``pybind11::headers`` + ``pybind11::python2_no_register`` (Python 2 only)
Python headers + ``pybind11::headers``
``pybind11::python_link_helper``
Just the "linking" part of pybind11:module
``pybind11::module``
Everything for extension modules - ``pybind11::pybind11`` + ``Python::Module`` (FindPython CMake 3.15+) or ``pybind11::python_link_helper``
Everything for extension modules - ``pybind11::pybind11`` + ``Python::Module`` (FindPython) or ``pybind11::python_link_helper``
``pybind11::embed``
Everything for embedding the Python interpreter - ``pybind11::pybind11`` + ``Python::Python`` (FindPython) or Python libs
@ -499,7 +570,7 @@ You can use these targets to build complex applications. For example, the
.. code-block:: cmake
cmake_minimum_required(VERSION 3.4)
cmake_minimum_required(VERSION 3.15...4.0)
project(example LANGUAGES CXX)
find_package(pybind11 REQUIRED) # or add_subdirectory(pybind11)
@ -509,7 +580,10 @@ You can use these targets to build complex applications. For example, the
target_link_libraries(example PRIVATE pybind11::module pybind11::lto pybind11::windows_extras)
pybind11_extension(example)
pybind11_strip(example)
if(NOT MSVC AND NOT ${CMAKE_BUILD_TYPE} MATCHES Debug|RelWithDebInfo)
# Strip unnecessary sections of the binary on Linux/macOS
pybind11_strip(example)
endif()
set_target_properties(example PROPERTIES CXX_VISIBILITY_PRESET "hidden"
CUDA_VISIBILITY_PRESET "hidden")
@ -554,7 +628,7 @@ information about usage in C++, see :doc:`/advanced/embedding`.
.. code-block:: cmake
cmake_minimum_required(VERSION 3.4...3.18)
cmake_minimum_required(VERSION 3.15...4.0)
project(example LANGUAGES CXX)
find_package(pybind11 REQUIRED) # or add_subdirectory(pybind11)
@ -577,21 +651,12 @@ On Linux, you can compile an example such as the one given in
$ c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) example.cpp -o example$(python3-config --extension-suffix)
The flags given here assume that you're using Python 3. For Python 2, just
change the executable appropriately (to ``python`` or ``python2``).
The ``python3 -m pybind11 --includes`` command fetches the include paths for
both pybind11 and Python headers. This assumes that pybind11 has been installed
using ``pip`` or ``conda``. If it hasn't, you can also manually specify
``-I <path-to-pybind11>/include`` together with the Python includes path
``python3-config --includes``.
Note that Python 2.7 modules don't use a special suffix, so you should simply
use ``example.so`` instead of ``example$(python3-config --extension-suffix)``.
Besides, the ``--extension-suffix`` option may or may not be available, depending
on the distribution; in the latter case, the module extension can be manually
set to ``.so``.
On macOS: the build command is almost the same but it also requires passing
the ``-undefined dynamic_lookup`` flag so as to ignore missing symbols when
building the module:
@ -623,6 +688,13 @@ Building with Bazel
You can build with the Bazel build system using the `pybind11_bazel
<https://github.com/pybind/pybind11_bazel>`_ repository.
Building with Meson
===================
You can use Meson, which has support for ``pybind11`` as a dependency (internally
relying on our ``pkg-config`` support). See the :ref:`module example above <meson-example>`.
Generating binding code automatically
=====================================
@ -640,9 +712,20 @@ classes or incorporating modern meta-programming constructs.
.. [AutoWIG] https://github.com/StatisKit/AutoWIG
[robotpy-build]_ is a pure Python, cross-platform build tool that aims to
simplify creation of python wheels for pybind11 projects, and provide
cross-project dependency management. Additionally, it is able to autogenerate
customizable pybind11-based wrappers by parsing C++ header files.
[semiwrap]_ is a build tool that makes it simpler to wrap C/C++ libraries with
pybind11 by automating large portions of the wrapping process and handling some
of the more complex aspects of creating pybind11 based wrappers (especially with
trampolines to allow inheriting from C++ classes from Python). It includes a
hatchling plugin that autogenerates meson.build files that can be built using
meson, and those build files parse your wrapped headers and generate/compile
pybind11 based wrappers into python extension modules.
.. [robotpy-build] https://robotpy-build.readthedocs.io
.. [semiwrap] https://semiwrap.readthedocs.io
[litgen]_ is an automatic python bindings generator with a focus on generating
documented and discoverable bindings: bindings will nicely reproduce the documentation
found in headers. It is based on srcML (srcml.org), a highly scalable, multi-language
parsing tool with a developer centric approach. The API that you want to expose to python
must be C++14 compatible (but your implementation can use more modern constructs).
.. [litgen] https://pthom.github.io/litgen

View File

@ -1,5 +1,4 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pybind11 documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 11 19:23:48 2015.
@ -12,6 +11,7 @@
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import annotations
import os
import re
@ -36,6 +36,8 @@ DIR = Path(__file__).parent.resolve()
# ones.
extensions = [
"breathe",
"myst_parser",
"sphinx_copybutton",
"sphinxcontrib.rsvgconverter",
"sphinxcontrib.moderncmakedomain",
]
@ -48,9 +50,7 @@ breathe_domain_by_extension = {"h": "cpp"}
templates_path = [".templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
@ -68,9 +68,10 @@ author = "Wenzel Jakob"
# built documents.
# Read the listed version
with open("../pybind11/_version.py") as f:
code = compile(f.read(), "../pybind11/_version.py", "exec")
loc = {}
version_file = DIR.parent / "pybind11/_version.py"
with version_file.open(encoding="utf-8") as f:
code = compile(f.read(), version_file, "exec")
loc = {"__file__": str(version_file)}
exec(code, loc)
# The full version, including alpha/beta/rc tags.
@ -81,7 +82,7 @@ version = loc["__version__"]
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
language = "en"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
@ -126,23 +127,7 @@ todo_include_todos = False
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_context = {"css_files": ["_static/theme_overrides.css"]}
else:
html_context = {
"css_files": [
"//media.readthedocs.org/css/sphinx_rtd_theme.css",
"//media.readthedocs.org/css/readthedocs-doc-embed.css",
"_static/theme_overrides.css",
]
}
html_theme = "furo"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
@ -173,6 +158,10 @@ else:
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = [
"css/custom.css",
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
@ -345,9 +334,9 @@ def generate_doxygen_xml(app):
subprocess.call(["doxygen", "--version"])
retcode = subprocess.call(["doxygen"], cwd=app.confdir)
if retcode < 0:
sys.stderr.write("doxygen error code: {}\n".format(-retcode))
sys.stderr.write(f"doxygen error code: {-retcode}\n")
except OSError as e:
sys.stderr.write("doxygen execution failed: {}\n".format(e))
sys.stderr.write(f"doxygen execution failed: {e}\n")
def prepare(app):
@ -365,12 +354,11 @@ def prepare(app):
f.write(contents)
def clean_up(app, exception):
def clean_up(app, exception): # noqa: ARG001
(DIR / "readme.rst").unlink()
def setup(app):
# Add hook for building doxygen xml when needed
app.connect("builder-inited", generate_doxygen_xml)

View File

@ -8,9 +8,7 @@ Frequently asked questions
filename of the extension library (without suffixes such as ``.so``).
2. If the above did not fix the issue, you are likely using an incompatible
version of Python (for instance, the extension library was compiled against
Python 2, while the interpreter is running on top of some version of Python
3, or vice versa).
version of Python that does not match what you compiled with.
"Symbol not found: ``__Py_ZeroStruct`` / ``_PyInstanceMethod_Type``"
========================================================================
@ -92,7 +90,7 @@ following example:
void init_ex2(py::module_ &);
/* ... */
PYBIND11_MODULE(example, m) {
PYBIND11_MODULE(example, m, py::mod_gil_not_used()) {
init_ex1(m);
init_ex2(m);
/* ... */
@ -147,7 +145,7 @@ using C++14 template metaprogramming.
.. _`faq:hidden_visibility`:
"SomeClass declared with greater visibility than the type of its field SomeClass::member [-Wattributes]"
"'SomeClass' declared with greater visibility than the type of its field 'SomeClass::member' [-Wattributes]"
============================================================================================================
This error typically indicates that you are compiling without the required
@ -222,20 +220,6 @@ In addition to decreasing binary size, ``-fvisibility=hidden`` also avoids
potential serious issues when loading multiple modules and is required for
proper pybind operation. See the previous FAQ entry for more details.
Working with ancient Visual Studio 2008 builds on Windows
=========================================================
The official Windows distributions of Python are compiled using truly
ancient versions of Visual Studio that lack good C++11 support. Some users
implicitly assume that it would be impossible to load a plugin built with
Visual Studio 2015 into a Python distribution that was compiled using Visual
Studio 2008. However, no such issue exists: it's perfectly legitimate to
interface DLLs that are built with different compilers and/or C libraries.
Common gotchas to watch out for involve not ``free()``-ing memory region
that that were ``malloc()``-ed in another shared library, using data
structures with incompatible ABIs, and so on. pybind11 is very careful not
to make these types of mistakes.
How can I properly handle Ctrl-C in long-running functions?
===========================================================
@ -251,8 +235,7 @@ been received, you must either explicitly interrupt execution by throwing
.. code-block:: cpp
PYBIND11_MODULE(example, m)
{
PYBIND11_MODULE(example, m, py::mod_gil_not_used()) {
m.def("long running_func", []()
{
for (;;) {
@ -263,6 +246,50 @@ been received, you must either explicitly interrupt execution by throwing
});
}
What is a highly conclusive and simple way to find memory leaks (e.g. in pybind11 bindings)?
============================================================================================
Use ``while True`` & ``top`` (Linux, macOS).
For example, locally change tests/test_type_caster_pyobject_ptr.py like this:
.. code-block:: diff
def test_return_list_pyobject_ptr_reference():
+ while True:
vec_obj = m.return_list_pyobject_ptr_reference(ValueHolder)
assert [e.value for e in vec_obj] == [93, 186]
# Commenting out the next `assert` will leak the Python references.
# An easy way to see evidence of the leaks:
# Insert `while True:` as the first line of this function and monitor the
# process RES (Resident Memory Size) with the Unix top command.
- assert m.dec_ref_each_pyobject_ptr(vec_obj) == 2
+ # assert m.dec_ref_each_pyobject_ptr(vec_obj) == 2
Then run the test as you would normally do, which will go into the infinite loop.
**In another shell, but on the same machine** run:
.. code-block:: bash
top
This will show:
.. code-block::
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
1266095 rwgk 20 0 5207496 611372 45696 R 100.0 0.3 0:08.01 test_type_caste
Look for the number under ``RES`` there. You'll see it going up very quickly.
**Don't forget to Ctrl-C the test command** before your machine becomes
unresponsive due to swapping.
This method only takes a couple minutes of effort and is very conclusive.
What you want to see is that the ``RES`` number is stable after a couple
seconds.
CMake doesn't detect the right Python version
=============================================
@ -274,9 +301,9 @@ CMake configure line. (Replace ``$(which python)`` with a path to python if
your prefer.)
You can alternatively try ``-DPYBIND11_FINDPYTHON=ON``, which will activate the
new CMake FindPython support instead of pybind11's custom search. Requires
CMake 3.12+, and 3.15+ or 3.18.2+ are even better. You can set this in your
``CMakeLists.txt`` before adding or finding pybind11, as well.
new CMake FindPython support instead of pybind11's custom search. Newer CMake,
like 3.18.2+, is recommended. You can set this in your ``CMakeLists.txt``
before adding or finding pybind11, as well.
Inconsistent detection of Python version in CMake and pybind11
==============================================================
@ -289,27 +316,7 @@ Conflicts can arise, however, when using pybind11 in a project that *also* uses
the CMake Python detection in a system with several Python versions installed.
This difference may cause inconsistencies and errors if *both* mechanisms are
used in the same project. Consider the following CMake code executed in a
system with Python 2.7 and 3.x installed:
.. code-block:: cmake
find_package(PythonInterp)
find_package(PythonLibs)
find_package(pybind11)
It will detect Python 2.7 and pybind11 will pick it as well.
In contrast this code:
.. code-block:: cmake
find_package(pybind11)
find_package(PythonInterp)
find_package(PythonLibs)
will detect Python 3.x for pybind11 and may crash on
``find_package(PythonLibs)`` afterwards.
used in the same project.
There are three possible solutions:
@ -317,10 +324,11 @@ There are three possible solutions:
from CMake and rely on pybind11 in detecting Python version. If this is not
possible, the CMake machinery should be called *before* including pybind11.
2. Set ``PYBIND11_FINDPYTHON`` to ``True`` or use ``find_package(Python
COMPONENTS Interpreter Development)`` on modern CMake (3.12+, 3.15+ better,
3.18.2+ best). Pybind11 in these cases uses the new CMake FindPython instead
of the old, deprecated search tools, and these modules are much better at
finding the correct Python.
COMPONENTS Interpreter Development)`` on modern CMake (3.18.2+ best).
Pybind11 in these cases uses the new CMake FindPython instead of the old,
deprecated search tools, and these modules are much better at finding the
correct Python. If FindPythonLibs/Interp are not available (CMake 3.27+),
then this will be ignored and FindPython will be used.
3. Set ``PYBIND11_NOPYTHON`` to ``TRUE``. Pybind11 will not search for Python.
However, you will have to use the target-based system, and do more setup
yourself, because it does not know about or include things that depend on

View File

@ -36,6 +36,7 @@
advanced/pycpp/index
advanced/embedding
advanced/misc
advanced/deprecated
.. toctree::
:caption: Extra Information

View File

@ -87,7 +87,7 @@ Global install with brew
The brew package manager (Homebrew on macOS, or Linuxbrew on Linux) has a
`pybind11 package
<https://github.com/Homebrew/homebrew-core/blob/master/Formula/pybind11.rb>`_.
<https://github.com/Homebrew/homebrew-core/blob/master/Formula/p/pybind11.rb>`_.
To install:
.. code-block:: bash

View File

@ -50,10 +50,6 @@ clean, well written patch would likely be accepted to solve them.
One consequence is that containers of ``char *`` are currently not supported.
`#2245 <https://github.com/pybind/pybind11/issues/2245>`_
- The ``cpptest`` does not run on Windows with Python 3.8 or newer, due to DLL
loader changes. User code that is correctly installed should not be affected.
`#2560 <https://github.com/pybind/pybind11/issue/2560>`_
Python 3.9.0 warning
^^^^^^^^^^^^^^^^^^^^

View File

@ -68,8 +68,8 @@ Convenience functions converting to Python types
.. _extras:
Passing extra arguments to ``def`` or ``class_``
================================================
Passing extra arguments to ``def`` or ``py::class_``
====================================================
.. doxygengroup:: annotations
:members:

View File

@ -1,9 +1,8 @@
On version numbers
^^^^^^^^^^^^^^^^^^
The two version numbers (C++ and Python) must match when combined (checked when
you build the PyPI package), and must be a valid `PEP 440
<https://www.python.org/dev/peps/pep-0440>`_ version when combined.
The version number must be a valid `PEP 440
<https://www.python.org/dev/peps/pep-0440>`_ version number.
For example:
@ -11,87 +10,126 @@ For example:
#define PYBIND11_VERSION_MAJOR X
#define PYBIND11_VERSION_MINOR Y
#define PYBIND11_VERSION_PATCH Z.dev1
#define PYBIND11_VERSION_MICRO Z
#define PYBIND11_VERSION_RELEASE_LEVEL PY_RELEASE_LEVEL_ALPHA
#define PYBIND11_VERSION_RELEASE_SERIAL 0
#define PYBIND11_VERSION_PATCH Za0
For beta, ``PYBIND11_VERSION_PATCH`` should be ``Z.b1``. RC's can be ``Z.rc1``.
Always include the dot (even though PEP 440 allows it to be dropped). For a
final release, this must be a simple integer. There is also a HEX version of
the version just below.
For beta, ``PYBIND11_VERSION_PATCH`` should be ``Zb1``. RCs can be ``Zrc1``.
For a final release, this must be a simple integer.
To release a new version of pybind11:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If you don't have nox, you should either use ``pipx run nox`` instead, or use
``pipx install nox`` or ``brew install nox`` (Unix).
``uv tool install nox``, ``pipx install nox``, or ``brew install nox`` (Unix).
- Update the version number
- Update ``PYBIND11_VERSION_MAJOR`` etc. in
``include/pybind11/detail/common.h``. PATCH should be a simple integer.
- Update the version HEX just below, as well.
- Update ``pybind11/_version.py`` (match above)
- Run ``nox -s tests_packaging`` to ensure this was done correctly.
- Ensure that all the information in ``setup.cfg`` is up-to-date, like
supported Python versions.
- Add release date in ``docs/changelog.rst``.
- Check to make sure
`needs-changelog <https://github.com/pybind/pybind11/pulls?q=is%3Apr+is%3Aclosed+label%3A%22needs+changelog%22>`_
issues are entered in the changelog (clear the label when done).
- ``git add`` and ``git commit``, ``git push``. **Ensure CI passes**. (If it
fails due to a known flake issue, either ignore or restart CI.)
- Add a release branch if this is a new minor version, or update the existing release branch if it is a patch version
- New branch: ``git checkout -b vX.Y``, ``git push -u origin vX.Y``
- Update branch: ``git checkout vX.Y``, ``git merge <release branch>``, ``git push``
- Update ``PYBIND11_VERSION_MAJOR`` etc. in
``include/pybind11/detail/common.h``. MICRO should be a simple integer.
- Run ``nox -s tests_packaging`` to ensure this was done correctly.
- Ensure that all the information in ``pyproject.toml`` is up-to-date, like
supported Python versions.
- Add release date in ``docs/changelog.md`` and integrate the output of
``nox -s make_changelog``.
- Note that the ``nox -s make_changelog`` command inspects
`needs changelog <https://github.com/pybind/pybind11/pulls?q=is%3Apr+is%3Aclosed+label%3A%22needs+changelog%22>`_.
- Manually clear the ``needs changelog`` labels using the GitHub web
interface (very easy: start by clicking the link above).
- ``git add`` and ``git commit``, ``git push``. **Ensure CI passes**. (If it
fails due to a known flake issue, either ignore or restart CI.)
- Add a release branch if this is a new MINOR version, or update the existing
release branch if it is a patch version
- NOTE: This documentation assumes your ``upstream`` is ``https://github.com/pybind/pybind11.git``
- New branch: ``git checkout -b vX.Y``, ``git push -u upstream vX.Y``
- Update branch: ``git checkout vX.Y``, ``git merge <release branch>``, ``git push``
- Update tags (optional; if you skip this, the GitHub release makes a
non-annotated tag for you)
- ``git tag -a vX.Y.Z -m 'vX.Y.Z release'``.
- ``git push --tags``.
non-annotated tag for you)
- ``git tag -a vX.Y.Z -m 'vX.Y.Z release'``
- ``git grep PYBIND11_VERSION include/pybind11/detail/common.h``
- Last-minute consistency check: same as tag?
- Push the new tag: ``git push upstream vX.Y.Z``
- Update stable
- ``git checkout stable``
- ``git merge master``
- ``git push``
- ``git checkout stable``
- ``git merge -X theirs vX.Y.Z``
- ``git diff vX.Y.Z``
- Carefully review and reconcile any diffs. There should be none.
- ``git push``
- Make a GitHub release (this shows up in the UI, sends new release
notifications to users watching releases, and also uploads PyPI packages).
(Note: if you do not use an existing tag, this creates a new lightweight tag
for you, so you could skip the above step.)
- GUI method: Under `releases <https://github.com/pybind/pybind11/releases>`_
click "Draft a new release" on the far right, fill in the tag name
(if you didn't tag above, it will be made here), fill in a release name
like "Version X.Y.Z", and copy-and-paste the markdown-formatted (!) changelog
into the description (usually ``cat docs/changelog.rst | pandoc -f rst -t gfm``).
Check "pre-release" if this is a beta/RC.
- CLI method: with ``gh`` installed, run ``gh release create vX.Y.Z -t "Version X.Y.Z"``
If this is a pre-release, add ``-p``.
- GUI method: Under `releases <https://github.com/pybind/pybind11/releases>`_
click "Draft a new release" on the far right, fill in the tag name
(if you didn't tag above, it will be made here), fill in a release name
like "Version X.Y.Z", and copy-and-paste the markdown-formatted (!) changelog
into the description. You can remove line breaks and optionally strip links
to PRs and issues, e.g. to a bare ``#1234`` without the hyperlink markup.
Check "pre-release" if this is an alpha/beta/RC.
- CLI method: with ``gh`` installed, run ``gh release create vX.Y.Z -t "Version X.Y.Z"``
If this is a pre-release, add ``-p``.
- Get back to work
- Make sure you are on master, not somewhere else: ``git checkout master``
- Update version macros in ``include/pybind11/detail/common.h`` (set PATCH to
``0.dev1`` and increment MINOR).
- Update ``_version.py`` to match
- Run ``nox -s tests_packaging`` to ensure this was done correctly.
- Add a spot for in-development updates in ``docs/changelog.rst``.
- ``git add``, ``git commit``, ``git push``
If a version branch is updated, remember to set PATCH to ``1.dev1``.
- Make sure you are on master, not somewhere else: ``git checkout master``
If you'd like to bump homebrew, run:
- Update version macros in ``include/pybind11/detail/common.h`` (set PATCH to
``0a0`` and increment MINOR).
.. code-block:: console
- Update ``pybind11/_version.py`` to match.
brew bump-formula-pr --url https://github.com/pybind/pybind11/archive/vX.Y.Z.tar.gz
- Run ``nox -s tests_packaging`` to ensure this was done correctly.
- If the release was a new MINOR version, add a new ``IN DEVELOPMENT``
section in ``docs/changelog.md``.
- ``git add``, ``git commit``, ``git push``
If a version branch is updated, remember to set PATCH to ``1a0``.
Conda-forge should automatically make a PR in a few hours, and automatically
merge it if there are no issues.
merge it if there are no issues. Homebrew should be automatic, too.
Manual packaging
^^^^^^^^^^^^^^^^
If you need to manually upload releases, you can download the releases from the job artifacts and upload them with twine. You can also make the files locally (not recommended in general, as your local directory is more likely to be "dirty" and SDists love picking up random unrelated/hidden files); this is the procedure:
If you need to manually upload releases, you can download the releases from
the job artifacts and upload them with twine. You can also make the files
locally (not recommended in general, as your local directory is more likely
to be "dirty" and SDists love picking up random unrelated/hidden files);
this is the procedure:
.. code-block:: bash
nox -s build
nox -s build_global
twine upload dist/*
This makes SDists and wheels, and the final line uploads them.

View File

@ -0,0 +1,7 @@
breathe
furo
myst_parser
sphinx
sphinx-copybutton
sphinxcontrib-moderncmakedomain
sphinxcontrib-svg2pdfconverter

View File

@ -1,5 +1,91 @@
breathe==4.31.0
sphinx==3.5.4
sphinx_rtd_theme==1.0.0
sphinxcontrib-moderncmakedomain==3.19
sphinxcontrib-svg2pdfconverter==1.1.1
# This file was autogenerated by uv via the following command:
# uv pip compile docs/requirements.in -o docs/requirements.txt
alabaster==0.7.16
# via sphinx
babel==2.14.0
# via sphinx
beautifulsoup4==4.12.3
# via furo
breathe==4.35.0
# via -r requirements.in
certifi==2024.7.4
# via requests
charset-normalizer==3.3.2
# via requests
docutils==0.20.1
# via
# breathe
# myst-parser
# sphinx
furo==2024.1.29
# via -r requirements.in
idna==3.7
# via requests
imagesize==1.4.1
# via sphinx
importlib-metadata==8.7.0
# via sphinx
jinja2==3.1.6
# via
# myst-parser
# sphinx
markdown-it-py==3.0.0
# via
# mdit-py-plugins
# myst-parser
markupsafe==2.1.5
# via jinja2
mdit-py-plugins==0.4.2
# via myst-parser
mdurl==0.1.2
# via markdown-it-py
myst-parser==3.0.1
# via -r requirements.in
packaging==24.0
# via sphinx
pygments==2.17.2
# via
# furo
# sphinx
pyyaml==6.0.2
# via myst-parser
requests==2.32.4
# via sphinx
snowballstemmer==2.2.0
# via sphinx
soupsieve==2.5
# via beautifulsoup4
sphinx==7.2.6
# via
# -r requirements.in
# breathe
# furo
# myst-parser
# sphinx-basic-ng
# sphinx-copybutton
# sphinxcontrib-moderncmakedomain
# sphinxcontrib-svg2pdfconverter
sphinx-basic-ng==1.0.0b2
# via furo
sphinx-copybutton==0.5.2
# via -r requirements.in
sphinxcontrib-applehelp==1.0.8
# via sphinx
sphinxcontrib-devhelp==1.0.6
# via sphinx
sphinxcontrib-htmlhelp==2.0.5
# via sphinx
sphinxcontrib-jsmath==1.0.1
# via sphinx
sphinxcontrib-moderncmakedomain==3.27.0
# via -r requirements.in
sphinxcontrib-qthelp==1.0.7
# via sphinx
sphinxcontrib-serializinghtml==1.1.10
# via sphinx
sphinxcontrib-svg2pdfconverter==1.2.2
# via -r requirements.in
urllib3==2.5.0
# via requests
zipp==3.23.0
# via importlib-metadata

View File

@ -8,6 +8,200 @@ to a new version. But it goes into more detail. This includes things like
deprecated APIs and their replacements, build system changes, general code
modernization and other useful information.
.. _upgrade-guide-3.0:
v3.0
====
pybind11 v3.0 introduces major new features, but the vast majority of
existing extensions are expected to build and run without modification. Minor
adjustments may be needed in rare cases, and any such changes can be easily
wrapped in preprocessor conditionals to maintain compatibility with the
2.x series.
However, due to new features and modernizations, extensions built with
pybind11 v3.0 are not ABI-compatible with those built using v2.13. To ensure
cross-extension-module compatibility, it is recommended to rebuild all
pybind11-based extensions with v3.0.
CMake support now defaults to the modern FindPython module. If you haven't
updated yet, we provide some backward compatibility for ``PYTHON_*`` variables,
but you should switch to using ``Python_*`` variables instead. Note that
setting ``PYTHON_*`` variables no longer affects the build.
A major new feature in this release is the integration of
``py::smart_holder``, which improves support for ``std::unique_ptr``
and ``std::shared_ptr``, resolving several long-standing issues. See
:ref:`smart_holder` for details. Closely related is the addition
of ``py::trampoline_self_life_support``, documented under
:ref:`overriding_virtuals`.
This release includes a major modernization of cross-extension-module
ABI compatibility handling. The new implementation reflects actual ABI
compatibility much more accurately than in previous versions. The details
are subtle and complex; see
`#4953 <https://github.com/pybind/pybind11/pull/4953>`_ and
`#5439 <https://github.com/pybind/pybind11/pull/5439>`_.
Also new in v3.0 is ``py::native_enum``, a modern API for exposing
C++ enumerations as native Python types — typically standard-library
``enum.Enum`` or related subclasses. This provides improved integration with
Python's enum system, compared to the older (now deprecated) ``py::enum_``.
See `#5555 <https://github.com/pybind/pybind11/pull/5555>`_ for details.
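For illustration, a minimal sketch of what such a binding can look like (the
``Color`` enum, the module name, and the choice of ``enum.Enum`` as the Python
base are hypothetical):

.. code-block:: cpp

    #include <pybind11/native_enum.h>
    #include <pybind11/pybind11.h>

    namespace py = pybind11;

    enum class Color { red, green, blue };

    PYBIND11_MODULE(example, m) {
        // Exposes Color as a subclass of Python's standard enum.Enum.
        py::native_enum<Color>(m, "Color", "enum.Enum")
            .value("red", Color::red)
            .value("green", Color::green)
            .value("blue", Color::blue)
            .finalize();
    }

Unlike ``py::enum_``, the resulting Python type is a genuine ``enum.Enum``
subclass, so it behaves like any other native Python enum.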
Functions exposed with pybind11 are now pickleable. This removes a
long-standing obstacle when using pybind11-bound functions with Python features
that rely on pickling, such as multiprocessing and caching tools.
See `#5580 <https://github.com/pybind/pybind11/pull/5580>`_ for details.
Anything producing a deprecation warning in the 2.x series may be removed in a
future minor release of 3.x. Most of these are still present in 3.0 in order to ease
transition. The new :ref:`deprecated` page details deprecations.
Migration Recommendations
-------------------------
We recommend migrating to pybind11 v3.0 promptly, while keeping initial
changes to a minimum. Most projects can upgrade simply by updating the
pybind11 version, without altering existing binding code.
After a short stabilization period — enough to surface any subtle issues —
you may incrementally adopt new features where appropriate:
* Use ``py::smart_holder`` and ``py::trampoline_self_life_support`` as needed,
or to improve code health. Note that ``py::classh`` is available as a
shortcut — for example, ``py::classh<Pet>`` is shorthand for
``py::class_<Pet, py::smart_holder>`` (see the sketch after this list). This is designed to enable easy
experimentation with ``py::smart_holder`` without introducing distracting
whitespace changes. In many cases, a global replacement of ``py::class_``
with ``py::classh`` can be an effective first step. Build failures will
quickly identify places where ``std::shared_ptr<...>`` holders need to be
removed. Runtime failures (assuming good unit test coverage) will highlight
base-and-derived class situations that require coordinated changes.
Note that ``py::bind_vector`` and ``py::bind_map`` (in pybind11/stl_bind.h)
have a ``holder_type`` template parameter that defaults to
``std::unique_ptr``. If ``py::smart_holder`` functionality is desired or
required, use e.g. ``py::bind_vector<VecType, py::smart_holder>``.
* Gradually migrate from ``py::enum_`` to ``py::native_enum`` to improve
integration with Python's standard enum types.
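As a concrete illustration of the ``py::classh`` shorthand mentioned in the
first item above (the ``Pet`` type and module name are hypothetical):

.. code-block:: cpp

    #include <pybind11/pybind11.h>

    #include <string>

    namespace py = pybind11;

    struct Pet {
        std::string name;
    };

    PYBIND11_MODULE(example, m) {
        // Equivalent to py::class_<Pet, py::smart_holder>(m, "Pet")
        py::classh<Pet>(m, "Pet")
            .def(py::init<>())
            .def_readwrite("name", &Pet::name);
    }

With ``py::smart_holder`` as the holder, functions returning either
``std::unique_ptr<Pet>`` or ``std::shared_ptr<Pet>`` can be bound without
declaring a holder type explicitly.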
There is no urgency to refactor existing, working bindings — adopt new
features as the need arises or as part of ongoing maintenance efforts.
If you are using CMake, update to FindPython variables (mostly changing
variables from ``PYTHON_*`` -> ``Python_*``). You should see if you can use
``set(PYBIND11_FINDPYTHON ON)``, which has been supported for years and will
avoid setting the compatibility mode variables (and will avoid a warning).
Potential stumbling blocks when migrating to v3.0
-------------------------------------------------
The following issues are very unlikely to arise, and easy to work around:
* In rare cases, a C++ enum may be bound to Python via a
:ref:`custom type caster <custom_type_caster>`. In such cases, a
template specialization like this may be required:
.. code-block:: cpp
#if defined(PYBIND11_HAS_NATIVE_ENUM)
namespace pybind11::detail {
template <typename FancyEnum>
struct type_caster_enum_type_enabled<
FancyEnum,
enable_if_t<is_fancy_enum<FancyEnum>::value>> : std::false_type {};
}
#endif
This specialization is needed only if the custom type caster is templated.
The ``PYBIND11_HAS_NATIVE_ENUM`` guard is needed only
if backward compatibility with pybind11v2 is required.
* Similarly, template specializations like the following may be required
if there are custom
* ``pybind11::detail::copyable_holder_caster`` or
* ``pybind11::detail::move_only_holder_caster``
implementations that are used for ``std::shared_ptr`` or ``std::unique_ptr``
conversions:
.. code-block:: cpp
#if defined(PYBIND11_HAS_INTERNALS_WITH_SMART_HOLDER_SUPPORT)
namespace pybind11::detail {
template <typename ExampleType>
struct copyable_holder_caster_shared_ptr_with_smart_holder_support_enabled<
ExampleType,
enable_if_t<is_example_type<ExampleType>::value>> : std::false_type {};
}
#endif
.. code-block:: cpp
#if defined(PYBIND11_HAS_INTERNALS_WITH_SMART_HOLDER_SUPPORT)
namespace pybind11::detail {
template <typename ExampleType>
struct move_only_holder_caster_unique_ptr_with_smart_holder_support_enabled<
ExampleType,
enable_if_t<is_example_type<ExampleType>::value>> : std::false_type {};
}
#endif
The ``PYBIND11_HAS_INTERNALS_WITH_SMART_HOLDER_SUPPORT`` guard is needed only
if backward compatibility with pybind11v2 is required.
(Note that ``copyable_holder_caster`` and ``move_only_holder_caster`` are not
documented, although they have existed since 2017.)
.. _upgrade-guide-2.12:
v2.12
=====
NumPy support has been upgraded to support the 2.x series too. The two relevant
changes are that:
* ``dtype.flags()`` is now a ``uint64`` and ``dtype.alignment()`` an
``ssize_t`` (and NumPy may return a larger-than-``int`` value for
``itemsize()`` in NumPy 2.x).
* The long-deprecated NumPy function ``PyArray_GetArrayParamsFromObject``
is no longer available.
Due to NumPy changes, you may experience difficulties updating to NumPy 2.
Please see the `NumPy 2 migration guide <https://numpy.org/devdocs/numpy_2_0_migration_guide.html>`_
for details.
For example, one direct change is that the default integer ``"int_"``
(and ``"uint"``) is now ``ssize_t`` rather than ``long`` (this affects 64-bit Windows).
If you want to support only NumPy 1.x for now and are having problems due to
the two internal changes listed above, you can define
``PYBIND11_NUMPY_1_ONLY`` to disable the new support. Make sure you define
this in all pybind11 compile units, since inconsistent use could be a source
of ODR violations. This option will be removed in the future, so adapting
your code is highly recommended.
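A minimal sketch of opting out (the macro must appear before every inclusion
of ``pybind11/numpy.h`` in the project, e.g. via the build system):

.. code-block:: cpp

    // Define consistently in every translation unit that includes pybind11/numpy.h;
    // mixing translation units with and without it risks ODR violations.
    #define PYBIND11_NUMPY_1_ONLY
    #include <pybind11/numpy.h>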
.. _upgrade-guide-2.11:
v2.11
=====
* The minimum version of CMake is now 3.5. A future version will likely move to
requiring something like CMake 3.15. Note that CMake 3.27 is removing the
long-deprecated support for ``FindPythonInterp`` if you set 3.27 as the
minimum or maximum supported version. To prepare for that future, CMake 3.15+
using ``FindPython`` or setting ``PYBIND11_FINDPYTHON`` is highly recommended,
otherwise pybind11 will automatically switch to using ``FindPython`` if
``FindPythonInterp`` is not available.
.. _upgrade-guide-2.9:
v2.9
@ -524,7 +718,7 @@ include a declaration of the form:
PYBIND11_DECLARE_HOLDER_TYPE(T, std::shared_ptr<T>)
Continuing to do so wont cause an error or even a deprecation warning,
Continuing to do so won't cause an error or even a deprecation warning,
but it's completely redundant.

View File

@ -10,7 +10,9 @@
#pragma once
#include "detail/common.h"
#include "cast.h"
#include "trampoline_self_life_support.h"
#include <functional>
@ -25,6 +27,9 @@ struct is_method {
explicit is_method(const handle &c) : class_(c) {}
};
/// Annotation for setters
struct is_setter {};
/// Annotation for operators
struct is_operator {};
@ -61,7 +66,7 @@ struct base {
PYBIND11_DEPRECATED(
"base<T>() was deprecated in favor of specifying 'T' as a template argument to class_")
base() {} // NOLINT(modernize-use-equals-default): breaks MSVC 2015 when adding an attribute
base() = default;
};
/// Keep patient alive while nurse lives
@ -77,13 +82,16 @@ struct dynamic_attr {};
/// Annotation which enables the buffer protocol for a type
struct buffer_protocol {};
/// Annotation which enables releasing the GIL before calling the C++ destructor of wrapped
/// instances (pybind/pybind11#1446).
struct release_gil_before_calling_cpp_dtor {};
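// Usage sketch (hypothetical bound type `Worker`; the annotation is consumed by
// process_attribute<release_gil_before_calling_cpp_dtor> further below):
//     py::class_<Worker>(m, "Worker", py::release_gil_before_calling_cpp_dtor());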
/// Annotation which requests that a special metaclass is created for a type
struct metaclass {
handle value;
PYBIND11_DEPRECATED("py::metaclass() is no longer required. It's turned on by default now.")
// NOLINTNEXTLINE(modernize-use-equals-default): breaks MSVC 2015 when adding an attribute
metaclass() {}
metaclass() = default;
/// Override pybind11's default metaclass
explicit metaclass(handle value) : value(value) {}
@ -185,11 +193,12 @@ struct argument_record {
/// Internal data structure which holds metadata about a bound function (signature, overloads,
/// etc.)
#define PYBIND11_DETAIL_FUNCTION_RECORD_ABI_ID "v1" // PLEASE UPDATE if the struct is changed.
struct function_record {
function_record()
: is_constructor(false), is_new_style_constructor(false), is_stateless(false),
is_operator(false), is_method(false), has_args(false), has_kwargs(false),
prepend(false) {}
is_operator(false), is_method(false), is_setter(false), has_args(false),
has_kwargs(false), prepend(false) {}
/// Function name
char *name = nullptr; /* why no C++ strings? They generate heavier code.. */
@ -230,6 +239,9 @@ struct function_record {
/// True if this is a method
bool is_method : 1;
/// True if this is a setter
bool is_setter : 1;
/// True if the function has a '*args' argument
bool has_args : 1;
@ -261,12 +273,18 @@ struct function_record {
/// Pointer to next overload
function_record *next = nullptr;
};
// The main purpose of this macro is to make it easy to pin-point the critically related code
// sections.
#define PYBIND11_ENSURE_PRECONDITION_FOR_FUNCTIONAL_H_PERFORMANCE_OPTIMIZATIONS(...) \
static_assert( \
__VA_ARGS__, \
"Violation of precondition for pybind11/functional.h performance optimizations!")
/// Special data structure which (temporarily) holds metadata about a bound class
struct type_record {
PYBIND11_NOINLINE type_record()
: multiple_inheritance(false), dynamic_attr(false), buffer_protocol(false),
default_holder(true), module_local(false), is_final(false) {}
module_local(false), is_final(false), release_gil_before_calling_cpp_dtor(false) {}
/// Handle to the parent scope
handle scope;
@ -295,6 +313,12 @@ struct type_record {
/// Function pointer to class_<..>::dealloc
void (*dealloc)(detail::value_and_holder &) = nullptr;
/// Function pointer for casting alias class (aka trampoline) pointer to
/// trampoline_self_life_support pointer. Sidesteps cross-DSO RTTI issues
/// on platforms like macOS (see PR #5728 for details).
get_trampoline_self_life_support_fn get_trampoline_self_life_support
= [](void *) -> trampoline_self_life_support * { return nullptr; };
/// List of base classes of the newly created type
list bases;
@ -316,15 +340,17 @@ struct type_record {
/// Does the class implement the buffer protocol?
bool buffer_protocol : 1;
/// Is the default (unique_ptr) holder type used?
bool default_holder : 1;
/// Is the class definition local to the module shared object?
bool module_local : 1;
/// Is the class inheritable from python classes?
bool is_final : 1;
/// Solves pybind/pybind11#1446
bool release_gil_before_calling_cpp_dtor : 1;
holder_enum_t holder_enum_v = holder_enum_t::undefined;
PYBIND11_NOINLINE void add_base(const std::type_info &base, void *(*caster)(void *) ) {
auto *base_info = detail::get_type_info(base, false);
if (!base_info) {
@ -334,20 +360,26 @@ struct type_record {
+ "\" referenced unknown base type \"" + tname + "\"");
}
if (default_holder != base_info->default_holder) {
// SMART_HOLDER_BAKEIN_FOLLOW_ON: Refine holder compatibility checks.
bool this_has_unique_ptr_holder = (holder_enum_v == holder_enum_t::std_unique_ptr);
bool base_has_unique_ptr_holder
= (base_info->holder_enum_v == holder_enum_t::std_unique_ptr);
if (this_has_unique_ptr_holder != base_has_unique_ptr_holder) {
std::string tname(base.name());
detail::clean_type_id(tname);
pybind11_fail("generic_type: type \"" + std::string(name) + "\" "
+ (default_holder ? "does not have" : "has")
+ (this_has_unique_ptr_holder ? "does not have" : "has")
+ " a non-default holder type while its base \"" + tname + "\" "
+ (base_info->default_holder ? "does not" : "does"));
+ (base_has_unique_ptr_holder ? "does not" : "does"));
}
bases.append((PyObject *) base_info->type);
if (base_info->type->tp_dictoffset != 0) {
dynamic_attr = true;
}
#ifdef PYBIND11_BACKWARD_COMPATIBILITY_TP_DICTOFFSET
dynamic_attr |= base_info->type->tp_dictoffset != 0;
#else
dynamic_attr |= (base_info->type->tp_flags & Py_TPFLAGS_MANAGED_DICT) != 0;
#endif
if (caster) {
base_info->implicit_casts.emplace_back(type, caster);
@ -397,7 +429,7 @@ struct process_attribute<doc> : process_attribute_default<doc> {
template <>
struct process_attribute<const char *> : process_attribute_default<const char *> {
static void init(const char *d, function_record *r) { r->doc = const_cast<char *>(d); }
static void init(const char *d, type_record *r) { r->doc = const_cast<char *>(d); }
static void init(const char *d, type_record *r) { r->doc = d; }
};
template <>
struct process_attribute<char *> : process_attribute<const char *> {};
@ -424,6 +456,12 @@ struct process_attribute<is_method> : process_attribute_default<is_method> {
}
};
/// Process an attribute which indicates that this function is a setter
template <>
struct process_attribute<is_setter> : process_attribute_default<is_setter> {
static void init(const is_setter &, function_record *r) { r->is_setter = true; }
};
/// Process an attribute which indicates the parent scope of a method
template <>
struct process_attribute<scope> : process_attribute_default<scope> {
@ -478,7 +516,7 @@ struct process_attribute<arg_v> : process_attribute_default<arg_v> {
}
if (!a.value) {
#if !defined(NDEBUG)
#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
std::string descr("'");
if (a.name) {
descr += std::string(a.name) + ": ";
@ -499,7 +537,8 @@ struct process_attribute<arg_v> : process_attribute_default<arg_v> {
#else
pybind11_fail("arg(): could not convert default argument "
"into a Python object (type not registered yet?). "
"Compile in debug mode for more information.");
"#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for "
"more information.");
#endif
}
r->args.emplace_back(a.name, a.descr, a.value.inc_ref(), !a.flag_noconvert, a.flag_none);
@ -588,6 +627,14 @@ struct process_attribute<module_local> : process_attribute_default<module_local>
static void init(const module_local &l, type_record *r) { r->module_local = l.value; }
};
template <>
struct process_attribute<release_gil_before_calling_cpp_dtor>
: process_attribute_default<release_gil_before_calling_cpp_dtor> {
static void init(const release_gil_before_calling_cpp_dtor &, type_record *r) {
r->release_gil_before_calling_cpp_dtor = true;
}
};
/// Process a 'prepend' attribute, putting this at the beginning of the overload chain
template <>
struct process_attribute<prepend> : process_attribute_default<prepend> {

View File

@ -37,6 +37,9 @@ inline std::vector<ssize_t> f_strides(const std::vector<ssize_t> &shape, ssize_t
return strides;
}
template <typename T, typename SFINAE = void>
struct compare_buffer_info;
PYBIND11_NAMESPACE_END(detail)
/// Information record describing a Python buffer object
@ -99,22 +102,22 @@ struct buffer_info {
template <typename T>
buffer_info(const T *ptr, ssize_t size, bool readonly = true)
: buffer_info(
const_cast<T *>(ptr), sizeof(T), format_descriptor<T>::format(), size, readonly) {}
const_cast<T *>(ptr), sizeof(T), format_descriptor<T>::format(), size, readonly) {}
explicit buffer_info(Py_buffer *view, bool ownview = true)
: buffer_info(
view->buf,
view->itemsize,
view->format,
view->ndim,
{view->shape, view->shape + view->ndim},
/* Though buffer::request() requests PyBUF_STRIDES, ctypes objects
* ignore this flag and return a view with NULL strides.
* When strides are NULL, build them manually. */
view->strides
? std::vector<ssize_t>(view->strides, view->strides + view->ndim)
: detail::c_strides({view->shape, view->shape + view->ndim}, view->itemsize),
(view->readonly != 0)) {
view->buf,
view->itemsize,
view->format,
view->ndim,
{view->shape, view->shape + view->ndim},
/* Though buffer::request() requests PyBUF_STRIDES, ctypes objects
* ignore this flag and return a view with NULL strides.
* When strides are NULL, build them manually. */
view->strides
? std::vector<ssize_t>(view->strides, view->strides + view->ndim)
: detail::c_strides({view->shape, view->shape + view->ndim}, view->itemsize),
(view->readonly != 0)) {
// NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
this->m_view = view;
// NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
@ -150,6 +153,17 @@ struct buffer_info {
Py_buffer *view() const { return m_view; }
Py_buffer *&view() { return m_view; }
/* True if the buffer item type is equivalent to `T`. */
// To define "equivalent" by example:
// `buffer_info::item_type_is_equivalent_to<int>(b)` and
// `buffer_info::item_type_is_equivalent_to<long>(b)` may both be true
// on some platforms, but `int` and `unsigned` will never be equivalent.
// For the ground truth, please inspect `detail::compare_buffer_info<>`.
template <typename T>
bool item_type_is_equivalent_to() const {
return detail::compare_buffer_info<T>::compare(*this);
}
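// Usage sketch (assuming `info` is a buffer_info obtained via py::buffer::request()):
//     if (info.item_type_is_equivalent_to<double>()) {
//         // info.ptr and info.itemsize can be treated as describing doubles.
//     }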
private:
struct private_ctr_tag {};
@ -162,7 +176,7 @@ private:
detail::any_container<ssize_t> &&strides_in,
bool readonly)
: buffer_info(
ptr, itemsize, format, ndim, std::move(shape_in), std::move(strides_in), readonly) {}
ptr, itemsize, format, ndim, std::move(shape_in), std::move(strides_in), readonly) {}
Py_buffer *m_view = nullptr;
bool ownview = false;
@ -170,9 +184,10 @@ private:
PYBIND11_NAMESPACE_BEGIN(detail)
template <typename T, typename SFINAE = void>
template <typename T, typename SFINAE>
struct compare_buffer_info {
static bool compare(const buffer_info &b) {
// NOLINTNEXTLINE(bugprone-sizeof-expression) Needed for `PyObject *`
return b.format == format_descriptor<T>::format() && b.itemsize == (ssize_t) sizeof(T);
}
};

File diff suppressed because it is too large Load Diff

View File

@ -18,17 +18,6 @@
#include <datetime.h>
#include <mutex>
// Backport the PyDateTime_DELTA functions from Python3.3 if required
#ifndef PyDateTime_DELTA_GET_DAYS
# define PyDateTime_DELTA_GET_DAYS(o) (((PyDateTime_Delta *) o)->days)
#endif
#ifndef PyDateTime_DELTA_GET_SECONDS
# define PyDateTime_DELTA_GET_SECONDS(o) (((PyDateTime_Delta *) o)->seconds)
#endif
#ifndef PyDateTime_DELTA_GET_MICROSECONDS
# define PyDateTime_DELTA_GET_MICROSECONDS(o) (((PyDateTime_Delta *) o)->microseconds)
#endif
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
@ -74,6 +63,9 @@ public:
get_duration(const std::chrono::duration<rep, period> &src) {
return src;
}
static const std::chrono::duration<rep, period> &
get_duration(const std::chrono::duration<rep, period> &&)
= delete;
// If this is a time_point get the time_since_epoch
template <typename Clock>
@ -196,7 +188,7 @@ public:
using us_t = duration<int, std::micro>;
auto us = duration_cast<us_t>(src.time_since_epoch() % seconds(1));
if (us.count() < 0) {
us += seconds(1);
us += duration_cast<us_t>(seconds(1));
}
// Subtract microseconds BEFORE `system_clock::to_time_t`, because:

View File

@ -0,0 +1,15 @@
NOTE
----
The C++ code here
** only depends on <Python.h> **
and nothing else.
DO NOT ADD CODE WITH OTHER EXTERNAL DEPENDENCIES TO THIS DIRECTORY.
Read on:
pybind11_conduit_v1.h — Type-safe interoperability between different
independent Python/C++ bindings systems.

View File

@ -0,0 +1,116 @@
// Copyright (c) 2024 The pybind Community.
/* The pybind11_conduit_v1 feature enables type-safe interoperability between
* different independent Python/C++ bindings systems,
* including pybind11 versions with different PYBIND11_INTERNALS_VERSION's.
* NOTE: The conduit feature
only covers from-Python-to-C++ conversions, it
does not cover from-C++-to-Python conversions.
(For the latter, a different feature would have to be added.)
The naming of the feature is a bit misleading:
* The feature is in no way tied to pybind11 internals.
* It just happens to originate from pybind11 and currently still lives there.
* The only external dependency is <Python.h>.
The implementation is a VERY light-weight dependency. It is designed to be
compatible with any ISO C++11 (or higher) compiler, and does NOT require
C++ Exception Handling to be enabled.
Please see https://github.com/pybind/pybind11/pull/5296 for more background.
The implementation involves a
def _pybind11_conduit_v1_(
self,
pybind11_platform_abi_id: bytes,
cpp_type_info_capsule: capsule,
pointer_kind: bytes) -> capsule
method that is meant to be added to Python objects wrapping C++ objects
(e.g. pybind11::class_-wrapped types).
The design of the _pybind11_conduit_v1_ feature provides two layers of
protection against C++ ABI mismatches:
* The first and most important layer is that the pybind11_platform_abi_id's
must match between extensions. This will never be perfect, but is the same
pragmatic approach used in pybind11 since 2017
(https://github.com/pybind/pybind11/commit/96997a4b9d4ec3d389a570604394af5d5eee2557,
PYBIND11_INTERNALS_ID).
* The second layer is that the typeid(std::type_info).name()'s must match
between extensions.
The implementation below (which is shorter than this comment!), serves as a
battle-tested specification. The main API is this one function:
auto *cpp_pointer = pybind11_conduit_v1::get_type_pointer_ephemeral<YourType>(py_obj);
It is meant to be a minimalistic reference implementation, intentionally
without comprehensive error reporting. It is expected that major bindings
systems will roll their own, compatible implementations, potentially with
system-specific error reporting. The essential specifications all bindings
systems need to agree on are merely:
* PYBIND11_PLATFORM_ABI_ID (const char* literal).
* The cpp_type_info capsule (see below: a void *ptr and a const char *name).
* The cpp_conduit capsule (see below: a void *ptr and a const char *name).
* "raw_pointer_ephemeral" means: the lifetime of the pointer is the lifetime
of the py_obj.
*/
// THIS MUST STAY AT THE TOP!
#include "pybind11_platform_abi_id.h"
#include <Python.h>
#include <typeinfo>
namespace pybind11_conduit_v1 {
inline void *get_raw_pointer_ephemeral(PyObject *py_obj, const std::type_info *cpp_type_info) {
PyObject *cpp_type_info_capsule
= PyCapsule_New(const_cast<void *>(static_cast<const void *>(cpp_type_info)),
typeid(std::type_info).name(),
nullptr);
if (cpp_type_info_capsule == nullptr) {
return nullptr;
}
PyObject *cpp_conduit = PyObject_CallMethod(py_obj,
"_pybind11_conduit_v1_",
"yOy",
PYBIND11_PLATFORM_ABI_ID,
cpp_type_info_capsule,
"raw_pointer_ephemeral");
Py_DECREF(cpp_type_info_capsule);
if (cpp_conduit == nullptr) {
return nullptr;
}
void *raw_ptr = PyCapsule_GetPointer(cpp_conduit, cpp_type_info->name());
Py_DECREF(cpp_conduit);
if (PyErr_Occurred()) {
return nullptr;
}
return raw_ptr;
}
template <typename T>
T *get_type_pointer_ephemeral(PyObject *py_obj) {
void *raw_ptr = get_raw_pointer_ephemeral(py_obj, &typeid(T));
if (raw_ptr == nullptr) {
return nullptr;
}
return static_cast<T *>(raw_ptr);
}
} // namespace pybind11_conduit_v1
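// Usage sketch (hypothetical `MyType` bound by some pybind11-based extension;
// `py_obj` wraps an instance of it):
//     if (auto *p = pybind11_conduit_v1::get_type_pointer_ephemeral<MyType>(py_obj)) {
//         // Use *p only while py_obj is alive ("raw_pointer_ephemeral" semantics).
//     } else {
//         // Conversion failed; a Python error may be set (check PyErr_Occurred()).
//     }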

View File

@ -0,0 +1,87 @@
#pragma once
// Copyright (c) 2024 The pybind Community.
// To maximize reusability:
// DO NOT ADD CODE THAT REQUIRES C++ EXCEPTION HANDLING.
#include "wrap_include_python_h.h"
// Implementation details. DO NOT USE ELSEWHERE. (Unfortunately we cannot #undef them.)
// This is duplicated here to maximize portability.
#define PYBIND11_PLATFORM_ABI_ID_STRINGIFY(x) #x
#define PYBIND11_PLATFORM_ABI_ID_TOSTRING(x) PYBIND11_PLATFORM_ABI_ID_STRINGIFY(x)
#ifdef PYBIND11_COMPILER_TYPE
// // To maintain backward compatibility (see PR #5439).
# define PYBIND11_COMPILER_TYPE_LEADING_UNDERSCORE ""
#else
# define PYBIND11_COMPILER_TYPE_LEADING_UNDERSCORE "_"
# if defined(__MINGW32__)
# define PYBIND11_COMPILER_TYPE "mingw"
# elif defined(__CYGWIN__)
# define PYBIND11_COMPILER_TYPE "gcc_cygwin"
# elif defined(_MSC_VER)
# define PYBIND11_COMPILER_TYPE "msvc"
# elif defined(__clang__) || defined(__GNUC__)
# define PYBIND11_COMPILER_TYPE "system" // Assumed compatible with system compiler.
# else
# error "Unknown PYBIND11_COMPILER_TYPE: PLEASE REVISE THIS CODE."
# endif
#endif
// PR #5439 made this macro obsolete. However, there are many manipulations of this macro in the
// wild. Therefore, to maintain backward compatibility, it is kept around.
#ifndef PYBIND11_STDLIB
# define PYBIND11_STDLIB ""
#endif
#ifndef PYBIND11_BUILD_ABI
# if defined(_MSC_VER) // See PR #4953.
# if defined(_MT) && defined(_DLL) // Corresponding to CL command line options /MD or /MDd.
# if (_MSC_VER) / 100 == 19
# define PYBIND11_BUILD_ABI "_md_mscver19"
# else
# error "Unknown major version for MSC_VER: PLEASE REVISE THIS CODE."
# endif
# elif defined(_MT) // Corresponding to CL command line options /MT or /MTd.
# define PYBIND11_BUILD_ABI "_mt_mscver" PYBIND11_PLATFORM_ABI_ID_TOSTRING(_MSC_VER)
# else
# if (_MSC_VER) / 100 == 19
# define PYBIND11_BUILD_ABI "_none_mscver19"
# else
# error "Unknown major version for MSC_VER: PLEASE REVISE THIS CODE."
# endif
# endif
# elif defined(_LIBCPP_ABI_VERSION) // https://libcxx.llvm.org/DesignDocs/ABIVersioning.html
# define PYBIND11_BUILD_ABI \
"_libcpp_abi" PYBIND11_PLATFORM_ABI_ID_TOSTRING(_LIBCPP_ABI_VERSION)
# elif defined(_GLIBCXX_USE_CXX11_ABI) // See PR #5439.
# if defined(__NVCOMPILER)
// // Assume that NVHPC is in the 1xxx ABI family.
// // THIS ASSUMPTION IS NOT FUTURE PROOF but apparently the best we can do.
// // Please let us know if there is a way to validate the assumption here.
# elif !defined(__GXX_ABI_VERSION)
# error \
"Unknown platform or compiler (_GLIBCXX_USE_CXX11_ABI): PLEASE REVISE THIS CODE."
# endif
# if defined(__GXX_ABI_VERSION) && __GXX_ABI_VERSION < 1002 || __GXX_ABI_VERSION >= 2000
# error "Unknown platform or compiler (__GXX_ABI_VERSION): PLEASE REVISE THIS CODE."
# endif
# define PYBIND11_BUILD_ABI \
"_libstdcpp_gxx_abi_1xxx_use_cxx11_abi_" PYBIND11_PLATFORM_ABI_ID_TOSTRING( \
_GLIBCXX_USE_CXX11_ABI)
# else
# error "Unknown platform or compiler: PLEASE REVISE THIS CODE."
# endif
#endif
// On MSVC, debug and release builds are not ABI-compatible!
#if defined(_MSC_VER) && defined(_DEBUG)
# define PYBIND11_BUILD_TYPE "_debug"
#else
# define PYBIND11_BUILD_TYPE ""
#endif
#define PYBIND11_PLATFORM_ABI_ID \
PYBIND11_COMPILER_TYPE PYBIND11_STDLIB PYBIND11_BUILD_ABI PYBIND11_BUILD_TYPE

View File

@ -0,0 +1,58 @@
#pragma once
// Copyright (c) 2024 The pybind Community.
// STRONG REQUIREMENT:
// This header is a wrapper around `#include <Python.h>`, therefore it
// MUST BE INCLUDED BEFORE ANY STANDARD HEADERS are included.
// See also:
// https://docs.python.org/3/c-api/intro.html#include-files
// Quoting from there:
// Note: Since Python may define some pre-processor definitions which affect
// the standard headers on some systems, you must include Python.h before
// any standard headers are included.
// To maximize reusability:
// DO NOT ADD CODE THAT REQUIRES C++ EXCEPTION HANDLING.
// Don't let Python.h #define (v)snprintf as macro because they are implemented
// properly in Visual Studio since 2015.
#if defined(_MSC_VER)
# define HAVE_SNPRINTF 1
#endif
#if defined(_MSC_VER)
# pragma warning(push)
# pragma warning(disable : 4505)
// C4505: 'PySlice_GetIndicesEx': unreferenced local function has been removed
#endif
#include <Python.h>
#include <frameobject.h>
#include <pythread.h>
#if defined(_MSC_VER)
# pragma warning(pop)
#endif
#if defined(PYBIND11_DEBUG_MARKER)
# define _DEBUG 1
# undef PYBIND11_DEBUG_MARKER
#endif
// Python #defines overrides on all sorts of core functions, which
// tends to wreak havoc in C++ codebases that expect these to work
// like regular functions (potentially with several overloads).
#if defined(isalnum)
# undef isalnum
# undef isalpha
# undef islower
# undef isspace
# undef isupper
# undef tolower
# undef toupper
#endif
#if defined(copysign)
# undef copysign
#endif

View File

@ -0,0 +1,56 @@
// Copyright (c) 2016-2025 The Pybind Development Team.
// All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#pragma once
#include "pytypes.h"
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
/// This does not do anything if there's a GIL. On free-threaded Python,
/// it locks an object. This uses the CPython API, which has limits
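/// Usage sketch (assuming `obj` is a valid py::handle owned by the caller):
///     {
///         scoped_critical_section guard(obj); // no-op with a GIL; locks obj on free-threaded builds
///         // ... safely mutate the object ...
///     }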
class scoped_critical_section {
public:
#ifdef Py_GIL_DISABLED
explicit scoped_critical_section(handle obj1, handle obj2 = handle{}) {
if (obj1) {
if (obj2) {
PyCriticalSection2_Begin(&section2, obj1.ptr(), obj2.ptr());
rank = 2;
} else {
PyCriticalSection_Begin(&section, obj1.ptr());
rank = 1;
}
} else if (obj2) {
PyCriticalSection_Begin(&section, obj2.ptr());
rank = 1;
}
}
~scoped_critical_section() {
if (rank == 1) {
PyCriticalSection_End(&section);
} else if (rank == 2) {
PyCriticalSection2_End(&section2);
}
}
#else
explicit scoped_critical_section(handle, handle = handle{}) {};
~scoped_critical_section() = default;
#endif
scoped_critical_section(const scoped_critical_section &) = delete;
scoped_critical_section &operator=(const scoped_critical_section &) = delete;
private:
#ifdef Py_GIL_DISABLED
int rank{0};
union {
PyCriticalSection section;
PyCriticalSection2 section2;
};
#endif
};
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)

View File

@ -9,18 +9,20 @@
#pragma once
#include "../attr.h"
#include "../options.h"
#include <pybind11/attr.h>
#include <pybind11/options.h>
#include "exception_translation.h"
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
#if PY_VERSION_HEX >= 0x03030000 && !defined(PYPY_VERSION)
#if !defined(PYPY_VERSION)
# define PYBIND11_BUILTIN_QUALNAME
# define PYBIND11_SET_OLDPY_QUALNAME(obj, nameobj)
#else
// In pre-3.3 Python, we still set __qualname__ so that we can produce reliable function type
// signatures; in 3.3+ this macro expands to nothing:
// In PyPy, we still set __qualname__ so that we can produce reliable function type
// signatures; in CPython this macro expands to nothing:
# define PYBIND11_SET_OLDPY_QUALNAME(obj, nameobj) \
setattr((PyObject *) obj, "__qualname__", nameobj)
#endif
@ -55,6 +57,9 @@ extern "C" inline int pybind11_static_set(PyObject *self, PyObject *obj, PyObjec
return PyProperty_Type.tp_descr_set(self, cls, value);
}
// Forward declaration to use in `make_static_property_type()`
inline void enable_dynamic_attributes(PyHeapTypeObject *heap_type);
/** A `static_property` is the same as a `property` but the `__get__()` and `__set__()`
methods are modified to always use the object type instead of a concrete instance.
Return value: New reference. */
@ -83,11 +88,17 @@ inline PyTypeObject *make_static_property_type() {
type->tp_descr_get = pybind11_static_get;
type->tp_descr_set = pybind11_static_set;
# if PY_VERSION_HEX >= 0x030C0000
// Since Python-3.12 property-derived types are required to
// have dynamic attributes (to set `__doc__`)
enable_dynamic_attributes(heap_type);
# endif
if (PyType_Ready(type) < 0) {
pybind11_fail("make_static_property_type(): failure in PyType_Ready()!");
}
setattr((PyObject *) type, "__module__", str("pybind11_builtins"));
setattr((PyObject *) type, "__module__", str(PYBIND11_DUMMY_MODULE_NAME));
PYBIND11_SET_OLDPY_QUALNAME(type, name_obj);
return type;
@ -155,7 +166,6 @@ extern "C" inline int pybind11_meta_setattro(PyObject *obj, PyObject *name, PyOb
}
}
#if PY_MAJOR_VERSION >= 3
/**
* Python 3's PyInstanceMethod_Type hides itself via its tp_descr_get, which prevents aliasing
* methods via cls.attr("m2") = cls.attr("m1"): instead the tp_descr_get returns a plain function,
@ -170,7 +180,6 @@ extern "C" inline PyObject *pybind11_meta_getattro(PyObject *obj, PyObject *name
}
return PyType_Type.tp_getattro(obj, name);
}
#endif
/// metaclass `__call__` function that is used to create all pybind11 objects.
extern "C" inline PyObject *pybind11_meta_call(PyObject *type, PyObject *args, PyObject *kwargs) {
@ -181,12 +190,10 @@ extern "C" inline PyObject *pybind11_meta_call(PyObject *type, PyObject *args, P
return nullptr;
}
// This must be a pybind11 instance
auto *instance = reinterpret_cast<detail::instance *>(self);
// Ensure that the base __init__ function(s) were called
for (const auto &vh : values_and_holders(instance)) {
if (!vh.holder_constructed()) {
values_and_holders vhs(self);
for (const auto &vh : vhs) {
if (!vh.holder_constructed() && !vhs.is_redundant_value_and_holder(vh)) {
PyErr_Format(PyExc_TypeError,
"%.200s.__init__() must be called when overriding __init__",
get_fully_qualified_tp_name(vh.type->type).c_str());
@ -200,39 +207,40 @@ extern "C" inline PyObject *pybind11_meta_call(PyObject *type, PyObject *args, P
/// Cleanup the type-info for a pybind11-registered type.
extern "C" inline void pybind11_meta_dealloc(PyObject *obj) {
auto *type = (PyTypeObject *) obj;
auto &internals = get_internals();
with_internals([obj](internals &internals) {
auto *type = (PyTypeObject *) obj;
// A pybind11-registered type will:
// 1) be found in internals.registered_types_py
// 2) have exactly one associated `detail::type_info`
auto found_type = internals.registered_types_py.find(type);
if (found_type != internals.registered_types_py.end() && found_type->second.size() == 1
&& found_type->second[0]->type == type) {
// A pybind11-registered type will:
// 1) be found in internals.registered_types_py
// 2) have exactly one associated `detail::type_info`
auto found_type = internals.registered_types_py.find(type);
if (found_type != internals.registered_types_py.end() && found_type->second.size() == 1
&& found_type->second[0]->type == type) {
auto *tinfo = found_type->second[0];
auto tindex = std::type_index(*tinfo->cpptype);
internals.direct_conversions.erase(tindex);
auto *tinfo = found_type->second[0];
auto tindex = std::type_index(*tinfo->cpptype);
internals.direct_conversions.erase(tindex);
if (tinfo->module_local) {
get_local_internals().registered_types_cpp.erase(tindex);
} else {
internals.registered_types_cpp.erase(tindex);
}
internals.registered_types_py.erase(tinfo->type);
// Actually just `std::erase_if`, but that's only available in C++20
auto &cache = internals.inactive_override_cache;
for (auto it = cache.begin(), last = cache.end(); it != last;) {
if (it->first == (PyObject *) tinfo->type) {
it = cache.erase(it);
if (tinfo->module_local) {
get_local_internals().registered_types_cpp.erase(tindex);
} else {
++it;
internals.registered_types_cpp.erase(tindex);
}
}
internals.registered_types_py.erase(tinfo->type);
delete tinfo;
}
// Actually just `std::erase_if`, but that's only available in C++20
auto &cache = internals.inactive_override_cache;
for (auto it = cache.begin(), last = cache.end(); it != last;) {
if (it->first == (PyObject *) tinfo->type) {
it = cache.erase(it);
} else {
++it;
}
}
delete tinfo;
}
});
PyType_Type.tp_dealloc(obj);
}
@ -266,9 +274,7 @@ inline PyTypeObject *make_default_metaclass() {
type->tp_call = pybind11_meta_call;
type->tp_setattro = pybind11_meta_setattro;
#if PY_MAJOR_VERSION >= 3
type->tp_getattro = pybind11_meta_getattro;
#endif
type->tp_dealloc = pybind11_meta_dealloc;
@ -276,7 +282,7 @@ inline PyTypeObject *make_default_metaclass() {
pybind11_fail("make_default_metaclass(): failure in PyType_Ready()!");
}
setattr((PyObject *) type, "__module__", str("pybind11_builtins"));
setattr((PyObject *) type, "__module__", str(PYBIND11_DUMMY_MODULE_NAME));
PYBIND11_SET_OLDPY_QUALNAME(type, name_obj);
return type;
@ -306,20 +312,45 @@ inline void traverse_offset_bases(void *valueptr,
}
}
#ifdef Py_GIL_DISABLED
inline void enable_try_inc_ref(PyObject *obj) {
// TODO: Replace with PyUnstable_Object_EnableTryIncRef when available.
// See https://github.com/python/cpython/issues/128844
if (_Py_IsImmortal(obj)) {
return;
}
for (;;) {
Py_ssize_t shared = _Py_atomic_load_ssize_relaxed(&obj->ob_ref_shared);
if ((shared & _Py_REF_SHARED_FLAG_MASK) != 0) {
// Nothing to do if it's in WEAKREFS, QUEUED, or MERGED states.
return;
}
if (_Py_atomic_compare_exchange_ssize(
&obj->ob_ref_shared, &shared, shared | _Py_REF_MAYBE_WEAKREF)) {
return;
}
}
}
#endif
inline bool register_instance_impl(void *ptr, instance *self) {
get_internals().registered_instances.emplace(ptr, self);
#ifdef Py_GIL_DISABLED
enable_try_inc_ref(reinterpret_cast<PyObject *>(self));
#endif
with_instance_map(ptr, [&](instance_map &instances) { instances.emplace(ptr, self); });
return true; // unused, but gives the same signature as the deregister func
}
inline bool deregister_instance_impl(void *ptr, instance *self) {
auto &registered_instances = get_internals().registered_instances;
auto range = registered_instances.equal_range(ptr);
for (auto it = range.first; it != range.second; ++it) {
if (self == it->second) {
registered_instances.erase(it);
return true;
return with_instance_map(ptr, [&](instance_map &instances) {
auto range = instances.equal_range(ptr);
for (auto it = range.first; it != range.second; ++it) {
if (self == it->second) {
instances.erase(it);
return true;
}
}
}
return false;
return false;
});
}
inline void register_instance(instance *self, void *valptr, const type_info *tinfo) {
@ -369,28 +400,37 @@ extern "C" inline PyObject *pybind11_object_new(PyTypeObject *type, PyObject *,
extern "C" inline int pybind11_object_init(PyObject *self, PyObject *, PyObject *) {
PyTypeObject *type = Py_TYPE(self);
std::string msg = get_fully_qualified_tp_name(type) + ": No constructor defined!";
PyErr_SetString(PyExc_TypeError, msg.c_str());
set_error(PyExc_TypeError, msg.c_str());
return -1;
}
inline void add_patient(PyObject *nurse, PyObject *patient) {
auto &internals = get_internals();
auto *instance = reinterpret_cast<detail::instance *>(nurse);
instance->has_patients = true;
Py_INCREF(patient);
internals.patients[nurse].push_back(patient);
with_internals([&](internals &internals) { internals.patients[nurse].push_back(patient); });
}
inline void clear_patients(PyObject *self) {
auto *instance = reinterpret_cast<detail::instance *>(self);
auto &internals = get_internals();
auto pos = internals.patients.find(self);
assert(pos != internals.patients.end());
// Clearing the patients can cause more Python code to run, which
// can invalidate the iterator. Extract the vector of patients
// from the unordered_map first.
auto patients = std::move(pos->second);
internals.patients.erase(pos);
std::vector<PyObject *> patients;
with_internals([&](internals &internals) {
auto pos = internals.patients.find(self);
if (pos == internals.patients.end()) {
pybind11_fail(
"FATAL: Internal consistency check failed: Invalid clear_patients() call.");
}
// Clearing the patients can cause more Python code to run, which
// can invalidate the iterator. Extract the vector of patients
// from the unordered_map first.
patients = std::move(pos->second);
internals.patients.erase(pos);
});
instance->has_patients = false;
for (PyObject *&patient : patients) {
Py_CLEAR(patient);
@ -417,6 +457,8 @@ inline void clear_instance(PyObject *self) {
if (instance->owned || v_h.holder_constructed()) {
v_h.type->dealloc(v_h);
}
} else if (v_h.holder_constructed()) {
v_h.type->dealloc(v_h); // Disowned instance.
}
}
// Deallocate the value/holder layout internals:
@ -439,26 +481,31 @@ inline void clear_instance(PyObject *self) {
/// Instance destructor function for all pybind11 types. It calls `type_info.dealloc`
/// to destroy the C++ object itself, while the rest is Python bookkeeping.
extern "C" inline void pybind11_object_dealloc(PyObject *self) {
auto *type = Py_TYPE(self);
// If this is a GC tracked object, untrack it first
// Note that the track call is implicitly done by the
// default tp_alloc, which we never override.
if (PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC) != 0) {
PyObject_GC_UnTrack(self);
}
clear_instance(self);
auto *type = Py_TYPE(self);
type->tp_free(self);
#if PY_VERSION_HEX < 0x03080000
// `type->tp_dealloc != pybind11_object_dealloc` means that we're being called
// as part of a derived type's dealloc, in which case we're not allowed to decref
// the type here. For cross-module compatibility, we shouldn't compare directly
// with `pybind11_object_dealloc`, but with the common one stashed in internals.
auto pybind11_object_type = (PyTypeObject *) get_internals().instance_base;
if (type->tp_dealloc == pybind11_object_type->tp_dealloc)
Py_DECREF(type);
#else
// This was not needed before Python 3.8 (Python issue 35810)
// https://github.com/pybind/pybind11/issues/1946
Py_DECREF(type);
#endif
}
PYBIND11_WARNING_PUSH
PYBIND11_WARNING_DISABLE_GCC("-Wredundant-decls")
std::string error_string();
PYBIND11_WARNING_POP
/** Create the type which can be used as a common base for all classes. This is
needed in order to satisfy Python's requirements for multiple inheritance.
Return value: New reference. */
@ -494,52 +541,39 @@ inline PyObject *make_object_base_type(PyTypeObject *metaclass) {
type->tp_weaklistoffset = offsetof(instance, weakrefs);
if (PyType_Ready(type) < 0) {
pybind11_fail("PyType_Ready failed in make_object_base_type():" + error_string());
pybind11_fail("PyType_Ready failed in make_object_base_type(): " + error_string());
}
setattr((PyObject *) type, "__module__", str("pybind11_builtins"));
setattr((PyObject *) type, "__module__", str(PYBIND11_DUMMY_MODULE_NAME));
PYBIND11_SET_OLDPY_QUALNAME(type, name_obj);
assert(!PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC));
return (PyObject *) heap_type;
}
/// dynamic_attr: Support for `d = instance.__dict__`.
extern "C" inline PyObject *pybind11_get_dict(PyObject *self, void *) {
PyObject *&dict = *_PyObject_GetDictPtr(self);
if (!dict) {
dict = PyDict_New();
}
Py_XINCREF(dict);
return dict;
}
/// dynamic_attr: Support for `instance.__dict__ = dict()`.
extern "C" inline int pybind11_set_dict(PyObject *self, PyObject *new_dict, void *) {
if (!PyDict_Check(new_dict)) {
PyErr_Format(PyExc_TypeError,
"__dict__ must be set to a dictionary, not a '%.200s'",
get_fully_qualified_tp_name(Py_TYPE(new_dict)).c_str());
return -1;
}
PyObject *&dict = *_PyObject_GetDictPtr(self);
Py_INCREF(new_dict);
Py_CLEAR(dict);
dict = new_dict;
return 0;
}
/// dynamic_attr: Allow the garbage collector to traverse the internal instance `__dict__`.
extern "C" inline int pybind11_traverse(PyObject *self, visitproc visit, void *arg) {
#if PY_VERSION_HEX >= 0x030D0000
PyObject_VisitManagedDict(self, visit, arg);
#else
PyObject *&dict = *_PyObject_GetDictPtr(self);
Py_VISIT(dict);
#endif
// https://docs.python.org/3/c-api/typeobj.html#c.PyTypeObject.tp_traverse
#if PY_VERSION_HEX >= 0x03090000
Py_VISIT(Py_TYPE(self));
#endif
return 0;
}
/// dynamic_attr: Allow the GC to clear the dictionary.
extern "C" inline int pybind11_clear(PyObject *self) {
#if PY_VERSION_HEX >= 0x030D0000
PyObject_ClearManagedDict(self);
#else
PyObject *&dict = *_PyObject_GetDictPtr(self);
Py_CLEAR(dict);
#endif
return 0;
}
@ -547,14 +581,18 @@ extern "C" inline int pybind11_clear(PyObject *self) {
inline void enable_dynamic_attributes(PyHeapTypeObject *heap_type) {
auto *type = &heap_type->ht_type;
type->tp_flags |= Py_TPFLAGS_HAVE_GC;
#ifdef PYBIND11_BACKWARD_COMPATIBILITY_TP_DICTOFFSET
type->tp_dictoffset = type->tp_basicsize; // place dict at the end
type->tp_basicsize += (ssize_t) sizeof(PyObject *); // and allocate enough space for it
#else
type->tp_flags |= Py_TPFLAGS_MANAGED_DICT;
#endif
type->tp_traverse = pybind11_traverse;
type->tp_clear = pybind11_clear;
static PyGetSetDef getset[] = {
{const_cast<char *>("__dict__"), pybind11_get_dict, pybind11_set_dict, nullptr, nullptr},
{nullptr, nullptr, nullptr, nullptr, nullptr}};
static PyGetSetDef getset[]
= {{"__dict__", PyObject_GenericGetDict, PyObject_GenericSetDict, nullptr, nullptr},
{nullptr, nullptr, nullptr, nullptr, nullptr}};
type->tp_getset = getset;
}
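// [Editor's illustrative sketch, not part of the diff] The __dict__ getter/setter and GC
// hooks above are what py::dynamic_attr() wires up at binding time; the module and class
// names below are hypothetical.
#include <pybind11/pybind11.h>
#include <string>
namespace py = pybind11;

struct Pet { std::string name; };

PYBIND11_MODULE(pets_demo, m) {
    py::class_<Pet>(m, "Pet", py::dynamic_attr())  // instances get a writable __dict__
        .def(py::init<>())
        .def_readwrite("name", &Pet::name);
}
// With dynamic_attr(), Python code such as `p = Pet(); p.age = 3` succeeds; without it,
// assigning an unknown attribute raises AttributeError.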
@ -572,35 +610,89 @@ extern "C" inline int pybind11_getbuffer(PyObject *obj, Py_buffer *view, int fla
if (view) {
view->obj = nullptr;
}
PyErr_SetString(PyExc_BufferError, "pybind11_getbuffer(): Internal error");
set_error(PyExc_BufferError, "pybind11_getbuffer(): Internal error");
return -1;
}
std::memset(view, 0, sizeof(Py_buffer));
buffer_info *info = tinfo->get_buffer(obj, tinfo->get_buffer_data);
if ((flags & PyBUF_WRITABLE) == PyBUF_WRITABLE && info->readonly) {
delete info;
// view->obj = nullptr; // Was just memset to 0, so not necessary
PyErr_SetString(PyExc_BufferError, "Writable buffer requested for readonly storage");
std::unique_ptr<buffer_info> info = nullptr;
try {
info.reset(tinfo->get_buffer(obj, tinfo->get_buffer_data));
} catch (...) {
try_translate_exceptions();
raise_from(PyExc_BufferError, "Error getting buffer");
return -1;
}
view->obj = obj;
view->ndim = 1;
view->internal = info;
view->buf = info->ptr;
if (info == nullptr) {
pybind11_fail("FATAL UNEXPECTED SITUATION: tinfo->get_buffer() returned nullptr.");
}
if ((flags & PyBUF_WRITABLE) == PyBUF_WRITABLE && info->readonly) {
// view->obj = nullptr; // Was just memset to 0, so not necessary
set_error(PyExc_BufferError, "Writable buffer requested for readonly storage");
return -1;
}
// Fill in all the information, and then downgrade as requested by the caller, or raise an
// error if that's not possible.
view->itemsize = info->itemsize;
view->len = view->itemsize;
for (auto s : info->shape) {
view->len *= s;
}
view->ndim = static_cast<int>(info->ndim);
view->shape = info->shape.data();
view->strides = info->strides.data();
view->readonly = static_cast<int>(info->readonly);
if ((flags & PyBUF_FORMAT) == PyBUF_FORMAT) {
view->format = const_cast<char *>(info->format.c_str());
}
if ((flags & PyBUF_STRIDES) == PyBUF_STRIDES) {
view->ndim = (int) info->ndim;
view->strides = info->strides.data();
view->shape = info->shape.data();
// Note, all contiguity flags imply PyBUF_STRIDES and lower.
if ((flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) {
if (PyBuffer_IsContiguous(view, 'C') == 0) {
std::memset(view, 0, sizeof(Py_buffer));
set_error(PyExc_BufferError,
"C-contiguous buffer requested for discontiguous storage");
return -1;
}
} else if ((flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) {
if (PyBuffer_IsContiguous(view, 'F') == 0) {
std::memset(view, 0, sizeof(Py_buffer));
set_error(PyExc_BufferError,
"Fortran-contiguous buffer requested for discontiguous storage");
return -1;
}
} else if ((flags & PyBUF_ANY_CONTIGUOUS) == PyBUF_ANY_CONTIGUOUS) {
if (PyBuffer_IsContiguous(view, 'A') == 0) {
std::memset(view, 0, sizeof(Py_buffer));
set_error(PyExc_BufferError, "Contiguous buffer requested for discontiguous storage");
return -1;
}
} else if ((flags & PyBUF_STRIDES) != PyBUF_STRIDES) {
// If no strides are requested, the buffer must be C-contiguous.
// https://docs.python.org/3/c-api/buffer.html#contiguity-requests
if (PyBuffer_IsContiguous(view, 'C') == 0) {
std::memset(view, 0, sizeof(Py_buffer));
set_error(PyExc_BufferError,
"C-contiguous buffer requested for discontiguous storage");
return -1;
}
view->strides = nullptr;
// Since this is a contiguous buffer, it can also pretend to be 1D.
if ((flags & PyBUF_ND) != PyBUF_ND) {
view->shape = nullptr;
view->ndim = 0;
}
}
// Set these after all checks so they don't leak out into the caller, and can be automatically
// cleaned up on error.
view->buf = info->ptr;
view->internal = info.release();
view->obj = obj;
Py_INCREF(view->obj);
return 0;
}
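// [Editor's illustrative sketch, not part of the diff] pybind11_getbuffer() above fills the
// Py_buffer from the buffer_info returned by a user-supplied def_buffer() lambda; the Matrix
// type and module name below are hypothetical.
#include <pybind11/pybind11.h>
#include <cstddef>
#include <vector>
namespace py = pybind11;

struct Matrix {
    Matrix(py::ssize_t rows, py::ssize_t cols)
        : rows(rows), cols(cols), data(static_cast<std::size_t>(rows * cols)) {}
    py::ssize_t rows, cols;
    std::vector<double> data;
};

PYBIND11_MODULE(buffer_demo, m) {
    py::class_<Matrix>(m, "Matrix", py::buffer_protocol())
        .def(py::init<py::ssize_t, py::ssize_t>())
        .def_buffer([](Matrix &mat) -> py::buffer_info {
            return py::buffer_info(
                mat.data.data(),                          // pointer to the underlying storage
                sizeof(double),                           // size of one element
                py::format_descriptor<double>::format(),  // struct-style format string ("d")
                2,                                        // number of dimensions
                { mat.rows, mat.cols },                   // shape
                { static_cast<py::ssize_t>(sizeof(double)) * mat.cols,
                  static_cast<py::ssize_t>(sizeof(double)) });  // row-major strides in bytes
        });
}
// memoryview(Matrix(3, 4)) then sees a 3x4, C-contiguous, float64 buffer.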
@ -613,9 +705,6 @@ extern "C" inline void pybind11_releasebuffer(PyObject *, Py_buffer *view) {
/// Give this type a buffer interface.
inline void enable_buffer_protocol(PyHeapTypeObject *heap_type) {
heap_type->ht_type.tp_as_buffer = &heap_type->as_buffer;
#if PY_MAJOR_VERSION < 3
heap_type->ht_type.tp_flags |= Py_TPFLAGS_HAVE_NEWBUFFER;
#endif
heap_type->as_buffer.bf_getbuffer = pybind11_getbuffer;
heap_type->as_buffer.bf_releasebuffer = pybind11_releasebuffer;
@ -628,23 +717,11 @@ inline PyObject *make_new_python_type(const type_record &rec) {
auto qualname = name;
if (rec.scope && !PyModule_Check(rec.scope.ptr()) && hasattr(rec.scope, "__qualname__")) {
#if PY_MAJOR_VERSION >= 3
qualname = reinterpret_steal<object>(
PyUnicode_FromFormat("%U.%U", rec.scope.attr("__qualname__").ptr(), name.ptr()));
#else
qualname = str(rec.scope.attr("__qualname__").cast<std::string>() + "." + rec.name);
#endif
}
object module_;
if (rec.scope) {
if (hasattr(rec.scope, "__module__")) {
module_ = rec.scope.attr("__module__");
} else if (hasattr(rec.scope, "__name__")) {
module_ = rec.scope.attr("__name__");
}
}
object module_ = get_module_name_if_available(rec.scope);
const auto *full_name = c_str(
#if !defined(PYPY_VERSION)
module_ ? str(module_).cast<std::string>() + "." + rec.name :
@ -653,10 +730,13 @@ inline PyObject *make_new_python_type(const type_record &rec) {
char *tp_doc = nullptr;
if (rec.doc && options::show_user_defined_docstrings()) {
/* Allocate memory for docstring (using PyObject_MALLOC, since
Python will free this later on) */
/* Allocate memory for docstring (Python will free this later on) */
size_t size = std::strlen(rec.doc) + 1;
#if PY_VERSION_HEX >= 0x030D0000
tp_doc = (char *) PyMem_MALLOC(size);
#else
tp_doc = (char *) PyObject_MALLOC(size);
#endif
std::memcpy((void *) tp_doc, rec.doc, size);
}
@ -697,15 +777,10 @@ inline PyObject *make_new_python_type(const type_record &rec) {
type->tp_as_number = &heap_type->as_number;
type->tp_as_sequence = &heap_type->as_sequence;
type->tp_as_mapping = &heap_type->as_mapping;
#if PY_VERSION_HEX >= 0x03050000
type->tp_as_async = &heap_type->as_async;
#endif
/* Flags */
type->tp_flags |= Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE;
#if PY_MAJOR_VERSION < 3
type->tp_flags |= Py_TPFLAGS_CHECKTYPES;
#endif
if (!rec.is_final) {
type->tp_flags |= Py_TPFLAGS_BASETYPE;
}
@ -723,7 +798,7 @@ inline PyObject *make_new_python_type(const type_record &rec) {
}
if (PyType_Ready(type) < 0) {
pybind11_fail(std::string(rec.name) + ": PyType_Ready failed (" + error_string() + ")!");
pybind11_fail(std::string(rec.name) + ": PyType_Ready failed: " + error_string());
}
assert(!rec.dynamic_attr || PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC));

View File

@ -9,27 +9,42 @@
#pragma once
#define PYBIND11_VERSION_MAJOR 2
#define PYBIND11_VERSION_MINOR 9
#define PYBIND11_VERSION_PATCH 2
#include <pybind11/conduit/wrap_include_python_h.h>
#if PY_VERSION_HEX < 0x03080000
# error "PYTHON < 3.8 IS UNSUPPORTED. pybind11 v2.13 was the last to support Python 3.7."
#endif
// Similar to Python's convention: https://docs.python.org/3/c-api/apiabiversion.html
// Additional convention: 0xD = dev
#define PYBIND11_VERSION_HEX 0x02090200
// See also: https://github.com/python/cpython/blob/HEAD/Include/patchlevel.h
/* -- start version constants -- */
#define PYBIND11_VERSION_MAJOR 3
#define PYBIND11_VERSION_MINOR 0
#define PYBIND11_VERSION_MICRO 2
// ALPHA = 0xA, BETA = 0xB, GAMMA = 0xC (release candidate), FINAL = 0xF (stable release)
// - The release level is set to "alpha" for development versions.
// Use 0xA0 (LEVEL=0xA, SERIAL=0) for development versions.
// - For stable releases, set the serial to 0.
#define PYBIND11_VERSION_RELEASE_LEVEL PY_RELEASE_LEVEL_ALPHA
#define PYBIND11_VERSION_RELEASE_SERIAL 0
// String version of (micro, release level, release serial), e.g.: 0a0, 0b1, 0rc1, 0
#define PYBIND11_VERSION_PATCH 2a0
/* -- end version constants -- */
#define PYBIND11_NAMESPACE_BEGIN(name) namespace name {
#define PYBIND11_NAMESPACE_END(name) }
// Robust support for some features and loading modules compiled against different pybind versions
// requires forcing hidden visibility on pybind code, so we enforce this by setting the attribute
// on the main `pybind11` namespace.
#if !defined(PYBIND11_NAMESPACE)
# ifdef __GNUG__
# define PYBIND11_NAMESPACE pybind11 __attribute__((visibility("hidden")))
# else
# define PYBIND11_NAMESPACE pybind11
# endif
#if !defined(Py_PACK_FULL_VERSION)
// Stable API since Python 3.14.0a4
# define Py_PACK_FULL_VERSION(X, Y, Z, LEVEL, SERIAL) \
((((X) & 0xff) << 24) | (((Y) & 0xff) << 16) | (((Z) & 0xff) << 8) \
| (((LEVEL) & 0xf) << 4) | (((SERIAL) & 0xf) << 0))
#endif
// Version as a single 4-byte hex number, e.g. 0x030C04B5 == 3.12.4b5.
#define PYBIND11_VERSION_HEX \
Py_PACK_FULL_VERSION(PYBIND11_VERSION_MAJOR, \
PYBIND11_VERSION_MINOR, \
PYBIND11_VERSION_MICRO, \
PYBIND11_VERSION_RELEASE_LEVEL, \
PYBIND11_VERSION_RELEASE_SERIAL)
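// [Editor's illustrative sketch, not part of the diff] The packing convention above in
// numbers: major/minor/micro each take one byte, and level/serial share the low byte, so
// 3.12.4b5 (level 0xB = beta, serial 5) packs to 0x030C04B5. SKETCH_PACK_FULL_VERSION is a
// local stand-in with the same layout.
#define SKETCH_PACK_FULL_VERSION(X, Y, Z, LEVEL, SERIAL) \
    ((((X) & 0xff) << 24) | (((Y) & 0xff) << 16) | (((Z) & 0xff) << 8) \
     | (((LEVEL) & 0xf) << 4) | (((SERIAL) & 0xf) << 0))
static_assert(SKETCH_PACK_FULL_VERSION(3, 12, 4, 0xB, 5) == 0x030C04B5,
              "3.12.4b5 packs to 0x030C04B5");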
#include "pybind11_namespace_macros.h"
#if !(defined(_MSC_VER) && __cplusplus == 199711L)
# if __cplusplus >= 201402L
@ -38,6 +53,7 @@
# define PYBIND11_CPP17
# if __cplusplus >= 202002L
# define PYBIND11_CPP20
// Please update tests/pybind11_tests.cpp `cpp_std()` when adding a macro here.
# endif
# endif
# endif
@ -47,7 +63,7 @@
// or newer.
# if _MSVC_LANG >= 201402L
# define PYBIND11_CPP14
# if _MSVC_LANG > 201402L && _MSC_VER >= 1910
# if _MSVC_LANG > 201402L
# define PYBIND11_CPP17
# if _MSVC_LANG >= 202002L
# define PYBIND11_CPP20
@ -56,6 +72,37 @@
# endif
#endif
// These PYBIND11_HAS_... macros are consolidated in pybind11/detail/common.h
// to simplify backward compatibility handling for users (e.g., via #ifdef checks):
#define PYBIND11_HAS_TYPE_CASTER_STD_FUNCTION_SPECIALIZATIONS 1
#define PYBIND11_HAS_INTERNALS_WITH_SMART_HOLDER_SUPPORT 1
#define PYBIND11_HAS_CPP_CONDUIT 1
#define PYBIND11_HAS_NATIVE_ENUM 1
#if defined(PYBIND11_CPP17) && defined(__has_include)
# if __has_include(<filesystem>)
# define PYBIND11_HAS_FILESYSTEM 1
# elif __has_include(<experimental/filesystem>)
# define PYBIND11_HAS_EXPERIMENTAL_FILESYSTEM 1
# endif
#endif
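// [Editor's illustrative sketch, not part of the diff] The consolidated PYBIND11_HAS_...
// macros are intended for user-side #ifdef checks like this one; the module name and the
// bound function below are hypothetical.
#include <pybind11/pybind11.h>
#if defined(PYBIND11_HAS_FILESYSTEM)
#    include <filesystem>
#    include <pybind11/stl/filesystem.h>  // std::filesystem::path <-> pathlib.Path caster
#endif
namespace py = pybind11;

PYBIND11_MODULE(fs_demo, m) {
#if defined(PYBIND11_HAS_FILESYSTEM)
    m.def("exists", [](const std::filesystem::path &p) { return std::filesystem::exists(p); });
#endif
}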
#if defined(__cpp_lib_launder) && !(defined(_MSC_VER) && (_MSC_VER < 1914))
# define PYBIND11_STD_LAUNDER std::launder
# define PYBIND11_HAS_STD_LAUNDER 1
#else
# define PYBIND11_STD_LAUNDER
# define PYBIND11_HAS_STD_LAUNDER 0
#endif
#if defined(PYBIND11_CPP20)
# define PYBIND11_CONSTINIT constinit
# define PYBIND11_DTOR_CONSTEXPR constexpr
#else
# define PYBIND11_CONSTINIT
# define PYBIND11_DTOR_CONSTEXPR
#endif
// Compiler version assertions
#if defined(__INTEL_COMPILER)
# if __INTEL_COMPILER < 1800
@ -81,10 +128,8 @@
# error pybind11 requires gcc 4.8 or newer
# endif
#elif defined(_MSC_VER)
// Pybind hits various compiler bugs in 2015u2 and earlier, and also makes use of some stl features
// (e.g. std::negation) added in 2015u3:
# if _MSC_FULL_VER < 190024210
# error pybind11 requires MSVC 2015 update 3 or newer
# if _MSC_VER < 1910
# error pybind11 2.10+ requires MSVC 2017 or newer
# endif
#endif
@ -96,17 +141,6 @@
# endif
#endif
#if !defined(PYBIND11_EXPORT_EXCEPTION)
# ifdef __MINGW32__
// workaround for:
// error: 'dllexport' implies default visibility, but xxx has already been declared with a
// different visibility
# define PYBIND11_EXPORT_EXCEPTION
# else
# define PYBIND11_EXPORT_EXCEPTION PYBIND11_EXPORT
# endif
#endif
// For CUDA, GCC7, GCC8:
// PYBIND11_NOINLINE_FORCED is incompatible with `-Wattributes -Werror`.
// When defining PYBIND11_NOINLINE_FORCED, it is best to also use `-Wno-attributes`.
@ -147,19 +181,6 @@
# define PYBIND11_MAYBE_UNUSED __attribute__((__unused__))
#endif
/* Don't let Python.h #define (v)snprintf as macro because they are implemented
properly in Visual Studio since 2015. */
#if defined(_MSC_VER) && _MSC_VER >= 1900
# define HAVE_SNPRINTF 1
#endif
#if defined(_MSC_VER)
# pragma warning(push)
// C4505: 'PySlice_GetIndicesEx': unreferenced local function has been removed (PyPy only)
# pragma warning(disable : 4505)
# pragma warning(disable: 4510 4610 4512 4005)
#endif
// https://en.cppreference.com/w/c/chrono/localtime
#if defined(__STDC_LIB_EXT1__) && !defined(__STDC_WANT_LIB_EXT1__)
# define __STDC_WANT_LIB_EXT1__
@ -184,47 +205,17 @@
# define PYBIND11_HAS_VARIANT 1
#endif
#if defined(PYBIND11_CPP17)
# if defined(__has_include)
# if __has_include(<string_view>)
# define PYBIND11_HAS_STRING_VIEW
# endif
# elif defined(_MSC_VER)
# define PYBIND11_HAS_STRING_VIEW
# endif
#if defined(PYBIND11_CPP17) \
&& ((defined(__has_include) && __has_include(<string_view>)) || defined(_MSC_VER))
# define PYBIND11_HAS_STRING_VIEW 1
#endif
#if defined(__cpp_lib_char8_t) && __cpp_lib_char8_t >= 201811L
# define PYBIND11_HAS_U8STRING
#endif
#include <Python.h>
#include <frameobject.h>
#include <pythread.h>
/* Python #defines overrides on all sorts of core functions, which
tends to wreak havoc in C++ codebases that expect these to work
like regular functions (potentially with several overloads) */
#if defined(isalnum)
# undef isalnum
# undef isalpha
# undef islower
# undef isspace
# undef isupper
# undef tolower
# undef toupper
#endif
#if defined(copysign)
# undef copysign
#if (defined(PYPY_VERSION) || defined(GRAALVM_PYTHON)) && !defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
# define PYBIND11_SIMPLE_GIL_MANAGEMENT
#endif
#if defined(_MSC_VER)
# if defined(PYBIND11_DEBUG_MARKER)
# define _DEBUG
# undef PYBIND11_DEBUG_MARKER
# endif
# pragma warning(pop)
PYBIND11_WARNING_DISABLE_MSVC(4510 4610 4512 4005)
#endif
#include <cstddef>
@ -245,6 +236,72 @@
# endif
#endif
// For libc++, the exceptions should be exported,
// otherwise, the exception translation would be incorrect.
// IMPORTANT: This code block must stay BELOW the #include <exception> above (see PR #5390).
#if !defined(PYBIND11_EXPORT_EXCEPTION)
# if defined(_LIBCPP_EXCEPTION)
# define PYBIND11_EXPORT_EXCEPTION PYBIND11_EXPORT
# else
# define PYBIND11_EXPORT_EXCEPTION
# endif
#endif
// Must be after including <version> or one of the other headers specified by the standard
#if defined(__cpp_lib_char8_t) && __cpp_lib_char8_t >= 201811L
# define PYBIND11_HAS_U8STRING 1
#endif
// See description of PR #4246:
#if !defined(PYBIND11_NO_ASSERT_GIL_HELD_INCREF_DECREF) && !defined(NDEBUG) \
&& !defined(PYPY_VERSION) && !defined(PYBIND11_ASSERT_GIL_HELD_INCREF_DECREF)
# define PYBIND11_ASSERT_GIL_HELD_INCREF_DECREF
#endif
// Slightly faster code paths are available when PYBIND11_HAS_SUBINTERPRETER_SUPPORT is *not*
// defined, so avoid defining it for implementations that do not support subinterpreters. However,
// defining it unnecessarily is not expected to break anything.
// This can be overridden by the user with -DPYBIND11_HAS_SUBINTERPRETER_SUPPORT=1 or 0
#ifndef PYBIND11_HAS_SUBINTERPRETER_SUPPORT
# if PY_VERSION_HEX >= 0x030C0000 && !defined(PYPY_VERSION) && !defined(GRAALVM_PYTHON)
# define PYBIND11_HAS_SUBINTERPRETER_SUPPORT 1
# endif
#else
# if PYBIND11_HAS_SUBINTERPRETER_SUPPORT == 0
# undef PYBIND11_HAS_SUBINTERPRETER_SUPPORT
# endif
#endif
// 3.13 Compatibility
#if 0x030D0000 <= PY_VERSION_HEX
# define PYBIND11_TYPE_IS_TYPE_HINT "typing.TypeIs"
# define PYBIND11_CAPSULE_TYPE_TYPE_HINT "types.CapsuleType"
#else
# define PYBIND11_TYPE_IS_TYPE_HINT "typing_extensions.TypeIs"
# define PYBIND11_CAPSULE_TYPE_TYPE_HINT "typing_extensions.CapsuleType"
#endif
// 3.12 Compatibility
#if 0x030C0000 <= PY_VERSION_HEX
# define PYBIND11_BUFFER_TYPE_HINT "collections.abc.Buffer"
#else
# define PYBIND11_BUFFER_TYPE_HINT "typing_extensions.Buffer"
#endif
// 3.11 Compatibility
#if 0x030B0000 <= PY_VERSION_HEX
# define PYBIND11_NEVER_TYPE_HINT "typing.Never"
#else
# define PYBIND11_NEVER_TYPE_HINT "typing_extensions.Never"
#endif
// 3.10 Compatibility
#if 0x030A0000 <= PY_VERSION_HEX
# define PYBIND11_TYPE_GUARD_TYPE_HINT "typing.TypeGuard"
#else
# define PYBIND11_TYPE_GUARD_TYPE_HINT "typing_extensions.TypeGuard"
#endif
// #define PYBIND11_STR_LEGACY_PERMISSIVE
// If DEFINED, pybind11::str can hold PyUnicodeObject or PyBytesObject
// (probably surprising and never documented, but this was the
@ -254,84 +311,63 @@
// If UNDEFINED, pybind11::str can only hold PyUnicodeObject, and
// pybind11::isinstance<str>() is true only for pybind11::str.
// However, for Python 2 only (!), the pybind11::str caster
// implicitly decodes bytes to PyUnicodeObject. This is to ease
// implicitly decoded bytes to PyUnicodeObject. This was to ease
// the transition from the legacy behavior to the non-permissive
// behavior.
#if PY_MAJOR_VERSION >= 3 /// Compatibility macros for various Python versions
# define PYBIND11_INSTANCE_METHOD_NEW(ptr, class_) PyInstanceMethod_New(ptr)
# define PYBIND11_INSTANCE_METHOD_CHECK PyInstanceMethod_Check
# define PYBIND11_INSTANCE_METHOD_GET_FUNCTION PyInstanceMethod_GET_FUNCTION
# define PYBIND11_BYTES_CHECK PyBytes_Check
# define PYBIND11_BYTES_FROM_STRING PyBytes_FromString
# define PYBIND11_BYTES_FROM_STRING_AND_SIZE PyBytes_FromStringAndSize
# define PYBIND11_BYTES_AS_STRING_AND_SIZE PyBytes_AsStringAndSize
# define PYBIND11_BYTES_AS_STRING PyBytes_AsString
# define PYBIND11_BYTES_SIZE PyBytes_Size
# define PYBIND11_LONG_CHECK(o) PyLong_Check(o)
# define PYBIND11_LONG_AS_LONGLONG(o) PyLong_AsLongLong(o)
# define PYBIND11_LONG_FROM_SIGNED(o) PyLong_FromSsize_t((ssize_t) (o))
# define PYBIND11_LONG_FROM_UNSIGNED(o) PyLong_FromSize_t((size_t) (o))
# define PYBIND11_BYTES_NAME "bytes"
# define PYBIND11_STRING_NAME "str"
# define PYBIND11_SLICE_OBJECT PyObject
# define PYBIND11_FROM_STRING PyUnicode_FromString
# define PYBIND11_STR_TYPE ::pybind11::str
# define PYBIND11_BOOL_ATTR "__bool__"
# define PYBIND11_NB_BOOL(ptr) ((ptr)->nb_bool)
# define PYBIND11_BUILTINS_MODULE "builtins"
/// Compatibility macros for Python 2 / Python 3 versions TODO: remove
#define PYBIND11_INSTANCE_METHOD_NEW(ptr, class_) PyInstanceMethod_New(ptr)
#define PYBIND11_INSTANCE_METHOD_CHECK PyInstanceMethod_Check
#define PYBIND11_INSTANCE_METHOD_GET_FUNCTION PyInstanceMethod_GET_FUNCTION
#define PYBIND11_BYTES_CHECK PyBytes_Check
#define PYBIND11_BYTES_FROM_STRING PyBytes_FromString
#define PYBIND11_BYTES_FROM_STRING_AND_SIZE PyBytes_FromStringAndSize
#define PYBIND11_BYTES_AS_STRING_AND_SIZE PyBytes_AsStringAndSize
#define PYBIND11_BYTES_AS_STRING PyBytes_AsString
#define PYBIND11_BYTES_SIZE PyBytes_Size
#define PYBIND11_LONG_CHECK(o) PyLong_Check(o)
#define PYBIND11_LONG_AS_LONGLONG(o) PyLong_AsLongLong(o)
#define PYBIND11_LONG_FROM_SIGNED(o) PyLong_FromSsize_t((ssize_t) (o))
#define PYBIND11_LONG_FROM_UNSIGNED(o) PyLong_FromSize_t((size_t) (o))
#define PYBIND11_BYTES_NAME "bytes"
#define PYBIND11_STRING_NAME "str"
#define PYBIND11_SLICE_OBJECT PyObject
#define PYBIND11_FROM_STRING PyUnicode_FromString
#define PYBIND11_STR_TYPE ::pybind11::str
#define PYBIND11_BOOL_ATTR "__bool__"
#define PYBIND11_NB_BOOL(ptr) ((ptr)->nb_bool)
#define PYBIND11_BUILTINS_MODULE "builtins"
// Providing a separate declaration to make Clang's -Wmissing-prototypes happy.
// See comment for PYBIND11_MODULE below for why this is marked "maybe unused".
# define PYBIND11_PLUGIN_IMPL(name) \
extern "C" PYBIND11_MAYBE_UNUSED PYBIND11_EXPORT PyObject *PyInit_##name(); \
extern "C" PYBIND11_EXPORT PyObject *PyInit_##name()
#else
# define PYBIND11_INSTANCE_METHOD_NEW(ptr, class_) PyMethod_New(ptr, nullptr, class_)
# define PYBIND11_INSTANCE_METHOD_CHECK PyMethod_Check
# define PYBIND11_INSTANCE_METHOD_GET_FUNCTION PyMethod_GET_FUNCTION
# define PYBIND11_BYTES_CHECK PyString_Check
# define PYBIND11_BYTES_FROM_STRING PyString_FromString
# define PYBIND11_BYTES_FROM_STRING_AND_SIZE PyString_FromStringAndSize
# define PYBIND11_BYTES_AS_STRING_AND_SIZE PyString_AsStringAndSize
# define PYBIND11_BYTES_AS_STRING PyString_AsString
# define PYBIND11_BYTES_SIZE PyString_Size
# define PYBIND11_LONG_CHECK(o) (PyInt_Check(o) || PyLong_Check(o))
# define PYBIND11_LONG_AS_LONGLONG(o) \
(PyInt_Check(o) ? (long long) PyLong_AsLong(o) : PyLong_AsLongLong(o))
# define PYBIND11_LONG_FROM_SIGNED(o) PyInt_FromSsize_t((ssize_t) o) // Returns long if needed.
# define PYBIND11_LONG_FROM_UNSIGNED(o) PyInt_FromSize_t((size_t) o) // Returns long if needed.
# define PYBIND11_BYTES_NAME "str"
# define PYBIND11_STRING_NAME "unicode"
# define PYBIND11_SLICE_OBJECT PySliceObject
# define PYBIND11_FROM_STRING PyString_FromString
# define PYBIND11_STR_TYPE ::pybind11::bytes
# define PYBIND11_BOOL_ATTR "__nonzero__"
# define PYBIND11_NB_BOOL(ptr) ((ptr)->nb_nonzero)
# define PYBIND11_BUILTINS_MODULE "__builtin__"
// Providing a separate PyInit decl to make Clang's -Wmissing-prototypes happy.
// See comment for PYBIND11_MODULE below for why this is marked "maybe unused".
# define PYBIND11_PLUGIN_IMPL(name) \
static PyObject *pybind11_init_wrapper(); \
extern "C" PYBIND11_MAYBE_UNUSED PYBIND11_EXPORT void init##name(); \
extern "C" PYBIND11_EXPORT void init##name() { (void) pybind11_init_wrapper(); } \
PyObject *pybind11_init_wrapper()
#endif
#if PY_VERSION_HEX >= 0x03050000 && PY_VERSION_HEX < 0x03050200
extern "C" {
struct _Py_atomic_address {
void *value;
};
PyAPI_DATA(_Py_atomic_address) _PyThreadState_Current;
}
#endif
#define PYBIND11_PLUGIN_DECL(name) \
extern "C" PYBIND11_MAYBE_UNUSED PYBIND11_EXPORT PyObject *PyInit_##name();
#define PYBIND11_PLUGIN_IMPL(name) \
PYBIND11_PLUGIN_DECL(name) \
extern "C" PYBIND11_EXPORT PyObject *PyInit_##name()
#define PYBIND11_TRY_NEXT_OVERLOAD ((PyObject *) 1) // special failure return code
#define PYBIND11_STRINGIFY(x) #x
#define PYBIND11_TOSTRING(x) PYBIND11_STRINGIFY(x)
#define PYBIND11_CONCAT(first, second) first##second
#define PYBIND11_ENSURE_INTERNALS_READY pybind11::detail::get_internals();
#define PYBIND11_ENSURE_INTERNALS_READY \
{ \
pybind11::detail::get_internals_pp_manager().unref(); \
pybind11::detail::get_internals(); \
}
#if !defined(GRAALVM_PYTHON)
# define PYBIND11_PYCFUNCTION_GET_DOC(func) ((func)->m_ml->ml_doc)
# define PYBIND11_PYCFUNCTION_SET_DOC(func, doc) \
do { \
(func)->m_ml->ml_doc = (doc); \
} while (0)
#else
# define PYBIND11_PYCFUNCTION_GET_DOC(func) (GraalPyCFunction_GetDoc((PyObject *) (func)))
# define PYBIND11_PYCFUNCTION_SET_DOC(func, doc) \
do { \
GraalPyCFunction_SetDoc((PyObject *) (func), (doc)); \
} while (0)
#endif
#define PYBIND11_CHECK_PYTHON_VERSION \
{ \
@ -350,31 +386,13 @@ PyAPI_DATA(_Py_atomic_address) _PyThreadState_Current;
} \
}
#if PY_VERSION_HEX >= 0x03030000
# define PYBIND11_CATCH_INIT_EXCEPTIONS \
catch (pybind11::error_already_set & e) { \
pybind11::raise_from(e, PyExc_ImportError, "initialization failed"); \
return nullptr; \
} \
catch (const std::exception &e) { \
PyErr_SetString(PyExc_ImportError, e.what()); \
return nullptr; \
}
#else
# define PYBIND11_CATCH_INIT_EXCEPTIONS \
catch (pybind11::error_already_set & e) { \
PyErr_SetString(PyExc_ImportError, e.what()); \
return nullptr; \
} \
catch (const std::exception &e) { \
PyErr_SetString(PyExc_ImportError, e.what()); \
return nullptr; \
}
#endif
#define PYBIND11_CATCH_INIT_EXCEPTIONS \
catch (pybind11::error_already_set & e) { \
pybind11::raise_from(e, PyExc_ImportError, "initialization failed"); \
} \
catch (const std::exception &e) { \
::pybind11::set_error(PyExc_ImportError, e.what()); \
}
/** \rst
***Deprecated in favor of PYBIND11_MODULE***
@ -401,14 +419,63 @@ PyAPI_DATA(_Py_atomic_address) _PyThreadState_Current;
return pybind11_init(); \
} \
PYBIND11_CATCH_INIT_EXCEPTIONS \
return nullptr; \
} \
PyObject *pybind11_init()
// this push is for the next several macros
PYBIND11_WARNING_PUSH
PYBIND11_WARNING_DISABLE_CLANG("-Wgnu-zero-variadic-macro-arguments")
/**
Create a PyInit_ function for this module.
Note that this is run once for each (sub-)interpreter the module is imported into, including
possibly concurrently. The PyModuleDef is allowed to be static, but the PyObject* resulting from
PyModuleDef_Init should be treated like any other PyObject (so not shared across interpreters).
*/
#define PYBIND11_MODULE_PYINIT(name, pre_init, ...) \
static int PYBIND11_CONCAT(pybind11_exec_, name)(PyObject *); \
PYBIND11_PLUGIN_IMPL(name) { \
PYBIND11_CHECK_PYTHON_VERSION \
pre_init; \
PYBIND11_ENSURE_INTERNALS_READY \
static ::pybind11::detail::slots_array mod_def_slots = ::pybind11::detail::init_slots( \
&PYBIND11_CONCAT(pybind11_exec_, name), ##__VA_ARGS__); \
static PyModuleDef def{/* m_base */ PyModuleDef_HEAD_INIT, \
/* m_name */ PYBIND11_TOSTRING(name), \
/* m_doc */ nullptr, \
/* m_size */ 0, \
/* m_methods */ nullptr, \
/* m_slots */ mod_def_slots.data(), \
/* m_traverse */ nullptr, \
/* m_clear */ nullptr, \
/* m_free */ nullptr}; \
return PyModuleDef_Init(&def); \
}
#define PYBIND11_MODULE_EXEC(name, variable) \
static void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ &); \
int PYBIND11_CONCAT(pybind11_exec_, name)(PyObject * pm) { \
try { \
auto m = pybind11::reinterpret_borrow<::pybind11::module_>(pm); \
if (!pybind11::detail::get_cached_module(m.attr("__spec__").attr("name"))) { \
PYBIND11_CONCAT(pybind11_init_, name)(m); \
pybind11::detail::cache_completed_module(m); \
} \
return 0; \
} \
PYBIND11_CATCH_INIT_EXCEPTIONS \
return -1; \
} \
void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ \
& variable) // NOLINT(bugprone-macro-parentheses)
/** \rst
This macro creates the entry point that will be invoked when the Python interpreter
imports an extension module. The module name is given as the fist argument and it
imports an extension module. The module name is given as the first argument and it
should not be in quotes. The second macro argument defines a variable of type
`py::module_` which can be used to initialize the module.
``py::module_`` which can be used to initialize the module.
The entry point is marked as "maybe unused" to aid dead-code detection analysis:
since the entry point is typically only looked up at runtime and not referenced
@ -424,24 +491,32 @@ PyAPI_DATA(_Py_atomic_address) _PyThreadState_Current;
return "Hello, World!";
});
}
The third and subsequent macro arguments are optional (available since 2.13.0), and
can be used to mark the extension module as supporting various Python features.
- ``mod_gil_not_used()``
- ``multiple_interpreters::per_interpreter_gil()``
- ``multiple_interpreters::shared_gil()``
- ``multiple_interpreters::not_supported()``
.. code-block:: cpp
PYBIND11_MODULE(example, m, py::mod_gil_not_used()) {
m.doc() = "pybind11 example module safe to run without the GIL";
m.def("foo", []() {
return "Hello, Free-threaded World!";
});
}
\endrst */
#define PYBIND11_MODULE(name, variable) \
static ::pybind11::module_::module_def PYBIND11_CONCAT(pybind11_module_def_, name) \
PYBIND11_MAYBE_UNUSED; \
PYBIND11_MAYBE_UNUSED \
static void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ &); \
PYBIND11_PLUGIN_IMPL(name) { \
PYBIND11_CHECK_PYTHON_VERSION \
PYBIND11_ENSURE_INTERNALS_READY \
auto m = ::pybind11::module_::create_extension_module( \
PYBIND11_TOSTRING(name), nullptr, &PYBIND11_CONCAT(pybind11_module_def_, name)); \
try { \
PYBIND11_CONCAT(pybind11_init_, name)(m); \
return m.ptr(); \
} \
PYBIND11_CATCH_INIT_EXCEPTIONS \
} \
void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ & (variable))
#define PYBIND11_MODULE(name, variable, ...) \
PYBIND11_MODULE_PYINIT( \
name, (pybind11::detail::get_num_interpreters_seen() += 1), ##__VA_ARGS__) \
PYBIND11_MODULE_EXEC(name, variable)
// pop gnu-zero-variadic-macro-arguments
PYBIND11_WARNING_POP
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
@ -471,7 +546,7 @@ enum class return_value_policy : uint8_t {
/** Reference an existing object (i.e. do not create a new copy) and take
ownership. Python will call the destructor and delete operator when the
objects reference count reaches zero. Undefined behavior ensues when
object's reference count reaches zero. Undefined behavior ensues when
the C++ side does the same.. */
take_ownership,
@ -487,7 +562,7 @@ enum class return_value_policy : uint8_t {
move,
/** Reference an existing object, but do not take ownership. The C++ side
is responsible for managing the objects lifetime and deallocating it
is responsible for managing the object's lifetime and deallocating it
when it is no longer used. Warning: undefined behavior will ensue when
the C++ side deletes an object that is still referenced and used by
Python. */
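// [Editor's illustrative sketch, not part of the diff] Selecting the policy described above
// when binding a function; the type, module and function names are hypothetical. The
// returned Data stays owned by C++, Python merely borrows the pointer.
#include <pybind11/pybind11.h>
namespace py = pybind11;

struct Data { int value = 42; };
static Data global_data;  // lifetime managed entirely on the C++ side

PYBIND11_MODULE(rvp_demo, m) {
    py::class_<Data>(m, "Data")
        .def_readwrite("value", &Data::value);
    m.def("get_data", []() -> Data * { return &global_data; },
          py::return_value_policy::reference);
}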
@ -496,8 +571,8 @@ enum class return_value_policy : uint8_t {
/** This policy only applies to methods and properties. It references the
object without taking ownership similar to the above
return_value_policy::reference policy. In contrast to that policy, the
function or propertys implicit this argument (called the parent) is
considered to be the the owner of the return value (the child).
function or property's implicit this argument (called the parent) is
considered to be the owner of the return value (the child).
pybind11 then couples the lifetime of the parent to the child via a
reference relationship that ensures that the parent cannot be garbage
collected while Python is still using the child. More advanced
@ -580,6 +655,8 @@ struct instance {
bool simple_instance_registered : 1;
/// If true, get_internals().patients has an entry for this object
bool has_patients : 1;
/// If true, this Python object needs to be kept alive for the lifetime of the C++ value.
bool is_alias : 1;
/// Initializes all of the above type/values/holders data (but not the instance values
/// themselves)
@ -602,8 +679,16 @@ struct instance {
static_assert(std::is_standard_layout<instance>::value,
"Internal error: `pybind11::detail::instance` is not standard layout!");
// Some older compilers (e.g. gcc 9.4.0) require
// static_assert(always_false<T>::value, "...");
// instead of
// static_assert(false, "...");
// to trigger the static_assert() in a template only if it is actually instantiated.
template <typename>
struct always_false : std::false_type {};
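// [Editor's illustrative sketch, not part of the diff] The point of the dependent
// always_false<T>: the static_assert fires only when the template is actually instantiated,
// whereas static_assert(false, ...) may fire eagerly on those older compilers.
#include <type_traits>

template <typename>
struct sketch_always_false : std::false_type {};

template <typename T>
void not_supported_for(T) {
    static_assert(sketch_always_false<T>::value, "not_supported_for<T> was instantiated");
}
// Merely defining the template above compiles cleanly; calling not_supported_for(0) turns
// the static_assert into a compile-time error for T = int.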
/// from __cpp_future__ import (convenient aliases from C++14/17)
#if defined(PYBIND11_CPP14) && (!defined(_MSC_VER) || _MSC_VER >= 1910)
#if defined(PYBIND11_CPP14)
using std::conditional_t;
using std::enable_if_t;
using std::remove_cv_t;
@ -619,7 +704,7 @@ template <typename T>
using remove_reference_t = typename std::remove_reference<T>::type;
#endif
#if defined(PYBIND11_CPP20)
#if defined(PYBIND11_CPP20) && defined(__cpp_lib_remove_cvref)
using std::remove_cvref;
using std::remove_cvref_t;
#else
@ -631,6 +716,10 @@ template <class T>
using remove_cvref_t = typename remove_cvref<T>::type;
#endif
/// Example usage: is_same_ignoring_cvref<T, PyObject *>::value
template <typename T, typename U>
using is_same_ignoring_cvref = std::is_same<detail::remove_cvref_t<T>, U>;
/// Index sequences
#if defined(PYBIND11_CPP14)
using std::index_sequence;
@ -638,14 +727,49 @@ using std::make_index_sequence;
#else
template <size_t...>
struct index_sequence {};
template <size_t N, size_t... S>
struct make_index_sequence_impl : make_index_sequence_impl<N - 1, N - 1, S...> {};
template <size_t... S>
struct make_index_sequence_impl<0, S...> {
// Comments about the algorithm below.
//
// Credit: This is based on an algorithm by taocpp here:
// https://github.com/taocpp/sequences/blob/main/include/tao/seq/make_integer_sequence.hpp
// but significantly simplified.
//
// We build up a sequence S by repeatedly doubling its length and sometimes adding 1 to the end.
// E.g. if the current S is 0...3, then we either go to 0...7 or 0...8 on the next pass.
// The goal is to end with S = 0...N-1.
// The key insight is that the times we need to add an additional digit to S correspond
// exactly to the 1's in the binary representation of the number N.
//
// Invariants:
// - digit is a power of 2
// - N_digit_is_1 is whether N's binary representation has a 1 in that digit's position.
// - end <= N
// - S is 0...end-1.
// - if digit > 0, end * digit * 2 <= N < (end+1) * digit * 2
//
// The process starts with digit > N, end = 0, and S is empty.
// The process concludes with digit=0, in which case, end == N and S is 0...N-1.
template <size_t digit, bool N_digit_is_1, size_t N, size_t end, size_t... S> // N_digit_is_1=false
struct make_index_sequence_impl
: make_index_sequence_impl<digit / 2, (N & (digit / 2)) != 0, N, 2 * end, S..., (S + end)...> {
};
template <size_t digit, size_t N, size_t end, size_t... S>
struct make_index_sequence_impl<digit, true, N, end, S...>
: make_index_sequence_impl<digit / 2,
(N & (digit / 2)) != 0,
N,
2 * end + 1,
S...,
(S + end)...,
2 * end> {};
template <size_t N, size_t end, size_t... S>
struct make_index_sequence_impl<0, false, N, end, S...> {
using type = index_sequence<S...>;
};
constexpr size_t next_power_of_2(size_t N) { return N == 0 ? 1 : next_power_of_2(N >> 1) << 1; }
template <size_t N>
using make_index_sequence = typename make_index_sequence_impl<N>::type;
using make_index_sequence =
typename make_index_sequence_impl<next_power_of_2(N), false, N, 0>::type;
#endif
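// [Editor's illustrative sketch, not part of the diff] A self-contained copy of the doubling
// construction above plus a compile-time check. For N = 5 (binary 101) the passes grow the
// sequence {} -> {0} -> {0,1} -> {0,1,2,3,4}, i.e. 0..N-1.
#include <cstddef>
#include <type_traits>

namespace sketch {
template <std::size_t...>
struct index_sequence {};

template <std::size_t digit, bool N_digit_is_1, std::size_t N, std::size_t end, std::size_t... S>
struct make_impl
    : make_impl<digit / 2, (N & (digit / 2)) != 0, N, 2 * end, S..., (S + end)...> {};

template <std::size_t digit, std::size_t N, std::size_t end, std::size_t... S>
struct make_impl<digit, true, N, end, S...>
    : make_impl<digit / 2, (N & (digit / 2)) != 0, N, 2 * end + 1, S..., (S + end)..., 2 * end> {};

template <std::size_t N, std::size_t end, std::size_t... S>
struct make_impl<0, false, N, end, S...> {
    using type = index_sequence<S...>;
};

constexpr std::size_t next_power_of_2(std::size_t N) {
    return N == 0 ? 1 : next_power_of_2(N >> 1) << 1;
}

template <std::size_t N>
using make_index_sequence = typename make_impl<next_power_of_2(N), false, N, 0>::type;
} // namespace sketch

static_assert(std::is_same<sketch::make_index_sequence<5>,
                           sketch::index_sequence<0, 1, 2, 3, 4>>::value,
              "doubling construction yields 0..4 for N = 5");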
/// Make an index sequence of the indices of true arguments
@ -724,7 +848,16 @@ template <typename C, typename R, typename... A>
struct remove_class<R (C::*)(A...) const> {
using type = R(A...);
};
#ifdef __cpp_noexcept_function_type
template <typename C, typename R, typename... A>
struct remove_class<R (C::*)(A...) noexcept> {
using type = R(A...);
};
template <typename C, typename R, typename... A>
struct remove_class<R (C::*)(A...) const noexcept> {
using type = R(A...);
};
#endif
/// Helper template to strip away type modifiers
template <typename T>
struct intrinsic_type {
@ -866,13 +999,14 @@ struct is_template_base_of_impl {
/// Check if a template is the base of a type. For example:
/// `is_template_base_of<Base, T>` is true if `struct T : Base<U> {}` where U can be anything
template <template <typename...> class Base, typename T>
// Sadly, all MSVC versions incl. 2022 need the workaround, even in C++20 mode.
// See also: https://github.com/pybind/pybind11/pull/3741
#if !defined(_MSC_VER)
using is_template_base_of
= decltype(is_template_base_of_impl<Base>::check((intrinsic_t<T> *) nullptr));
#else // MSVC2015 has trouble with decltype in template aliases
#else
struct is_template_base_of
: decltype(is_template_base_of_impl<Base>::check((intrinsic_t<T> *) nullptr)) {
};
: decltype(is_template_base_of_impl<Base>::check((intrinsic_t<T> *) nullptr)){};
#endif
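// [Editor's illustrative sketch, not part of the diff] What the trait answers, shown with a
// local stand-in so the snippet compiles without the surrounding pybind11 headers.
#include <type_traits>

template <template <typename...> class Base>
struct sketch_base_checker {
    template <typename... Us> static std::true_type check(Base<Us...> *);
    static std::false_type check(...);
};
template <template <typename...> class Base, typename T>
using sketch_is_template_base_of = decltype(sketch_base_checker<Base>::check((T *) nullptr));

template <typename U> struct Holder {};
struct UsesHolder : Holder<int> {};
struct Unrelated {};

static_assert(sketch_is_template_base_of<Holder, UsesHolder>::value,
              "UsesHolder derives from Holder<int>");
static_assert(!sketch_is_template_base_of<Holder, Unrelated>::value,
              "Unrelated does not derive from any Holder<...>");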
/// Check if T is an instantiation of the template `Class`. For example:
@ -939,12 +1073,6 @@ using expand_side_effects = bool[];
PYBIND11_NAMESPACE_END(detail)
#if defined(_MSC_VER)
# pragma warning(push)
# pragma warning(disable : 4275)
// warning C4275: An exported class was derived from a class that wasn't exported.
// Can be ignored when derived from a STL class.
#endif
/// C++ bindings of builtin Python exceptions
class PYBIND11_EXPORT_EXCEPTION builtin_exception : public std::runtime_error {
public:
@ -952,9 +1080,6 @@ public:
/// Set the error using the Python C API
virtual void set_error() const = 0;
};
#if defined(_MSC_VER)
# pragma warning(pop)
#endif
#define PYBIND11_RUNTIME_EXCEPTION(name, type) \
class PYBIND11_EXPORT_EXCEPTION name : public builtin_exception { \
@ -978,15 +1103,26 @@ PYBIND11_RUNTIME_EXCEPTION(cast_error, PyExc_RuntimeError) /// Thrown when pybin
PYBIND11_RUNTIME_EXCEPTION(reference_cast_error, PyExc_RuntimeError) /// Used internally
[[noreturn]] PYBIND11_NOINLINE void pybind11_fail(const char *reason) {
assert(!PyErr_Occurred());
throw std::runtime_error(reason);
}
[[noreturn]] PYBIND11_NOINLINE void pybind11_fail(const std::string &reason) {
assert(!PyErr_Occurred());
throw std::runtime_error(reason);
}
template <typename T, typename SFINAE = void>
struct format_descriptor {};
template <typename T>
struct format_descriptor<
T,
detail::enable_if_t<detail::is_same_ignoring_cvref<T, PyObject *>::value>> {
static constexpr const char c = 'O';
static constexpr const char value[2] = {c, '\0'};
static std::string format() { return std::string(1, c); }
};
PYBIND11_NAMESPACE_BEGIN(detail)
// Returns the index of the given type in the type char array below, and in the list in numpy.h
// The order here is: bool; 8 ints ((signed,unsigned)x(8,16,32,64)bits); float,double,long double;
@ -1032,6 +1168,8 @@ constexpr const char
struct error_scope {
PyObject *type, *value, *trace;
error_scope() { PyErr_Fetch(&type, &value, &trace); }
error_scope(const error_scope &) = delete;
error_scope &operator=(const error_scope &) = delete;
~error_scope() { PyErr_Restore(type, value, trace); }
};
@ -1044,9 +1182,6 @@ struct nodelete {
PYBIND11_NAMESPACE_BEGIN(detail)
template <typename... Args>
struct overload_cast_impl {
// NOLINTNEXTLINE(modernize-use-equals-default): MSVC 2015 needs this
constexpr overload_cast_impl() {}
template <typename Return>
constexpr auto operator()(Return (*pf)(Args...)) const noexcept -> decltype(pf) {
return pf;
@ -1073,8 +1208,7 @@ PYBIND11_NAMESPACE_END(detail)
/// - regular: static_cast<Return (Class::*)(Arg0, Arg1, Arg2)>(&Class::func)
/// - sweet: overload_cast<Arg0, Arg1, Arg2>(&Class::func)
template <typename... Args>
static constexpr detail::overload_cast_impl<Args...> overload_cast = {};
// MSVC 2015 only accepts this particular initialization syntax for this variable template.
static constexpr detail::overload_cast_impl<Args...> overload_cast{};
#endif
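// [Editor's illustrative sketch, not part of the diff] The "sweet" spelling from the comment
// above, used to pick one overload of a member function at binding time; the class, module
// and method names are hypothetical.
#include <pybind11/pybind11.h>
namespace py = pybind11;

struct Widget {
    void set(int) {}
    void set(const char *) {}
};

PYBIND11_MODULE(overload_demo, m) {
    py::class_<Widget>(m, "Widget")
        .def(py::init<>())
        .def("set_int", py::overload_cast<int>(&Widget::set))
        .def("set_str", py::overload_cast<const char *>(&Widget::set));
}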
/// Const member function selector for overload_cast
@ -1160,7 +1294,7 @@ try_get_shared_from_this(std::enable_shared_from_this<T> *holder_value_ptr) {
// For silencing "unused" compiler warnings in special situations.
template <typename... Args>
#if defined(_MSC_VER) && _MSC_VER >= 1910 && _MSC_VER < 1920 // MSVC 2017
#if defined(_MSC_VER) && _MSC_VER < 1920 // MSVC 2017
constexpr
#endif
inline void
@ -1183,15 +1317,35 @@ constexpr
# define PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(...)
#endif
#if defined(_MSC_VER) // All versions (as of July 2021).
#if defined(__clang__) \
&& (defined(__apple_build_version__) /* AppleClang 13.0.0.13000029 was the only data point \
available. */ \
|| (__clang_major__ >= 7 \
&& __clang_major__ <= 12) /* Clang 3, 5, 13, 14, 15 do not generate the warning. */ \
)
# define PYBIND11_DETECTED_CLANG_WITH_MISLEADING_CALL_STD_MOVE_EXPLICITLY_WARNING
// Example:
// tests/test_kwargs_and_defaults.cpp:46:68: error: local variable 'args' will be copied despite
// being returned by name [-Werror,-Wreturn-std-move]
// m.def("args_function", [](py::args args) -> py::tuple { return args; });
// ^~~~
// test_kwargs_and_defaults.cpp:46:68: note: call 'std::move' explicitly to avoid copying
// m.def("args_function", [](py::args args) -> py::tuple { return args; });
// ^~~~
// std::move(args)
#endif
// warning C4127: Conditional expression is constant
constexpr inline bool silence_msvc_c4127(bool cond) { return cond; }
// Pybind offers detailed error messages by default for all builds that are debug (through the
// negation of NDEBUG). This can also be manually enabled by users, for any builds, through
// defining PYBIND11_DETAILED_ERROR_MESSAGES. This information is primarily useful for those
// who are writing (as opposed to merely using) libraries that use pybind11.
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES) && !defined(NDEBUG)
# define PYBIND11_DETAILED_ERROR_MESSAGES
#endif
# define PYBIND11_SILENCE_MSVC_C4127(...) ::pybind11::detail::silence_msvc_c4127(__VA_ARGS__)
#else
# define PYBIND11_SILENCE_MSVC_C4127(...) __VA_ARGS__
// CPython 3.11+ provides Py_TPFLAGS_MANAGED_DICT, but PyPy3.11 does not, see PR #5508.
#if PY_VERSION_HEX < 0x030B0000 || defined(PYPY_VERSION)
# define PYBIND11_BACKWARD_COMPATIBILITY_TP_DICTOFFSET
#endif
PYBIND11_NAMESPACE_END(detail)

View File

@ -0,0 +1,75 @@
// Copyright (c) 2024 The pybind Community.
#pragma once
#include <pybind11/pytypes.h>
#include "common.h"
#include "internals.h"
#include <typeinfo>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
// Forward declaration needed here: Refactoring opportunity.
extern "C" inline PyObject *pybind11_object_new(PyTypeObject *type, PyObject *, PyObject *);
inline bool type_is_managed_by_our_internals(PyTypeObject *type_obj) {
#if defined(PYPY_VERSION)
auto &internals = get_internals();
return bool(internals.registered_types_py.find(type_obj)
!= internals.registered_types_py.end());
#else
return bool(type_obj->tp_new == pybind11_object_new);
#endif
}
inline bool is_instance_method_of_type(PyTypeObject *type_obj, PyObject *attr_name) {
PyObject *descr = _PyType_Lookup(type_obj, attr_name);
return bool((descr != nullptr) && PyInstanceMethod_Check(descr));
}
inline object try_get_cpp_conduit_method(PyObject *obj) {
if (PyType_Check(obj)) {
return object();
}
PyTypeObject *type_obj = Py_TYPE(obj);
str attr_name("_pybind11_conduit_v1_");
bool assumed_to_be_callable = false;
if (type_is_managed_by_our_internals(type_obj)) {
if (!is_instance_method_of_type(type_obj, attr_name.ptr())) {
return object();
}
assumed_to_be_callable = true;
}
PyObject *method = PyObject_GetAttr(obj, attr_name.ptr());
if (method == nullptr) {
PyErr_Clear();
return object();
}
if (!assumed_to_be_callable && PyCallable_Check(method) == 0) {
Py_DECREF(method);
return object();
}
return reinterpret_steal<object>(method);
}
inline void *try_raw_pointer_ephemeral_from_cpp_conduit(handle src,
const std::type_info *cpp_type_info) {
object method = try_get_cpp_conduit_method(src.ptr());
if (method) {
capsule cpp_type_info_capsule(const_cast<void *>(static_cast<const void *>(cpp_type_info)),
typeid(std::type_info).name());
object cpp_conduit = method(bytes(PYBIND11_PLATFORM_ABI_ID),
cpp_type_info_capsule,
bytes("raw_pointer_ephemeral"));
if (isinstance<capsule>(cpp_conduit)) {
return reinterpret_borrow<capsule>(cpp_conduit).get_pointer();
}
}
return nullptr;
}
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)

View File

@ -99,6 +99,26 @@ constexpr descr<1, Type> const_name() {
return {'%'};
}
// Use a different name based on whether the parameter is used as input or output
template <size_t N1, size_t N2>
constexpr descr<N1 + N2 + 1> io_name(char const (&text1)[N1], char const (&text2)[N2]) {
return const_name("@") + const_name(text1) + const_name("@") + const_name(text2)
+ const_name("@");
}
// Ternary description for io_name (like the numeric type_caster)
template <bool B, size_t N1, size_t N2, size_t N3, size_t N4>
constexpr enable_if_t<B, descr<N1 + N2 + 1>>
io_name(char const (&text1)[N1], char const (&text2)[N2], char const (&)[N3], char const (&)[N4]) {
return io_name(text1, text2);
}
template <bool B, size_t N1, size_t N2, size_t N3, size_t N4>
constexpr enable_if_t<!B, descr<N3 + N4 + 1>>
io_name(char const (&)[N1], char const (&)[N2], char const (&text3)[N3], char const (&text4)[N4]) {
return io_name(text3, text4);
}
// If "_" is defined as a macro, py::detail::_ cannot be provided.
// It is therefore best to use py::detail::const_name universally.
// This block is for backward compatibility only.
@ -137,22 +157,70 @@ constexpr descr<1, Type> _() {
#endif // #ifndef _
constexpr descr<0> concat() { return {}; }
constexpr descr<0> union_concat() { return {}; }
template <size_t N, typename... Ts>
constexpr descr<N, Ts...> concat(const descr<N, Ts...> &descr) {
return descr;
}
template <size_t N, typename... Ts>
constexpr descr<N, Ts...> union_concat(const descr<N, Ts...> &descr) {
return descr;
}
template <size_t N1, size_t N2, typename... Ts1, typename... Ts2>
constexpr descr<N1 + N2 + 3, Ts1..., Ts2...> operator|(const descr<N1, Ts1...> &a,
const descr<N2, Ts2...> &b) {
return a + const_name(" | ") + b;
}
#ifdef __cpp_fold_expressions
template <size_t N1, size_t N2, typename... Ts1, typename... Ts2>
constexpr descr<N1 + N2 + 2, Ts1..., Ts2...> operator,(const descr<N1, Ts1...> &a,
const descr<N2, Ts2...> &b) {
return a + const_name(", ") + b;
}
template <size_t N, typename... Ts, typename... Args>
constexpr auto concat(const descr<N, Ts...> &d, const Args &...args) {
return (d, ..., args);
}
template <size_t N, typename... Ts, typename... Args>
constexpr auto union_concat(const descr<N, Ts...> &d, const Args &...args) {
return (d | ... | args);
}
#else
template <size_t N, typename... Ts, typename... Args>
constexpr auto concat(const descr<N, Ts...> &d, const Args &...args)
-> decltype(std::declval<descr<N + 2, Ts...>>() + concat(args...)) {
return d + const_name(", ") + concat(args...);
}
template <size_t N, typename... Ts, typename... Args>
constexpr auto union_concat(const descr<N, Ts...> &d, const Args &...args)
-> decltype(std::declval<descr<N + 3, Ts...>>() + union_concat(args...)) {
return d + const_name(" | ") + union_concat(args...);
}
#endif
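// [Editor's illustrative sketch, not part of the diff] The same comma-fold trick as the
// concat() above, demonstrated with plain std::string so it runs without the descr
// machinery (requires C++17 fold expressions).
#include <iostream>
#include <string>

struct Piece {
    std::string s;
};
Piece operator,(const Piece &a, const Piece &b) { return Piece{a.s + ", " + b.s}; }

template <typename... Args>
Piece sketch_concat(const Piece &d, const Args &...args) {
    return (d, ..., args);  // binary left fold over the overloaded comma operator
}

int main() {
    std::cout << sketch_concat(Piece{"int"}, Piece{"str"}, Piece{"float"}).s << "\n";
    // prints: int, str, float
}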
template <size_t N, typename... Ts>
constexpr descr<N + 2, Ts...> type_descr(const descr<N, Ts...> &descr) {
return const_name("{") + descr + const_name("}");
}
template <size_t N, typename... Ts>
constexpr descr<N + 4, Ts...> arg_descr(const descr<N, Ts...> &descr) {
return const_name("@^") + descr + const_name("@!");
}
template <size_t N, typename... Ts>
constexpr descr<N + 4, Ts...> return_descr(const descr<N, Ts...> &descr) {
return const_name("@$") + descr + const_name("@!");
}
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)

View File

@ -0,0 +1,39 @@
// Copyright (c) 2021 The Pybind Development Team.
// All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#pragma once
#include "common.h"
#include <type_traits>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
template <typename To, typename From, typename SFINAE = void>
struct dynamic_raw_ptr_cast_is_possible : std::false_type {};
template <typename To, typename From>
struct dynamic_raw_ptr_cast_is_possible<
To,
From,
detail::enable_if_t<!std::is_same<To, void>::value && std::is_polymorphic<From>::value>>
: std::true_type {};
template <typename To,
typename From,
detail::enable_if_t<!dynamic_raw_ptr_cast_is_possible<To, From>::value, int> = 0>
To *dynamic_raw_ptr_cast_if_possible(From * /*ptr*/) {
return nullptr;
}
template <typename To,
typename From,
detail::enable_if_t<dynamic_raw_ptr_cast_is_possible<To, From>::value, int> = 0>
To *dynamic_raw_ptr_cast_if_possible(From *ptr) {
return dynamic_cast<To *>(ptr);
}
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
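// [Editor's illustrative sketch, not part of the diff] The dispatch above in miniature, with
// a local stand-in so it compiles on its own: the cast quietly yields nullptr when a
// dynamic_cast is not even permitted for the source type.
#include <type_traits>

template <typename To, typename From>
typename std::enable_if<!std::is_polymorphic<From>::value, To *>::type
sketch_cast_if_possible(From *) {
    return nullptr;  // dynamic_cast would be ill-formed here
}

template <typename To, typename From>
typename std::enable_if<std::is_polymorphic<From>::value, To *>::type
sketch_cast_if_possible(From *ptr) {
    return dynamic_cast<To *>(ptr);
}

struct Plain {};
struct Base { virtual ~Base() = default; };
struct Derived : Base {};

int main() {
    Derived d;
    Plain p;
    Base *b = &d;
    bool ok = sketch_cast_if_possible<Derived>(b) == &d            // real downcast
              && sketch_cast_if_possible<Derived>(&p) == nullptr;  // Plain: no cast attempted
    return ok ? 0 : 1;
}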

View File

@ -0,0 +1,71 @@
/*
pybind11/detail/exception_translation.h: means to translate C++ exceptions to Python exceptions
Copyright (c) 2024 The Pybind Development Team.
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "common.h"
#include "internals.h"
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
// Apply all the extensions translators from a list
// Return true if one of the translators completed without raising an exception
// itself. Return of false indicates that if there are other translators
// available, they should be tried.
inline bool apply_exception_translators(std::forward_list<ExceptionTranslator> &translators) {
auto last_exception = std::current_exception();
for (auto &translator : translators) {
try {
translator(last_exception);
return true;
} catch (...) {
last_exception = std::current_exception();
}
}
return false;
}
inline void try_translate_exceptions() {
/* When an exception is caught, give each registered exception
translator a chance to translate it to a Python exception. First
all module-local translators will be tried in reverse order of
registration. If none of the module-local translators handle
the exception (or there are no module-local translators) then
the global translators will be tried, also in reverse order of
registration.
A translator may choose to do one of the following:
- catch the exception and call py::set_error()
to set a standard (or custom) Python exception, or
- do nothing and let the exception fall through to the next translator, or
- delegate translation to the next translator by throwing a new type of exception.
*/
bool handled = with_exception_translators(
[&](std::forward_list<ExceptionTranslator> &exception_translators,
std::forward_list<ExceptionTranslator> &local_exception_translators) {
if (detail::apply_exception_translators(local_exception_translators)) {
return true;
}
if (detail::apply_exception_translators(exception_translators)) {
return true;
}
return false;
});
if (!handled) {
set_error(PyExc_SystemError, "Exception escaped from default exception translator!");
}
}
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
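// [Editor's illustrative sketch, not part of the diff] A user-registered translator of the
// kind the loops above iterate over; module, exception and function names are hypothetical.
#include <pybind11/pybind11.h>
#include <exception>
#include <stdexcept>
namespace py = pybind11;

struct MyError : std::runtime_error {
    using std::runtime_error::runtime_error;
};

PYBIND11_MODULE(translate_demo, m) {
    py::register_exception_translator([](std::exception_ptr p) {
        try {
            if (p) {
                std::rethrow_exception(p);
            }
        } catch (const MyError &e) {
            py::set_error(PyExc_ValueError, e.what());  // translated: done
        }
        // Any other exception type falls through to the next registered translator.
    });
    m.def("boom", [] { throw MyError("becomes ValueError in Python"); });
}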

View File

@ -0,0 +1,191 @@
// Copyright (c) 2024-2025 The Pybind Development Team.
// All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
// For background see the description of PR google/pybind11clif#30099.
#pragma once
#include <pybind11/attr.h>
#include <pybind11/conduit/pybind11_platform_abi_id.h>
#include <pybind11/pytypes.h>
#include "common.h"
#include <cstring>
#include <utility>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
struct function_record_PyObject {
PyObject_HEAD
function_record *cpp_func_rec;
};
PYBIND11_NAMESPACE_BEGIN(function_record_PyTypeObject_methods)
PyObject *tp_new_impl(PyTypeObject *type, PyObject *args, PyObject *kwds);
PyObject *tp_alloc_impl(PyTypeObject *type, Py_ssize_t nitems);
int tp_init_impl(PyObject *self, PyObject *args, PyObject *kwds);
void tp_dealloc_impl(PyObject *self);
void tp_free_impl(void *self);
static PyObject *reduce_ex_impl(PyObject *self, PyObject *, PyObject *);
static PyMethodDef tp_methods_impl[]
= {{"__reduce_ex__",
// reduce_ex_impl is a PyCFunctionWithKeywords, but PyMethodDef
// requires a PyCFunction. The cast through void* is safe and
// idiomatic with METH_KEYWORDS, and it successfully sidesteps
// unhelpful compiler warnings.
// NOLINTNEXTLINE(bugprone-casting-through-void)
reinterpret_cast<PyCFunction>(reinterpret_cast<void *>(reduce_ex_impl)),
METH_VARARGS | METH_KEYWORDS,
nullptr},
{nullptr, nullptr, 0, nullptr}};
// Python 3.12+ emits a DeprecationWarning for heap types whose tp_name does
// not contain a dot ('.') and that lack a __module__ attribute. For pybind11's
// internal function_record type, we do not have an actual module object to
// attach, so we cannot use PyType_FromModuleAndSpec (introduced in Python 3.9)
// to set __module__ automatically.
//
// As a workaround, we define a "qualified" type name that includes a dummy
// module name (PYBIND11_DUMMY_MODULE_NAME). This is nonidiomatic but avoids
// the deprecation warning, and results in reprs like
//
// <class 'pybind11_builtins.pybind11_detail_function_record_...'>
//
// even though no real pybind11_builtins module exists. If pybind11 gains an
// actual module object in the future, this code should switch to
// PyType_FromModuleAndSpec for Python 3.9+ and drop the dummy module
// workaround.
//
// Note that this name is versioned.
#define PYBIND11_DETAIL_FUNCTION_RECORD_TP_PLAINNAME \
"pybind11_detail_function_record_" PYBIND11_DETAIL_FUNCTION_RECORD_ABI_ID \
"_" PYBIND11_PLATFORM_ABI_ID
constexpr char tp_plainname_impl[] = PYBIND11_DETAIL_FUNCTION_RECORD_TP_PLAINNAME;
constexpr char tp_qualname_impl[]
= PYBIND11_DUMMY_MODULE_NAME "." PYBIND11_DETAIL_FUNCTION_RECORD_TP_PLAINNAME;
PYBIND11_NAMESPACE_END(function_record_PyTypeObject_methods)
static PyType_Slot function_record_PyType_Slots[] = {
{Py_tp_dealloc,
reinterpret_cast<void *>(function_record_PyTypeObject_methods::tp_dealloc_impl)},
{Py_tp_methods,
reinterpret_cast<void *>(function_record_PyTypeObject_methods::tp_methods_impl)},
{Py_tp_init, reinterpret_cast<void *>(function_record_PyTypeObject_methods::tp_init_impl)},
{Py_tp_alloc, reinterpret_cast<void *>(function_record_PyTypeObject_methods::tp_alloc_impl)},
{Py_tp_new, reinterpret_cast<void *>(function_record_PyTypeObject_methods::tp_new_impl)},
{Py_tp_free, reinterpret_cast<void *>(function_record_PyTypeObject_methods::tp_free_impl)},
{0, nullptr}};
static PyType_Spec function_record_PyType_Spec
= {function_record_PyTypeObject_methods::tp_qualname_impl,
sizeof(function_record_PyObject),
0,
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE,
function_record_PyType_Slots};
inline PyTypeObject *get_function_record_PyTypeObject() {
PyTypeObject *&py_type_obj = detail::get_local_internals().function_record_py_type;
if (!py_type_obj) {
PyObject *py_obj = PyType_FromSpec(&function_record_PyType_Spec);
if (py_obj == nullptr) {
throw error_already_set();
}
py_type_obj = reinterpret_cast<PyTypeObject *>(py_obj);
}
return py_type_obj;
}
inline bool is_function_record_PyObject(PyObject *obj) {
if (PyType_Check(obj) != 0) {
return false;
}
PyTypeObject *obj_type = Py_TYPE(obj);
PyTypeObject *frtype = get_function_record_PyTypeObject();
// Fast path (pointer comparison).
if (obj_type == frtype) {
return true;
}
// This works across extension modules. Note that tp_name is versioned.
if (strcmp(obj_type->tp_name, function_record_PyTypeObject_methods::tp_qualname_impl) == 0
|| strcmp(obj_type->tp_name, function_record_PyTypeObject_methods::tp_plainname_impl)
== 0) {
return true;
}
return false;
}
inline function_record *function_record_ptr_from_PyObject(PyObject *obj) {
if (is_function_record_PyObject(obj)) {
return ((detail::function_record_PyObject *) obj)->cpp_func_rec;
}
return nullptr;
}
inline object function_record_PyObject_New() {
auto *py_func_rec = PyObject_New(function_record_PyObject, get_function_record_PyTypeObject());
if (py_func_rec == nullptr) {
throw error_already_set();
}
py_func_rec->cpp_func_rec = nullptr; // For clarity/purity. Redundant in practice.
return reinterpret_steal<object>((PyObject *) py_func_rec);
}
PYBIND11_NAMESPACE_BEGIN(function_record_PyTypeObject_methods)
// Guard against accidents & oversights, in particular when porting to future Python versions.
inline PyObject *tp_new_impl(PyTypeObject *, PyObject *, PyObject *) {
pybind11_fail("UNEXPECTED CALL OF function_record_PyTypeObject_methods::tp_new_impl");
// return nullptr; // Unreachable.
}
inline PyObject *tp_alloc_impl(PyTypeObject *, Py_ssize_t) {
pybind11_fail("UNEXPECTED CALL OF function_record_PyTypeObject_methods::tp_alloc_impl");
// return nullptr; // Unreachable.
}
inline int tp_init_impl(PyObject *, PyObject *, PyObject *) {
pybind11_fail("UNEXPECTED CALL OF function_record_PyTypeObject_methods::tp_init_impl");
// return -1; // Unreachable.
}
inline void tp_free_impl(void *) {
pybind11_fail("UNEXPECTED CALL OF function_record_PyTypeObject_methods::tp_free_impl");
}
inline PyObject *reduce_ex_impl(PyObject *self, PyObject *, PyObject *) {
// Deliberately ignoring the arguments for simplicity (expected is `protocol: int`).
const function_record *rec = function_record_ptr_from_PyObject(self);
if (rec == nullptr) {
pybind11_fail(
"FATAL: function_record_PyTypeObject reduce_ex_impl(): cannot obtain cpp_func_rec.");
}
if (rec->name != nullptr && rec->name[0] != '\0' && rec->scope
&& PyModule_Check(rec->scope.ptr()) != 0) {
object scope_module = get_scope_module(rec->scope);
if (scope_module) {
auto builtins = reinterpret_borrow<dict>(PyEval_GetBuiltins());
auto builtins_eval = builtins["eval"];
auto reconstruct_args = make_tuple(str("__import__('importlib').import_module('")
+ scope_module + str("')"));
return make_tuple(std::move(builtins_eval), std::move(reconstruct_args))
.release()
.ptr();
}
}
set_error(PyExc_RuntimeError, repr(self) + str(" is not pickleable."));
return nullptr;
}
PYBIND11_NAMESPACE_END(function_record_PyTypeObject_methods)
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)

Some files were not shown because too many files have changed in this diff.