PyTorch uses channels_first format by default (N, C, H, W), while hls4ml expects channels_last (N, H, W, C).
python
# hls4ml automatically handles the channels_first -> channels_last
# conversion for io_parallel by inserting transpose layers itself.
hls_model = hls4ml.converters.convert_from_pytorch_model(
    model,
    hls_config=config,
    io_type='io_parallel',  # Automatic transpose layers added
)
# For io_stream, you may need to transpose manually
# Not all transpose operations are supported with io_stream
# Issue: F.linear is not supported
# Solution: Use nn.Linear instead
class MyModel(nn.Module):
    """Example: replace a raw F.linear call with an nn.Linear module.

    hls4ml's PyTorch parser recognizes nn.Module layers, not functional
    ops, so the weight must live in a registered nn.Linear.
    """

    def __init__(self):
        super().__init__()
        # Don't do this:
        #     self.weight = nn.Parameter(torch.randn(10, 5))
        #     ... return F.linear(x, self.weight)   # in forward()
        # and never construct the layer inside forward() — that would
        # re-create it with fresh random weights on every call.
        # Register the module once, here in __init__:
        self.fc = nn.Linear(5, 10)

    def forward(self, x):
        return self.fc(x)

# Issue: torch.nn.functional.conv2d not supported
# Solution: Use nn.Conv2d module
Use torch.nn modules instead of functional operations where possible.
Graph tracing issues
Some dynamic operations may cause tracing issues:
python
# Problematic: Dynamic control flow
class BadModel(nn.Module):
    """Anti-example: data-dependent branching breaks graph tracing."""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(4, 4)
        self.fc2 = nn.Linear(4, 4)

    def forward(self, x):
        if x.sum() > 0:  # Dynamic condition: branch depends on tensor values
            return self.fc1(x)
        return self.fc2(x)

# Solution: Use static control flow or torch.jit.script
class GoodModel(nn.Module):
    """Same layers, but the graph is identical for every input."""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(4, 4)
        self.fc2 = nn.Linear(4, 4)

    def forward(self, x):
        # Static operations only
        return self.fc1(x) + self.fc2(x)

# Or: Pre-trace the model yourself. symbolic_trace returns a runnable
# GraphModule (Tracer().trace() only returns a raw Graph object).
import torch.fx
traced = torch.fx.symbolic_trace(GoodModel())
Channels format issues
For io_stream with channels_first:
python
# Issue: Automatic transpose not supported for io_stream
hls_model = hls4ml.converters.convert_from_pytorch_model(
    model,
    hls_config=config,
    io_type='io_stream',  # May fail with conv layers
)

# Solution 1: Use io_parallel
io_type = 'io_parallel'

# Solution 2: Manually transpose in the model itself
class ChannelsLastModel(nn.Module):
    """Feeds channels_last data to the rest of the network."""

    def forward(self, x):
        x = x.permute(0, 2, 3, 1)  # NCHW -> NHWC
        # ... rest of forward pass
        return x
Precision mismatch
Improve PyTorch-to-HLS accuracy:
python
# 1. Use higher precision
config['Model']['Precision'] = 'ap_fixed<32,16>'

# 2. Configure specific layers
config['LayerName']['fc1'] = {'Precision': 'ap_fixed<24,12>'}

# 3. Check input data type matches
X_test = X_test.astype(np.float32)  # Match PyTorch default

# 4. Use same random seed
torch.manual_seed(42)
np.random.seed(42)
Convolution groups not supported
python
# Issue: Grouped convolutions (groups > 1)
self.conv = nn.Conv2d(32, 64, 3, groups=2)  # Not supported

# Solution: Use groups=1 (default)
self.conv = nn.Conv2d(32, 64, 3, groups=1)

# For depthwise separable, use separate layers
self.depthwise = nn.Conv2d(32, 32, 3, groups=32)  # Depthwise
self.pointwise = nn.Conv2d(32, 64, 1)             # Pointwise
# Load model with state dict
class MyModel(nn.Module):
    # ... model definition goes here
    ...

model = MyModel()
# weights_only=True restricts unpickling to tensor data, guarding
# against arbitrary code execution from an untrusted checkpoint file.
state_dict = torch.load('model_weights.pth', weights_only=True)
model.load_state_dict(state_dict)
model.eval()  # inference mode before conversion

# Convert
hls_model = hls4ml.converters.convert_from_pytorch_model(
    model,
    hls_config=config,
)