create_config
Create an initial configuration for model conversion.
hls4ml.utils.config.create_config(
output_dir='my-hls-test',
project_name='myproject',
backend='Vivado',
version='1.0.0',
**kwargs
)
Parameters
Output directory for the generated HLS project.
Name of the HLS project (used as top-level function name).
Backend to use: 'Vivado', 'Vitis', 'Quartus', 'Catapult', etc.
Backend-Specific Parameters
FPGA part number (e.g., 'xcvu13p-flga2577-2-e').
Target board name from supported_board.json.
Clock period in nanoseconds.
Clock uncertainty, expressed as a percentage of the clock period.
Interface type: 'io_parallel' or 'io_stream'.
Returns
Configuration dictionary ready for conversion.
Example
import hls4ml
# Basic configuration
config = hls4ml.utils.config.create_config(
output_dir='my-hls-test',
project_name='myproject',
backend='Vivado'
)
# With FPGA details
config = hls4ml.utils.config.create_config(
output_dir='production',
project_name='my_nn',
backend='Vivado',
part='xcvu9p-flgb2104-2-i',
clock_period=4,
io_type='io_stream'
)
# Using board preset
config = hls4ml.utils.config.create_config(
output_dir='pynq_project',
project_name='pynq_nn',
backend='Vivado',
board='pynq-z2'
)
config_from_keras_model
Generate HLS configuration from a Keras model structure.
hls4ml.utils.config.config_from_keras_model(
model,
granularity='model',
backend=None,
default_precision='fixed<16,6>',
default_reuse_factor=1,
max_precision=None
)
Parameters
model
keras.Model | dict
required
Keras model or model architecture dictionary.
Configuration granularity:
'model': Global settings only
'type': Settings per layer type
'name': Settings per individual layer
Backend name for backend-specific attributes.
Default precision for all layers.
Default reuse factor (1 = fully parallel).
Maximum allowed precision (for bit width limits).
Returns
HLS configuration dictionary.
Example
import tensorflow as tf
import hls4ml
# Create Keras model
model = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation='relu', input_shape=(10,)),
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dense(1)
])
# Generate config - model granularity
config = hls4ml.utils.config.config_from_keras_model(
model,
granularity='model'
)
print(config)
# {
# 'Model': {
# 'Precision': {'default': 'fixed<16,6>'},
# 'ReuseFactor': 1,
# 'Strategy': 'Latency'
# }
# }
# Type granularity - per layer type
config = hls4ml.utils.config.config_from_keras_model(
model,
granularity='type',
backend='Vivado'
)
print(config['LayerType']['Dense'])
# {
# 'Precision': {'default': 'auto'},
# 'ReuseFactor': 1
# }
# Name granularity - per layer
config = hls4ml.utils.config.config_from_keras_model(
model,
granularity='name'
)
print(list(config['LayerName'].keys()))
# ['dense', 'dense_1', 'dense_2']
Customizing Generated Config
# Generate base config
config = hls4ml.utils.config.config_from_keras_model(
model,
granularity='name',
default_precision='ap_fixed<16,6>',
default_reuse_factor=4
)
# Customize specific layers
config['LayerName']['dense_1']['Precision'] = 'ap_fixed<18,8>'
config['LayerName']['dense_1']['ReuseFactor'] = 2
# Convert with custom config
hls_model = hls4ml.converters.convert_from_keras_model(
model,
hls_config=config,
output_dir='custom_config'
)
config_from_pytorch_model
Generate HLS configuration from a PyTorch model.
hls4ml.utils.config.config_from_pytorch_model(
model,
input_shape,
granularity='model',
backend=None,
default_precision='ap_fixed<16,6>',
default_reuse_factor=1,
channels_last_conversion='full',
transpose_outputs=False,
max_precision=None
)
Parameters
Input shape(s) excluding batch dimension.
Channel conversion mode:
'full': Convert inputs and internal layers
'internal': Only internal layers
'off': No conversion
Transpose outputs back to channels-first format.
Returns
HLS configuration dictionary with 'InputShape' key.
Example
import torch
import torch.nn as nn
import hls4ml
# PyTorch model
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(10, 64)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(64, 1)

    def forward(self, x):
        x = self.relu(self.fc1(x))
        x = self.fc2(x)
        return x
model = Net()
# Generate config
config = hls4ml.utils.config.config_from_pytorch_model(
model,
input_shape=(10,),
granularity='name',
default_precision='ap_fixed<16,6>',
default_reuse_factor=1
)
print(config['InputShape']) # (10,)
print(list(config['LayerName'].keys())) # ['fc1', 'relu', 'fc2']
# Customize and convert
config['LayerName']['fc1']['ReuseFactor'] = 2
hls_model = hls4ml.converters.convert_from_pytorch_model(
model,
hls_config=config,
output_dir='pytorch_custom'
)
config_from_onnx_model
Generate HLS configuration from an ONNX model.
hls4ml.utils.config.config_from_onnx_model(
model,
granularity='name',
backend=None,
default_precision='fixed<16,6>',
default_reuse_factor=1,
max_precision=None
)
Parameters
Configuration granularity (recommended: 'name' for ONNX).
Returns
HLS configuration dictionary.
Example
import onnx
import hls4ml
# Load ONNX model
onnx_model = onnx.load('model.onnx')
# Generate config
config = hls4ml.utils.config.config_from_onnx_model(
onnx_model,
granularity='name',
backend='Vivado',
default_precision='ap_fixed<16,6>'
)
# Inspect generated config
for layer_name in config['LayerName']:
    print(f"{layer_name}: {config['LayerName'][layer_name]}")
# Convert
hls_model = hls4ml.converters.convert_from_onnx_model(
onnx_model,
hls_config=config,
output_dir='onnx_project'
)
Configuration Structure
Model-Level Config
config = {
'Model': {
'Precision': 'ap_fixed<16,6>', # Default precision
'ReuseFactor': 1, # Default reuse factor
'Strategy': 'Latency', # 'Latency' or 'Resource'
'BramFactor': 1000000000, # Weight size for BRAM
'TraceOutput': False, # Enable layer tracing
'ConvImplementation': 'LineBuffer', # Conv implementation
'PipelineStyle': 'auto' # HLS pipeline style
}
}
Layer Type Config
config = {
'LayerType': {
'Dense': {
'Precision': {
'weight': 'ap_fixed<8,4>',
'bias': 'ap_fixed<8,4>',
'result': 'ap_fixed<16,6>'
},
'ReuseFactor': 4
},
'Activation': {
'Precision': 'ap_fixed<16,6>',
'table_size': 1024
}
}
}
Layer Name Config
config = {
'LayerName': {
'dense_1': {
'Precision': {
'weight': 'ap_fixed<12,6>',
'bias': 'ap_fixed<12,6>',
'result': 'ap_fixed<16,6>'
},
'ReuseFactor': 2,
'Strategy': 'Latency'
},
'relu_1': {
'Precision': 'ap_fixed<16,6>',
'table_size': 512
}
}
}
Complete Example
import tensorflow as tf
import hls4ml
import numpy as np
# Create Keras model
model = tf.keras.Sequential([
    tf.keras.layers.Dense(128, activation='relu', input_shape=(784,)),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])
# Generate base config
hls_config = hls4ml.utils.config.config_from_keras_model(
model,
granularity='name',
default_precision='ap_fixed<16,6>',
default_reuse_factor=1
)
# Customize precision and reuse factors
hls_config['Model']['Strategy'] = 'Latency'
hls_config['LayerName']['dense']['ReuseFactor'] = 2
hls_config['LayerName']['dense']['Precision'] = {
'weight': 'ap_fixed<8,4>',
'bias': 'ap_fixed<8,4>',
'result': 'ap_fixed<16,6>'
}
hls_config['LayerName']['dense_1']['ReuseFactor'] = 4
hls_config['LayerName']['dense_2']['ReuseFactor'] = 8
hls_config['LayerName']['activation']['table_size'] = 512
# Create backend config
backend_config = hls4ml.utils.config.create_config(
output_dir='mnist_hls',
project_name='mnist_nn',
backend='Vivado',
board='pynq-z2',
io_type='io_stream'
)
# Combine and convert
full_config = {**backend_config}
full_config['HLSConfig'] = hls_config
hls_model = hls4ml.converters.convert_from_config(full_config)
# Test
hls_model.compile()
X_test = np.random.rand(10, 784).astype(np.float32)
predictions = hls_model.predict(X_test)
print(f"Predictions: {predictions.shape}")
# Synthesize
report = hls_model.build(csim=True, synth=True)
print(f"Resources: LUT={report['LUT']}, FF={report['FF']}, DSP={report['DSP']}")
Configuration Tips
Optimizing for Latency
config = {
'Model': {
'Precision': 'ap_fixed<16,6>',
'ReuseFactor': 1, # Fully parallel
'Strategy': 'Latency',
'ConvImplementation': 'Encoded' # Fast convolution
}
}
Optimizing for Resources
config = {
'Model': {
'Precision': 'ap_fixed<8,4>', # Lower precision
'ReuseFactor': 64, # More sequential
'Strategy': 'Resource'
}
}
Mixed Precision
config = {
'Model': {
'Precision': 'ap_fixed<16,6>'
},
'LayerName': {
'first_layer': {
'Precision': 'ap_fixed<12,6>' # Lower for first layer
},
'output': {
'Precision': 'ap_fixed<18,8>' # Higher for output
}
}
}
fetch_example_model
Download an example model with data and configuration from the hls4ml example-models repository.
hls4ml.utils.fetch_example_model(model_name, backend='Vivado')
Parameters
Name of the example model (e.g., 'KERAS_3layer.json'). Use fetch_example_list() to see available models.
Backend to use for model conversion.
Returns
Configuration dictionary ready to use with convert_from_config().
Example
import hls4ml
# Fetch an example model
config = hls4ml.utils.fetch_example_model('KERAS_3layer.json')
# Convert to HLS
hls_model = hls4ml.converters.convert_from_config(config)
# Build and test
hls_model.compile()
hls_model.build()
fetch_example_list
Display a list of all available example models in the repository.
hls4ml.utils.fetch_example_list()
Prints a formatted list of available example models from the example-models repository.
Example
import hls4ml
# List all available example models
hls4ml.utils.fetch_example_list()
# Output:
# {
# 'KERAS_3layer.json': 'Simple 3-layer Keras model',
# 'pytorch_cnn.pt': 'CNN for MNIST',
# ...
# }
See Also