Files
ldpc_optical/model/gen_firmware_vectors.py
cah 74baf3cd05 feat: add test vector generation for RTL verification
Improve generate_test_vectors() to use mixed SNR levels (high SNR for
first half, nominal for second half) ensuring a mix of converged and
non-converged test cases. Add gen_firmware_vectors.py converter that
reads test_vectors.json and produces packed LLR data matching the
RTL wishbone interface format (5 LLRs per 32-bit word, 6-bit two's
complement).

Generated 20 vectors: 11 converged, 9 non-converged.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-25 18:36:26 -07:00

289 lines
11 KiB
Python

#!/usr/bin/env python3
"""
Generate test vector files for cocotb and firmware from the Python model output.
Reads data/test_vectors.json and produces:
1. chip_ignite/verilog/dv/cocotb/ldpc_tests/test_data.py (cocotb Python module)
2. chip_ignite/firmware/ldpc_demo/test_vectors.h (C header for PicoRV32)
LLR packing format (matches wishbone_interface.sv):
Each 32-bit word holds 5 LLRs, 6 bits each, in two's complement.
Word[i] bits [5:0] = LLR[5*i+0]
Word[i] bits [11:6] = LLR[5*i+1]
Word[i] bits [17:12] = LLR[5*i+2]
Word[i] bits [23:18] = LLR[5*i+3]
Word[i] bits [29:24] = LLR[5*i+4]
52 words cover 260 LLRs (256 used, last 4 are zero-padded).
"""
import json
import os
import sys
# Paths relative to this script's directory
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PROJECT_DIR = os.path.dirname(SCRIPT_DIR)
INPUT_FILE = os.path.join(PROJECT_DIR, 'data', 'test_vectors.json')
COCOTB_OUTPUT = os.path.join(PROJECT_DIR, 'chip_ignite', 'verilog', 'dv', 'cocotb',
'ldpc_tests', 'test_data.py')
FIRMWARE_OUTPUT = os.path.join(PROJECT_DIR, 'chip_ignite', 'firmware', 'ldpc_demo',
'test_vectors.h')
Q_BITS = 6
LLRS_PER_WORD = 5
N_LLR = 256
N_WORDS = (N_LLR + LLRS_PER_WORD - 1) // LLRS_PER_WORD # 52
K = 32
def signed_to_twos_complement(val, bits=Q_BITS):
"""Convert signed integer to two's complement unsigned representation."""
if val < 0:
return val + (1 << bits)
return val & ((1 << bits) - 1)
def pack_llr_words(llr_quantized):
"""
Pack 256 signed LLRs into 52 uint32 words.
Each word contains 5 LLRs, 6 bits each:
bits[5:0] = LLR[5*word + 0]
bits[11:6] = LLR[5*word + 1]
bits[17:12] = LLR[5*word + 2]
bits[23:18] = LLR[5*word + 3]
bits[29:24] = LLR[5*word + 4]
"""
# Pad to 260 entries (52 * 5)
padded = list(llr_quantized) + [0] * (N_WORDS * LLRS_PER_WORD - N_LLR)
words = []
for w in range(N_WORDS):
word = 0
for p in range(LLRS_PER_WORD):
llr_idx = w * LLRS_PER_WORD + p
tc = signed_to_twos_complement(padded[llr_idx])
word |= (tc & 0x3F) << (p * Q_BITS)
words.append(word)
return words
def bits_to_uint32(bits):
"""Convert a list of 32 binary values to a single uint32 (bit 0 = LSB)."""
val = 0
for i, b in enumerate(bits):
if b:
val |= (1 << i)
return val
def generate_cocotb_test_data(vectors):
"""Generate Python module for cocotb tests."""
lines = []
lines.append('"""')
lines.append('Auto-generated test vector data for LDPC decoder cocotb tests.')
lines.append('Generated by model/gen_firmware_vectors.py')
lines.append('')
lines.append('LLR packing: 5 LLRs per 32-bit word, 6 bits each (two\'s complement)')
lines.append(' Word bits [5:0] = LLR[5*i+0]')
lines.append(' Word bits [11:6] = LLR[5*i+1]')
lines.append(' Word bits [17:12] = LLR[5*i+2]')
lines.append(' Word bits [23:18] = LLR[5*i+3]')
lines.append(' Word bits [29:24] = LLR[5*i+4]')
lines.append('"""')
lines.append('')
lines.append(f'# Number of test vectors')
lines.append(f'NUM_VECTORS = {len(vectors)}')
lines.append(f'LLR_WORDS_PER_VECTOR = {N_WORDS}')
lines.append('')
lines.append('# Wishbone register offsets (byte-addressed)')
lines.append('REG_CTRL = 0x00')
lines.append('REG_STATUS = 0x04')
lines.append('REG_LLR_BASE = 0x10 # 52 words: 0x10, 0x14, ..., 0xDC')
lines.append('REG_DECODED = 0x50')
lines.append('REG_VERSION = 0x54')
lines.append('')
lines.append('')
lines.append('TEST_VECTORS = [')
for vec in vectors:
llr_words = pack_llr_words(vec['llr_quantized'])
decoded_word = bits_to_uint32(vec['decoded_bits'])
lines.append(' {')
lines.append(f' \'index\': {vec["index"]},')
# Format LLR words as hex, 8 per line
lines.append(f' \'llr_words\': [')
for chunk_start in range(0, len(llr_words), 8):
chunk = llr_words[chunk_start:chunk_start + 8]
hex_str = ', '.join(f'0x{w:08X}' for w in chunk)
comma = ',' if chunk_start + 8 < len(llr_words) else ''
lines.append(f' {hex_str}{comma}')
lines.append(f' ],')
lines.append(f' \'decoded_word\': 0x{decoded_word:08X},')
lines.append(f' \'info_bits\': {vec["info_bits"]},')
lines.append(f' \'converged\': {vec["converged"]},')
lines.append(f' \'iterations\': {vec["iterations"]},')
lines.append(f' \'syndrome_weight\': {vec["syndrome_weight"]},')
lines.append(f' \'bit_errors\': {vec["bit_errors"]},')
lines.append(' },')
lines.append(']')
lines.append('')
lines.append('')
lines.append('def get_converged_vectors():')
lines.append(' """Return only vectors that converged (for positive testing)."""')
lines.append(' return [v for v in TEST_VECTORS if v[\'converged\']]')
lines.append('')
lines.append('')
lines.append('def get_failed_vectors():')
lines.append(' """Return only vectors that did not converge (for negative testing)."""')
lines.append(' return [v for v in TEST_VECTORS if not v[\'converged\']]')
lines.append('')
return '\n'.join(lines)
def generate_firmware_header(vectors):
"""Generate C header for PicoRV32 firmware."""
lines = []
lines.append('/*')
lines.append(' * Auto-generated test vectors for LDPC decoder firmware')
lines.append(' * Generated by model/gen_firmware_vectors.py')
lines.append(' *')
lines.append(' * LLR packing: 5 LLRs per 32-bit word, 6 bits each (two\'s complement)')
lines.append(' * Word bits [5:0] = LLR[5*i+0]')
lines.append(' * Word bits [11:6] = LLR[5*i+1]')
lines.append(' * Word bits [17:12] = LLR[5*i+2]')
lines.append(' * Word bits [23:18] = LLR[5*i+3]')
lines.append(' * Word bits [29:24] = LLR[5*i+4]')
lines.append(' */')
lines.append('')
lines.append('#ifndef TEST_VECTORS_H')
lines.append('#define TEST_VECTORS_H')
lines.append('')
lines.append('#include <stdint.h>')
lines.append('')
lines.append(f'#define NUM_TEST_VECTORS {len(vectors)}')
lines.append(f'#define LLR_WORDS_PER_VECTOR {N_WORDS}')
lines.append('')
# Generate per-vector arrays
for vec in vectors:
idx = vec['index']
llr_words = pack_llr_words(vec['llr_quantized'])
decoded_word = bits_to_uint32(vec['decoded_bits'])
lines.append(f'/* Vector {idx}: converged={vec["converged"]}, '
f'iterations={vec["iterations"]}, '
f'syndrome_weight={vec["syndrome_weight"]}, '
f'bit_errors={vec["bit_errors"]} */')
lines.append(f'static const uint32_t tv{idx}_llr[{N_WORDS}] = {{')
for chunk_start in range(0, len(llr_words), 4):
chunk = llr_words[chunk_start:chunk_start + 4]
hex_str = ', '.join(f'0x{w:08X}' for w in chunk)
comma = ',' if chunk_start + 4 < len(llr_words) else ''
lines.append(f' {hex_str}{comma}')
lines.append('};')
lines.append(f'static const uint32_t tv{idx}_decoded = 0x{decoded_word:08X};')
lines.append(f'static const int tv{idx}_converged = {1 if vec["converged"] else 0};')
lines.append(f'static const int tv{idx}_iterations = {vec["iterations"]};')
lines.append(f'static const int tv{idx}_syndrome_weight = {vec["syndrome_weight"]};')
lines.append('')
# Generate array-of-pointers for easy iteration
lines.append('/* Array of LLR pointers for iteration */')
lines.append(f'static const uint32_t * const tv_llr[NUM_TEST_VECTORS] = {{')
for i, vec in enumerate(vectors):
comma = ',' if i < len(vectors) - 1 else ''
lines.append(f' tv{vec["index"]}_llr{comma}')
lines.append('};')
lines.append('')
lines.append(f'static const uint32_t tv_decoded[NUM_TEST_VECTORS] = {{')
for i, vec in enumerate(vectors):
decoded_word = bits_to_uint32(vec['decoded_bits'])
comma = ',' if i < len(vectors) - 1 else ''
lines.append(f' 0x{decoded_word:08X}{comma} /* tv{vec["index"]} */')
lines.append('};')
lines.append('')
lines.append(f'static const int tv_converged[NUM_TEST_VECTORS] = {{')
vals = ', '.join(str(1 if v['converged'] else 0) for v in vectors)
lines.append(f' {vals}')
lines.append('};')
lines.append('')
lines.append(f'static const int tv_iterations[NUM_TEST_VECTORS] = {{')
vals = ', '.join(str(v['iterations']) for v in vectors)
lines.append(f' {vals}')
lines.append('};')
lines.append('')
lines.append(f'static const int tv_syndrome_weight[NUM_TEST_VECTORS] = {{')
vals = ', '.join(str(v['syndrome_weight']) for v in vectors)
lines.append(f' {vals}')
lines.append('};')
lines.append('')
lines.append('#endif /* TEST_VECTORS_H */')
lines.append('')
return '\n'.join(lines)
def main():
# Load test vectors
print(f'Reading {INPUT_FILE}...')
with open(INPUT_FILE) as f:
vectors = json.load(f)
print(f' Loaded {len(vectors)} vectors')
converged = sum(1 for v in vectors if v['converged'])
print(f' Converged: {converged}/{len(vectors)}')
# Generate cocotb test data
cocotb_content = generate_cocotb_test_data(vectors)
os.makedirs(os.path.dirname(COCOTB_OUTPUT), exist_ok=True)
with open(COCOTB_OUTPUT, 'w') as f:
f.write(cocotb_content)
print(f' Wrote {COCOTB_OUTPUT}')
# Generate firmware header
firmware_content = generate_firmware_header(vectors)
os.makedirs(os.path.dirname(FIRMWARE_OUTPUT), exist_ok=True)
with open(FIRMWARE_OUTPUT, 'w') as f:
f.write(firmware_content)
print(f' Wrote {FIRMWARE_OUTPUT}')
# Verify: check roundtrip of LLR packing
print('\nVerifying LLR packing roundtrip...')
for vec in vectors:
llr_q = vec['llr_quantized']
words = pack_llr_words(llr_q)
# Unpack and compare
for w_idx, word in enumerate(words):
for p in range(LLRS_PER_WORD):
llr_idx = w_idx * LLRS_PER_WORD + p
if llr_idx >= N_LLR:
break
tc_val = (word >> (p * Q_BITS)) & 0x3F
# Convert back to signed
if tc_val >= 32:
signed_val = tc_val - 64
else:
signed_val = tc_val
expected = llr_q[llr_idx]
assert signed_val == expected, (
f'Vec {vec["index"]}, LLR[{llr_idx}]: '
f'packed={signed_val}, expected={expected}'
)
print(' LLR packing roundtrip OK for all vectors')
print('\nDone.')
if __name__ == '__main__':
main()