Add gen_verilator_vectors.py to convert test_vectors.json into hex files for $readmemh, and tb_ldpc_vectors.sv to drive 20 test vectors through the RTL decoder and verify bit-exact matching against the Python model. All 11 converged vectors pass with exact decoded word, convergence flag, and zero syndrome weight. All 9 non-converged vectors match the Python model's decoded word, iteration count, and syndrome weight exactly. Three RTL bugs fixed in ldpc_decoder_core.sv during testing: - Magnitude overflow: -32 (6'b100000) negation overflowed 5-bit field to 0; now clamped to max magnitude 31 - Converged flag persistence: moved clearing from IDLE to INIT so host can read results after decode completes - msg_cn2vn zeroing: bypass stale array reads on first iteration (iter_cnt==0) to avoid Verilator scheduling issues with large 3D array initialization Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
189 lines
6.6 KiB
Python
#!/usr/bin/env python3
"""
Generate hex files for Verilator $readmemh from Python model test vectors.

Reads data/test_vectors.json and produces:
  tb/vectors/llr_words.hex   - LLR data packed as 32-bit hex words
  tb/vectors/expected.hex    - Expected decode results
  tb/vectors/num_vectors.txt - Vector count

LLR packing format (matches wishbone_interface.sv):
  Each 32-bit word holds 5 LLRs, 6 bits each, in two's complement.
    Word[i] bits [5:0]   = LLR[5*i+0]
    Word[i] bits [11:6]  = LLR[5*i+1]
    Word[i] bits [17:12] = LLR[5*i+2]
    Word[i] bits [23:18] = LLR[5*i+3]
    Word[i] bits [29:24] = LLR[5*i+4]
  52 words cover 260 LLRs (256 used, last 4 are zero-padded).

Expected output format (per vector, 4 lines):
  Line 0: decoded_word (32-bit hex, info bits packed LSB-first)
  Line 1: converged (00000000 or 00000001)
  Line 2: iterations (32-bit hex)
  Line 3: syndrome_weight (32-bit hex)
"""

import json
import os
import sys

# Paths relative to this script's directory
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PROJECT_DIR = os.path.dirname(SCRIPT_DIR)
INPUT_FILE = os.path.join(PROJECT_DIR, 'data', 'test_vectors.json')
OUTPUT_DIR = os.path.join(PROJECT_DIR, 'tb', 'vectors')

Q_BITS = 6           # bits per quantized LLR (two's complement)
LLRS_PER_WORD = 5    # 5 x 6-bit LLRs per 32-bit word; top 2 bits unused
N_LLR = 256          # LLRs per codeword
N_WORDS = (N_LLR + LLRS_PER_WORD - 1) // LLRS_PER_WORD  # 52
K = 32               # information bits per codeword

LINES_PER_EXPECTED = 4  # decoded_word, converged, iterations, syndrome_weight
|
|
|
|
|
|
def signed_to_twos_complement(val, bits=Q_BITS):
    """Convert a signed integer to its two's-complement unsigned field value.

    Args:
        val: Signed integer, nominally in [-(2**(bits-1)), 2**(bits-1) - 1].
        bits: Field width in bits (defaults to Q_BITS).

    Returns:
        A non-negative int in [0, 2**bits - 1] whose low `bits` bits are the
        two's-complement encoding of `val`.
    """
    # Fix: the previous branch returned `val + (1 << bits)` for any negative
    # input, which is still negative (an invalid hex field) when
    # val < -(1 << bits). Masking a Python int — which is arbitrary
    # precision — yields the two's-complement residue for both signs, so a
    # single mask is equivalent for all in-range values and always valid.
    return val & ((1 << bits) - 1)
|
|
|
|
|
|
def pack_llr_words(llr_quantized):
    """
    Pack 256 signed LLRs into 52 uint32 words.

    Each word contains 5 LLRs, 6 bits each:
      bits[5:0]   = LLR[5*word + 0]
      bits[11:6]  = LLR[5*word + 1]
      bits[17:12] = LLR[5*word + 2]
      bits[23:18] = LLR[5*word + 3]
      bits[29:24] = LLR[5*word + 4]
    """
    # Zero-pad up to a whole number of words (52 * 5 = 260 entries).
    total_slots = N_WORDS * LLRS_PER_WORD
    padded = list(llr_quantized)
    padded.extend([0] * (total_slots - N_LLR))

    packed = []
    for base in range(0, total_slots, LLRS_PER_WORD):
        word = 0
        # Place each LLR of this group into its 6-bit lane.
        for lane, llr in enumerate(padded[base:base + LLRS_PER_WORD]):
            encoded = signed_to_twos_complement(llr)
            word |= (encoded & 0x3F) << (lane * Q_BITS)
        packed.append(word)
    return packed
|
|
|
|
|
|
def bits_to_uint32(bits):
    """Convert a list of 32 binary values to a single uint32 (bit 0 = LSB)."""
    # Each truthy entry at position `pos` contributes 2**pos to the result.
    return sum(1 << pos for pos, bit in enumerate(bits) if bit)
|
|
|
|
|
|
def main():
    """Generate the Verilator testbench fixture files and verify packing.

    Reads INPUT_FILE (test_vectors.json), then writes to OUTPUT_DIR:
      llr_words.hex   - N_WORDS hex lines per vector (stimulus)
      expected.hex    - LINES_PER_EXPECTED hex lines per vector (golden results)
      num_vectors.txt - single decimal vector count

    Finally unpacks every written word back to signed LLRs and asserts they
    round-trip exactly, then prints a per-vector summary of expected results.
    """
    # Load test vectors
    print(f'Reading {INPUT_FILE}...')
    with open(INPUT_FILE) as f:
        vectors = json.load(f)
    num_vectors = len(vectors)
    converged_count = sum(1 for v in vectors if v['converged'])
    print(f' Loaded {num_vectors} vectors ({converged_count} converged, '
          f'{num_vectors - converged_count} non-converged)')

    # Create output directory
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    # =========================================================================
    # Generate llr_words.hex
    # =========================================================================
    # Format: one 32-bit hex word per line, 52 words per vector
    # Total lines = 52 * num_vectors
    llr_lines = []
    for vec in vectors:
        llr_words = pack_llr_words(vec['llr_quantized'])
        assert len(llr_words) == N_WORDS
        for word in llr_words:
            # 8 uppercase hex digits per line, as $readmemh expects.
            llr_lines.append(f'{word:08X}')

    llr_path = os.path.join(OUTPUT_DIR, 'llr_words.hex')
    with open(llr_path, 'w') as f:
        f.write('\n'.join(llr_lines) + '\n')
    print(f' Wrote {llr_path} ({len(llr_lines)} lines, {N_WORDS} words/vector)')

    # =========================================================================
    # Generate expected.hex
    # =========================================================================
    # Format: 4 lines per vector (all 32-bit hex)
    # Line 0: decoded_word (info bits packed LSB-first)
    # Line 1: converged (00000000 or 00000001)
    # Line 2: iterations
    # Line 3: syndrome_weight
    expected_lines = []
    for vec in vectors:
        decoded_word = bits_to_uint32(vec['decoded_bits'])
        converged = 1 if vec['converged'] else 0
        iterations = vec['iterations']
        syndrome_weight = vec['syndrome_weight']

        expected_lines.append(f'{decoded_word:08X}')
        expected_lines.append(f'{converged:08X}')
        expected_lines.append(f'{iterations:08X}')
        expected_lines.append(f'{syndrome_weight:08X}')

    expected_path = os.path.join(OUTPUT_DIR, 'expected.hex')
    with open(expected_path, 'w') as f:
        f.write('\n'.join(expected_lines) + '\n')
    print(f' Wrote {expected_path} ({len(expected_lines)} lines, '
          f'{LINES_PER_EXPECTED} lines/vector)')

    # =========================================================================
    # Generate num_vectors.txt
    # =========================================================================
    num_path = os.path.join(OUTPUT_DIR, 'num_vectors.txt')
    with open(num_path, 'w') as f:
        f.write(f'{num_vectors}\n')
    print(f' Wrote {num_path} ({num_vectors})')

    # =========================================================================
    # Verify LLR packing roundtrip
    # =========================================================================
    # Unpack every 6-bit lane of every word and check it matches the original
    # signed LLR, so a packing bug cannot silently reach the RTL testbench.
    print('\nVerifying LLR packing roundtrip...')
    for vec in vectors:
        llr_q = vec['llr_quantized']
        words = pack_llr_words(llr_q)
        for w_idx, word in enumerate(words):
            for p in range(LLRS_PER_WORD):
                llr_idx = w_idx * LLRS_PER_WORD + p
                if llr_idx >= N_LLR:
                    # Remaining lanes of the last word are zero padding.
                    break
                tc_val = (word >> (p * Q_BITS)) & 0x3F
                # Convert back to signed
                # (values >= 32 have the 6-bit sign bit set).
                if tc_val >= 32:
                    signed_val = tc_val - 64
                else:
                    signed_val = tc_val
                expected = llr_q[llr_idx]
                assert signed_val == expected, (
                    f'Vec {vec["index"]}, LLR[{llr_idx}]: '
                    f'packed={signed_val}, expected={expected}'
                )
    print(' LLR packing roundtrip OK for all vectors')

    # Print summary of expected results
    print('\nExpected results summary:')
    for vec in vectors:
        decoded_word = bits_to_uint32(vec['decoded_bits'])
        print(f' Vec {vec["index"]:2d}: decoded=0x{decoded_word:08X}, '
              f'converged={vec["converged"]}, '
              f'iter={vec["iterations"]}, '
              f'syn_wt={vec["syndrome_weight"]}')

    print('\nDone.')
|
|
|
|
|
|
# Script entry point; guarded so the module can be imported without side effects.
if __name__ == '__main__':
    main()
|