from hypothesis import given, strategies as st
from domin8.tools.diff_utils import parse_hunks, split_diff_by_file, normalize_diff


@st.composite
def random_hunk(draw):
    """Generate a small, structurally valid unified-diff hunk."""
    # Build a plausible hunk header from random line ranges.
    old_start = draw(st.integers(min_value=1, max_value=200))
    old_len = draw(st.integers(min_value=1, max_value=10))
    new_start = draw(st.integers(min_value=1, max_value=200))
    new_len = draw(st.integers(min_value=1, max_value=10))
    header = f"@@ -{old_start},{old_len} +{new_start},{new_len} @@\n"
    # Random body lines; embedded newlines are replaced so each entry stays a
    # single diff line and the hunk keeps its line-oriented structure.
    line_text = st.text(min_size=0, max_size=80).map(
        lambda s: s.replace("\n", " ").replace("\r", " ")
    )
    lines = draw(st.lists(line_text, min_size=0, max_size=20))
    # Prefix each line with a context (' '), addition ('+') or removal ('-') marker.
    prefixed = [draw(st.sampled_from([" ", "+", "-"])) + line + "\n" for line in lines]
    return header + "".join(prefixed)
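
# For reference, a draw from random_hunk() looks roughly like this
# (illustrative values only, not captured Hypothesis output):
#
#   @@ -42,3 +87,5 @@
#   +an added line
#    a context line
#   -a removed line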


@st.composite
def random_diff(draw):
    """Generate a unified diff covering one to three files."""
    num_files = draw(st.integers(min_value=1, max_value=3))
    diffs = []
    for i in range(num_files):
        path = f"file{i}.txt"
        # Standard unified-diff file header: old path under a/, new path under b/.
        header = f"--- a/{path}\n+++ b/{path}\n"
        hunks = draw(st.lists(random_hunk(), min_size=0, max_size=3))
        diffs.append(header + "".join(hunks))
    # Each per-file block already ends in a newline, so plain concatenation
    # yields a well-formed multi-file diff.
    return "".join(diffs)
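

# A sketch of a companion property test (an addition, not part of the original
# suite): it feeds the structured diffs from random_diff() through
# split_diff_by_file and parse_hunks, relying only on the behaviours the tests
# below already assert (no exception raised, parse_hunks returns a list).
@given(random_diff())
def test_split_and_parse_structured_diff(d):
    for chunk in split_diff_by_file(d):
        assert isinstance(parse_hunks(chunk), list)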


@given(random_diff())
def test_normalize_diff_does_not_crash(d):
    # normalize_diff should handle generated multi-file diffs without raising
    # and should return a list.
    nd = normalize_diff(d)
    assert isinstance(nd, list)


@given(st.text(min_size=1, max_size=500))
def test_split_and_parse_random_text(s):
    # split_diff_by_file and parse_hunks should not raise on arbitrary text;
    # parse_hunks always returns a list.
    chunks = split_diff_by_file(s)
    for c in chunks:
        h = parse_hunks(c)
        assert isinstance(h, list)
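

if __name__ == "__main__":
    # Convenience for eyeballing generated data while editing these strategies.
    # SearchStrategy.example() is intended for interactive exploration only and
    # should not be called from inside a test.
    print(random_hunk().example())
    print(random_diff().example())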