
Filler Plugin

A pytest plugin that provides pytest fixtures and hooks to fill test specs and write the generated test fixtures to file.

Top-level pytest configuration file providing:

- Command-line options,
- Test fixtures that can be used by all test cases,

and that modifies pytest hooks in order to fill test specs for all tests and writes the generated fixtures to file.

default_output_directory()

The default directory to store the generated test fixtures. Defined as a function to allow for easier testing.

Source code in src/pytest_plugins/filler/filler.py
def default_output_directory() -> str:
    """
    The default directory to store the generated test fixtures. Defined as a
    function to allow for easier testing.
    """
    return "./fixtures"

default_html_report_filename()

The default file to store the generated HTML test report. Defined as a function to allow for easier testing.

Source code in src/pytest_plugins/filler/filler.py
def default_html_report_filename() -> str:
    """
    The default file to store the generated HTML test report. Defined as a
    function to allow for easier testing.
    """
    return "report_fill.html"

strip_output_tarball_suffix(output)

Strip the '.tar.gz' suffix from the output path.

Source code in src/pytest_plugins/filler/filler.py
def strip_output_tarball_suffix(output: Path) -> Path:
    """
    Strip the '.tar.gz' suffix from the output path.
    """
    if str(output).endswith(".tar.gz"):
        return output.with_suffix("").with_suffix("")
    return output
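A minimal illustration (not part of the plugin source) of why with_suffix("") is applied twice: the first call drops '.gz', the second drops '.tar'.

from pathlib import Path

# Sketch mirroring the double with_suffix("") call above: the first call
# removes ".gz", the second removes ".tar".
output = Path("out/fixtures.tar.gz")
assert output.with_suffix("").with_suffix("") == Path("out/fixtures")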

is_output_stdout(output)

Returns True if the fixture output is configured to be stdout.

Source code in src/pytest_plugins/filler/filler.py
def is_output_stdout(output: Path) -> bool:
    """
    Returns True if the fixture output is configured to be stdout.
    """
    return strip_output_tarball_suffix(output).name == "stdout"
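For example (an illustrative sketch, not plugin code), an output path of 'stdout' or 'stdout.tar.gz' is recognized as stdout output by this helper, because only the final path component is checked after stripping any '.tar.gz' suffix:

from pathlib import Path

# The checks below mirror the helper's logic.
assert Path("stdout").name == "stdout"
assert Path("stdout.tar.gz").with_suffix("").with_suffix("").name == "stdout"
assert Path("./fixtures").name != "stdout"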

pytest_addoption(parser)

Adds command-line options to pytest.

Source code in src/pytest_plugins/filler/filler.py
def pytest_addoption(parser: pytest.Parser):
    """
    Adds command-line options to pytest.
    """
    evm_group = parser.getgroup("evm", "Arguments defining evm executable behavior")
    evm_group.addoption(
        "--evm-bin",
        action="store",
        dest="evm_bin",
        type=Path,
        default=None,
        help=(
            "Path to an evm executable that provides `t8n`. Default: First 'evm' entry in PATH."
        ),
    )
    evm_group.addoption(
        "--traces",
        action="store_true",
        dest="evm_collect_traces",
        default=None,
        help="Collect traces of the execution information from the transition tool.",
    )
    evm_group.addoption(
        "--verify-fixtures",
        action="store_true",
        dest="verify_fixtures",
        default=False,
        help=(
            "Verify generated fixture JSON files using geth's evm blocktest command. "
            "By default, the same evm binary as for the t8n tool is used. A different (geth) evm "
            "binary may be specified via --verify-fixtures-bin, this must be specified if filling "
            "with a non-geth t8n tool that does not support blocktest."
        ),
    )
    evm_group.addoption(
        "--verify-fixtures-bin",
        action="store",
        dest="verify_fixtures_bin",
        type=Path,
        default=None,
        help=(
            "Path to an evm executable that provides the `blocktest` command. "
            "Default: The first (geth) 'evm' entry in PATH."
        ),
    )

    solc_group = parser.getgroup("solc", "Arguments defining the solc executable")
    solc_group.addoption(
        "--solc-bin",
        action="store",
        dest="solc_bin",
        default=None,
        help=(
            "Path to a solc executable (for Yul source compilation). "
            "Default: First 'solc' entry in PATH."
        ),
    )

    test_group = parser.getgroup("tests", "Arguments defining filler location and output")
    test_group.addoption(
        "--filler-path",
        action="store",
        dest="filler_path",
        default="./tests/",
        type=Path,
        help="Path to filler directives",
    )
    test_group.addoption(
        "--output",
        action="store",
        dest="output",
        type=Path,
        default=Path(default_output_directory()),
        help=(
            "Directory path to store the generated test fixtures. "
            "If the specified path ends in '.tar.gz', then the specified tarball is additionally "
            "created (the fixtures are still written to the specified path without the '.tar.gz' "
            f"suffix). Can be deleted. Default: '{default_output_directory()}'."
        ),
    )
    test_group.addoption(
        "--flat-output",
        action="store_true",
        dest="flat_output",
        default=False,
        help="Output each test case in the directory without the folder structure.",
    )
    test_group.addoption(
        "--single-fixture-per-file",
        action="store_true",
        dest="single_fixture_per_file",
        default=False,
        help=(
            "Don't group fixtures in JSON files by test function; write each fixture to its own "
            "file. This can be used to increase the granularity of --verify-fixtures."
        ),
    )
    test_group.addoption(
        "--no-html",
        action="store_true",
        dest="disable_html",
        default=False,
        help=(
            "Don't generate an HTML test report (in the output directory). "
            "The --html flag can be used to specify a different path."
        ),
    )
    test_group.addoption(
        "--build-name",
        action="store",
        dest="build_name",
        default=None,
        type=str,
        help="Specify a build name for the fixtures.ini file, e.g., 'stable'.",
    )

    debug_group = parser.getgroup("debug", "Arguments defining debug behavior")
    debug_group.addoption(
        "--evm-dump-dir",
        "--t8n-dump-dir",
        action="store",
        dest="base_dump_dir",
        default="",
        help="Path to dump the transition tool debug output.",
    )
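The dest values above are the keys used elsewhere in the plugin to read the parsed options back via config.getoption. A hedged sketch (the helper name is hypothetical and not part of the plugin):

def _read_filler_options(config):
    """Hypothetical helper showing how the parsed options are accessed."""
    evm_bin = config.getoption("evm_bin")            # --evm-bin
    output = config.getoption("output")              # --output, a Path, possibly '*.tar.gz'
    flat_output = config.getoption("flat_output")    # --flat-output
    build_name = config.getoption("build_name")      # --build-name
    return evm_bin, output, flat_output, build_name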

pytest_configure(config)

Pytest hook called after command line options have been parsed and before test collection begins.

A couple of notes:

1. Register the plugin's custom markers and process command-line options.

   Custom marker registration:
   https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#registering-custom-markers

2. @pytest.hookimpl(tryfirst=True) is applied to ensure that this hook is called before the pytest-html plugin's pytest_configure, so that it uses the modified htmlpath option.
Source code in src/pytest_plugins/filler/filler.py
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
    """
    Pytest hook called after command line options have been parsed and before
    test collection begins.

    Couple of notes:
    1. Register the plugin's custom markers and process command-line options.

        Custom marker registration:
        https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#registering-custom-markers

    2. `@pytest.hookimpl(tryfirst=True)` is applied to ensure that this hook is
        called before the pytest-html plugin's pytest_configure to ensure that
        it uses the modified `htmlpath` option.
    """
    for fixture_format in FixtureFormats:
        config.addinivalue_line(
            "markers",
            (
                f"{fixture_format.name.lower()}: "
                f"{FixtureFormats.get_format_description(fixture_format)}"
            ),
        )
    config.addinivalue_line(
        "markers",
        "yul_test: a test case that compiles Yul code.",
    )
    config.addinivalue_line(
        "markers",
        "compile_yul_with(fork): Always compile Yul source using the corresponding evm version.",
    )
    if config.option.collectonly:
        return
    if not config.getoption("disable_html") and config.getoption("htmlpath") is None:
        # generate an html report by default, unless explicitly disabled
        config.option.htmlpath = (
            strip_output_tarball_suffix(config.getoption("output"))
            / default_html_report_filename()
        )
    # Instantiate the transition tool here to check that the binary path/trace option is valid.
    # This ensures we only raise an error once, if appropriate, instead of for every test.
    t8n = TransitionTool.from_binary_path(
        binary_path=config.getoption("evm_bin"), trace=config.getoption("evm_collect_traces")
    )
    if (
        isinstance(config.getoption("numprocesses"), int)
        and config.getoption("numprocesses") > 0
        and "Besu" in str(t8n.detect_binary_pattern)
    ):
        pytest.exit(
            "The Besu t8n tool does not work well with the xdist plugin; use -n=0.",
            returncode=pytest.ExitCode.USAGE_ERROR,
        )
    config.solc_version = Solc(config.getoption("solc_bin")).version
    if config.solc_version < Frontier.solc_min_version():
        pytest.exit(
            f"Unsupported solc version: {config.solc_version}. Minimum required version is "
            f"{Frontier.solc_min_version()}",
            returncode=pytest.ExitCode.USAGE_ERROR,
        )

    config.stash[metadata_key]["Tools"] = {
        "t8n": t8n.version(),
        "solc": str(config.solc_version),
    }
    command_line_args = "fill " + " ".join(config.invocation_params.args)
    config.stash[metadata_key]["Command-line args"] = f"<code>{command_line_args}</code>"
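As a worked example (illustrative only), with --output=my_fixtures.tar.gz and no explicit --html flag, the default report path computed above resolves as follows. It assumes the two helpers defined earlier in this module are in scope.

from pathlib import Path

# Default HTML report path for a tarball output target.
htmlpath = strip_output_tarball_suffix(Path("my_fixtures.tar.gz")) / default_html_report_filename()
assert htmlpath == Path("my_fixtures/report_fill.html")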

pytest_report_header(config)

Add lines to pytest's console output header.

Source code in src/pytest_plugins/filler/filler.py
@pytest.hookimpl(trylast=True)
def pytest_report_header(config: pytest.Config):
    """Add lines to pytest's console output header"""
    if config.option.collectonly:
        return
    t8n_version = config.stash[metadata_key]["Tools"]["t8n"]
    solc_version = config.stash[metadata_key]["Tools"]["solc"]
    return [(f"{t8n_version}, {solc_version}")]

pytest_report_teststatus(report, config)

Disable test session progress report if we're writing the JSON fixtures to stdout to be read by a consume command on stdin. I.e., don't write this type of output to the console:

...x...
Source code in src/pytest_plugins/filler/filler.py
def pytest_report_teststatus(report, config: pytest.Config):
    """
    Disable test session progress report if we're writing the JSON fixtures to
    stdout to be read by a consume command on stdin. I.e., don't write this
    type of output to the console:

    ```text
    ...x...
    ```
    """
    if is_output_stdout(config.getoption("output")):
        return report.outcome, "", report.outcome.upper()

pytest_metadata(metadata)

Add or remove metadata to/from the pytest report.

Source code in src/pytest_plugins/filler/filler.py
def pytest_metadata(metadata):
    """
    Add or remove metadata to/from the pytest report.
    """
    metadata.pop("JAVA_HOME", None)

pytest_html_results_table_header(cells)

Customize the table headers of the HTML report table.

Source code in src/pytest_plugins/filler/filler.py
def pytest_html_results_table_header(cells):
    """
    Customize the table headers of the HTML report table.
    """
    cells.insert(3, '<th class="sortable" data-column-type="fixturePath">JSON Fixture File</th>')
    cells.insert(4, '<th class="sortable" data-column-type="evmDumpDir">EVM Dump Dir</th>')
    del cells[-1]  # Remove the "Links" column

pytest_html_results_table_row(report, cells)

Customize the table rows of the HTML report table.

Source code in src/pytest_plugins/filler/filler.py
def pytest_html_results_table_row(report, cells):
    """
    Customize the table rows of the HTML report table.
    """
    if hasattr(report, "user_properties"):
        user_props = dict(report.user_properties)
        if (
            report.passed
            and "fixture_path_absolute" in user_props
            and "fixture_path_relative" in user_props
        ):
            fixture_path_absolute = user_props["fixture_path_absolute"]
            fixture_path_relative = user_props["fixture_path_relative"]
            fixture_path_link = (
                f'<a href="{fixture_path_absolute}" target="_blank">{fixture_path_relative}</a>'
            )
            cells.insert(3, f"<td>{fixture_path_link}</td>")
        elif report.failed:
            cells.insert(3, "<td>Fixture unavailable</td>")
        if "evm_dump_dir" in user_props:
            if user_props["evm_dump_dir"] is None:
                cells.insert(
                    4, "<td>For t8n debug info use <code>--evm-dump-dir=path --traces</code></td>"
                )
            else:
                evm_dump_dir = user_props.get("evm_dump_dir")
                if evm_dump_dir == "N/A":
                    evm_dump_entry = "N/A"
                else:
                    evm_dump_entry = f'<a href="{evm_dump_dir}" target="_blank">{evm_dump_dir}</a>'
                cells.insert(4, f"<td>{evm_dump_entry}</td>")
    del cells[-1]  # Remove the "Links" column

pytest_runtest_makereport(item, call)

This hook is called when each test is run and a report is being made.

Make each test's fixture json path available to the test report via user_properties.

Source code in src/pytest_plugins/filler/filler.py
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """
    This hook is called when each test is run and a report is being made.

    Make each test's fixture json path available to the test report via
    user_properties.
    """
    outcome = yield
    report = outcome.get_result()

    if call.when == "call":
        if hasattr(item.config, "fixture_path_absolute") and hasattr(
            item.config, "fixture_path_relative"
        ):
            report.user_properties.append(
                ("fixture_path_absolute", item.config.fixture_path_absolute)
            )
            report.user_properties.append(
                ("fixture_path_relative", item.config.fixture_path_relative)
            )
        if hasattr(item.config, "evm_dump_dir") and hasattr(item.config, "fixture_format"):
            if item.config.fixture_format in [
                "state_test",
                "blockchain_test",
                "blockchain_test_engine",
            ]:
                report.user_properties.append(("evm_dump_dir", item.config.evm_dump_dir))
            else:
                report.user_properties.append(("evm_dump_dir", "N/A"))  # not yet for EOF

pytest_html_report_title(report)

Set the HTML report title (pytest-html plugin).

Source code in src/pytest_plugins/filler/filler.py
def pytest_html_report_title(report):
    """
    Set the HTML report title (pytest-html plugin).
    """
    report.title = "Fill Test Report"

evm_bin(request)

Returns the configured evm tool binary path used to run t8n.

Source code in src/pytest_plugins/filler/filler.py
@pytest.fixture(autouse=True, scope="session")
def evm_bin(request: pytest.FixtureRequest) -> Path:
    """
    Returns the configured evm tool binary path used to run t8n.
    """
    return request.config.getoption("evm_bin")

verify_fixtures_bin(request)

Returns the configured evm tool binary path used to run statetest or blocktest.

Source code in src/pytest_plugins/filler/filler.py
@pytest.fixture(autouse=True, scope="session")
def verify_fixtures_bin(request: pytest.FixtureRequest) -> Path | None:
    """
    Returns the configured evm tool binary path used to run statetest or
    blocktest.
    """
    return request.config.getoption("verify_fixtures_bin")

solc_bin(request)

Returns the configured solc binary path.

Source code in src/pytest_plugins/filler/filler.py
@pytest.fixture(autouse=True, scope="session")
def solc_bin(request: pytest.FixtureRequest):
    """
    Returns the configured solc binary path.
    """
    return request.config.getoption("solc_bin")

t8n(request, evm_bin)

Returns the configured transition tool.

Source code in src/pytest_plugins/filler/filler.py
@pytest.fixture(autouse=True, scope="session")
def t8n(request: pytest.FixtureRequest, evm_bin: Path) -> Generator[TransitionTool, None, None]:
    """
    Returns the configured transition tool.
    """
    t8n = TransitionTool.from_binary_path(
        binary_path=evm_bin, trace=request.config.getoption("evm_collect_traces")
    )
    yield t8n
    t8n.shutdown()

do_fixture_verification(request, verify_fixtures_bin)

Returns True if evm statetest or evm blocktest should be run on the generated fixture JSON files.

Source code in src/pytest_plugins/filler/filler.py
@pytest.fixture(scope="session")
def do_fixture_verification(
    request: pytest.FixtureRequest, verify_fixtures_bin: Path | None
) -> bool:
    """
    Returns True if evm statetest or evm blocktest should be ran on the
    generated fixture JSON files.
    """
    do_fixture_verification = False
    if verify_fixtures_bin:
        do_fixture_verification = True
    if request.config.getoption("verify_fixtures"):
        do_fixture_verification = True
    return do_fixture_verification

evm_fixture_verification(do_fixture_verification, evm_bin, verify_fixtures_bin)

Returns the configured evm binary for executing statetest and blocktest commands used to verify generated JSON fixtures.

Source code in src/pytest_plugins/filler/filler.py
@pytest.fixture(autouse=True, scope="session")
def evm_fixture_verification(
    do_fixture_verification: bool,
    evm_bin: Path,
    verify_fixtures_bin: Path | None,
) -> Generator[TransitionTool | None, None, None]:
    """
    Returns the configured evm binary for executing statetest and blocktest
    commands used to verify generated JSON fixtures.
    """
    if not do_fixture_verification:
        yield None
        return
    if not verify_fixtures_bin and evm_bin:
        verify_fixtures_bin = evm_bin
    evm_fixture_verification = TransitionTool.from_binary_path(binary_path=verify_fixtures_bin)
    if not evm_fixture_verification.blocktest_subcommand:
        pytest.exit(
            "Only geth's evm tool is supported to verify fixtures: "
            "Either remove --verify-fixtures or set --verify-fixtures-bin to a Geth evm binary.",
            returncode=pytest.ExitCode.USAGE_ERROR,
        )
    yield evm_fixture_verification
    evm_fixture_verification.shutdown()

base_dump_dir(request)

The base directory to dump the evm debug output.

Source code in src/pytest_plugins/filler/filler.py
@pytest.fixture(scope="session")
def base_dump_dir(request: pytest.FixtureRequest) -> Path | None:
    """
    The base directory to dump the evm debug output.
    """
    base_dump_dir_str = request.config.getoption("base_dump_dir")
    if base_dump_dir_str:
        return Path(base_dump_dir_str)
    return None

is_output_tarball(request)

Returns True if the output directory is a tarball.

Source code in src/pytest_plugins/filler/filler.py
@pytest.fixture(scope="session")
def is_output_tarball(request: pytest.FixtureRequest) -> bool:
    """
    Returns True if the output directory is a tarball.
    """
    output: Path = request.config.getoption("output")
    if output.suffix == ".gz" and output.with_suffix("").suffix == ".tar":
        return True
    return False

output_dir(request, is_output_tarball)

Returns the directory to store the generated test fixtures.

Source code in src/pytest_plugins/filler/filler.py
@pytest.fixture(scope="session")
def output_dir(request: pytest.FixtureRequest, is_output_tarball: bool) -> Path:
    """
    Returns the directory to store the generated test fixtures.
    """
    output = request.config.getoption("output")
    if is_output_tarball:
        return strip_output_tarball_suffix(output)
    return output

create_properties_file(request, output_dir)

Creates an ini file with fixture build properties in the fixture output directory.

Source code in src/pytest_plugins/filler/filler.py
@pytest.fixture(scope="session", autouse=True)
def create_properties_file(request: pytest.FixtureRequest, output_dir: Path) -> None:
    """
    Creates an ini file with fixture build properties in the fixture output
    directory.
    """
    if is_output_stdout(request.config.getoption("output")):
        return
    if not output_dir.exists():
        output_dir.mkdir(parents=True)

    fixture_properties = {
        "timestamp": datetime.datetime.now().isoformat(),
    }
    if build_name := request.config.getoption("build_name"):
        fixture_properties["build"] = build_name
    if github_ref := os.getenv("GITHUB_REF"):
        fixture_properties["ref"] = github_ref
    if github_sha := os.getenv("GITHUB_SHA"):
        fixture_properties["commit"] = github_sha
    command_line_args = request.config.stash[metadata_key]["Command-line args"]
    command_line_args = command_line_args.replace("<code>", "").replace("</code>", "")
    fixture_properties["command_line_args"] = command_line_args

    config = configparser.ConfigParser()
    config["fixtures"] = fixture_properties
    environment_properties = {}
    for key, val in request.config.stash[metadata_key].items():
        if key.lower() == "command-line args":
            continue
        if key.lower() in ["ci", "python", "platform"]:
            environment_properties[key] = val
        elif isinstance(val, dict):
            config[key.lower()] = val
        else:
            warnings.warn(f"Fixtures ini file: Skipping metadata key {key} with value {val}.")
    config["environment"] = environment_properties

    ini_filename = output_dir / "fixtures.ini"
    with open(ini_filename, "w") as f:
        f.write("; This file describes fixture build properties\n\n")
        config.write(f)
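The resulting fixtures.ini can be read back with configparser; a minimal sketch (the file path and fallback value are assumptions):

import configparser

config = configparser.ConfigParser()
config.read("fixtures/fixtures.ini")
print(config["fixtures"]["timestamp"])          # ISO timestamp written above
print(config["fixtures"].get("build", "n/a"))   # present only if --build-name was given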

create_tarball(request, output_dir, is_output_tarball)

Create a tarball of the JSON files in the output directory if the configured output ends with '.tar.gz'.

Only include .json and .ini files in the archive.

Source code in src/pytest_plugins/filler/filler.py
@pytest.fixture(scope="session", autouse=True)
def create_tarball(
    request: pytest.FixtureRequest, output_dir: Path, is_output_tarball: bool
) -> Generator[None, None, None]:
    """
    Create a tarball of json files the output directory if the configured
    output ends with '.tar.gz'.

    Only include .json and .ini files in the archive.
    """
    yield
    if is_output_tarball:
        source_dir = output_dir
        tarball_filename = request.config.getoption("output")
        with tarfile.open(tarball_filename, "w:gz") as tar:
            for file in source_dir.rglob("*"):
                if file.suffix in {".json", ".ini"}:
                    arcname = Path("fixtures") / file.relative_to(source_dir)
                    tar.add(file, arcname=arcname)

dump_dir_parameter_level(request, base_dump_dir, filler_path)

The directory to dump evm transition tool debug output on a test parameter level.

Example with --evm-dump-dir=/tmp/evm: -> /tmp/evm/shanghai__eip3855_push0__test_push0__test_push0_key_sstore/fork_shanghai/

Source code in src/pytest_plugins/filler/filler.py
@pytest.fixture(scope="function")
def dump_dir_parameter_level(
    request: pytest.FixtureRequest, base_dump_dir: Path | None, filler_path: Path
) -> Path | None:
    """
    The directory to dump evm transition tool debug output on a test parameter
    level.

    Example with --evm-dump-dir=/tmp/evm:
    -> /tmp/evm/shanghai__eip3855_push0__test_push0__test_push0_key_sstore/fork_shanghai/
    """
    evm_dump_dir = node_to_test_info(request.node).get_dump_dir_path(
        base_dump_dir,
        filler_path,
        level="test_parameter",
    )
    # NOTE: Use str for compatibility with pytest-dist
    if evm_dump_dir:
        request.node.config.evm_dump_dir = str(evm_dump_dir)
    else:
        request.node.config.evm_dump_dir = None
    return evm_dump_dir

get_fixture_collection_scope(fixture_name, config)

Return the appropriate scope to write fixture JSON files.

See: https://docs.pytest.org/en/stable/how-to/fixtures.html#dynamic-scope

Source code in src/pytest_plugins/filler/filler.py
def get_fixture_collection_scope(fixture_name, config):
    """
    Return the appropriate scope to write fixture JSON files.

    See: https://docs.pytest.org/en/stable/how-to/fixtures.html#dynamic-scope
    """
    if is_output_stdout(config.getoption("output")):
        return "session"
    if config.getoption("single_fixture_per_file"):
        return "function"
    return "module"

fixture_collector(request, do_fixture_verification, evm_fixture_verification, filler_path, base_dump_dir, output_dir)

Returns the configured fixture collector instance used for all tests in one test module.

Source code in src/pytest_plugins/filler/filler.py
@pytest.fixture(scope=get_fixture_collection_scope)
def fixture_collector(
    request: pytest.FixtureRequest,
    do_fixture_verification: bool,
    evm_fixture_verification: TransitionTool,
    filler_path: Path,
    base_dump_dir: Path | None,
    output_dir: Path,
) -> Generator[FixtureCollector, None, None]:
    """
    Returns the configured fixture collector instance used for all tests
    in one test module.
    """
    fixture_collector = FixtureCollector(
        output_dir=output_dir,
        flat_output=request.config.getoption("flat_output"),
        single_fixture_per_file=request.config.getoption("single_fixture_per_file"),
        filler_path=filler_path,
        base_dump_dir=base_dump_dir,
    )
    yield fixture_collector
    fixture_collector.dump_fixtures()
    if do_fixture_verification:
        fixture_collector.verify_fixture_files(evm_fixture_verification)

filler_path(request)

Returns the directory containing the tests to execute.

Source code in src/pytest_plugins/filler/filler.py
@pytest.fixture(autouse=True, scope="session")
def filler_path(request: pytest.FixtureRequest) -> Path:
    """
    Returns the directory containing the tests to execute.
    """
    return request.config.getoption("filler_path")

eips()

A fixture specifying that, by default, no EIPs should be activated for tests.

This fixture (function) may be redefined in test filler modules in order to override this default and return a list of integers specifying which EIPs should be activated for the tests in scope.

Source code in src/pytest_plugins/filler/filler.py
@pytest.fixture(autouse=True)
def eips():
    """
    A fixture specifying that, by default, no EIPs should be activated for
    tests.

    This fixture (function) may be redefined in test filler modules in order
    to overwrite this default and return a list of integers specifying which
    EIPs should be activated for the tests in scope.
    """
    return []
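A hedged example of such an override in a test module (the EIP number is purely illustrative):

import pytest

@pytest.fixture
def eips():
    """Activate EIP-3855 for all tests in this module."""
    return [3855]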

yul(fork, request)

A fixture that allows contract code to be defined with Yul code.

This fixture defines a class that wraps the ethereum_test_tools.Yul class so that, upon instantiation within the test case, it provides the test case's current fork parameter. The fork is then available for use in solc's arguments for the Yul code compilation.

Test cases can override the default value by specifying a fixed version with the @pytest.mark.compile_yul_with(FORK) marker.

Source code in src/pytest_plugins/filler/filler.py
@pytest.fixture
def yul(fork: Fork, request):
    """
    A fixture that allows contract code to be defined with Yul code.

    This fixture defines a class that wraps the ::ethereum_test_tools.Yul
    class so that upon instantiation within the test case, it provides the
    test case's current fork parameter. The forks is then available for use
    in solc's arguments for the Yul code compilation.

    Test cases can override the default value by specifying a fixed version
    with the @pytest.mark.compile_yul_with(FORK) marker.
    """
    solc_target_fork: Fork | None
    marker = request.node.get_closest_marker("compile_yul_with")
    if marker:
        if not marker.args[0]:
            pytest.fail(
                f"{request.node.name}: Expected one argument in 'compile_yul_with' marker."
            )
        for fork in request.config.forks:
            if fork.name() == marker.args[0]:
                solc_target_fork = fork
                break
        else:
            pytest.fail(f"{request.node.name}: Fork {marker.args[0]} not found in forks list.")
        assert solc_target_fork in get_forks_with_solc_support(request.config.solc_version)
    else:
        solc_target_fork = get_closest_fork_with_solc_support(fork, request.config.solc_version)
        assert solc_target_fork is not None, "No fork supports provided solc version."
        if solc_target_fork != fork and request.config.getoption("verbose") >= 1:
            warnings.warn(f"Compiling Yul for {solc_target_fork.name()}, not {fork.name()}.")

    class YulWrapper(Yul):
        def __new__(cls, *args, **kwargs):
            return super(YulWrapper, cls).__new__(cls, *args, **kwargs, fork=solc_target_fork)

    return YulWrapper
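A hedged sketch of a test using this fixture together with the compile_yul_with marker (the Yul source, fork name, and test body are illustrative; the wrapper is assumed to accept the Yul source string as its first argument):

import pytest

@pytest.mark.compile_yul_with("Shanghai")
def test_yul_example(yul, pre, state_test):
    # Compiled by solc targeting the Shanghai evm version due to the marker.
    contract = pre.deploy_contract(code=yul("{ sstore(0, 1) }"))
    ...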

node_to_test_info(node)

Returns the test info of the current node item.

Source code in src/pytest_plugins/filler/filler.py
def node_to_test_info(node: pytest.Item) -> TestInfo:
    """
    Returns the test info of the current node item.
    """
    return TestInfo(
        name=node.name,
        id=node.nodeid,
        original_name=node.originalname,  # type: ignore
        path=Path(node.path),
    )

fixture_source_url(request)

Returns the URL to the fixture source.

Source code in src/pytest_plugins/filler/filler.py
@pytest.fixture(scope="function")
def fixture_source_url(request: pytest.FixtureRequest) -> str:
    """
    Returns the URL to the fixture source.
    """
    function_line_number = request.function.__code__.co_firstlineno
    module_relative_path = os.path.relpath(request.module.__file__)
    hash_or_tag = get_current_commit_hash_or_tag()
    github_url = generate_github_url(
        module_relative_path, branch_or_commit_or_tag=hash_or_tag, line_number=function_line_number
    )
    return github_url

fixture_description(request)

Fixture to extract and combine docstrings from the test class and the test function.

Source code in src/pytest_plugins/filler/filler.py
@pytest.fixture(scope="function")
def fixture_description(request: pytest.FixtureRequest) -> str:
    """Fixture to extract and combine docstrings from the test class and the test function."""
    description_unavailable = (
        "No description available - add a docstring to the python test class or function."
    )
    test_class_doc = f"Test class documentation:\n{request.cls.__doc__}" if request.cls else ""
    test_function_doc = (
        f"Test function documentation:\n{request.function.__doc__}"
        if request.function.__doc__
        else ""
    )
    if not test_class_doc and not test_function_doc:
        return description_unavailable
    combined_docstring = f"{test_class_doc}\n\n{test_function_doc}".strip()
    return combined_docstring

base_test_parametrizer(cls)

Generates a pytest.fixture for a given BaseTest subclass.

Implementation detail: All spec fixtures must be scoped on test function level to avoid leakage between tests.

Source code in src/pytest_plugins/filler/filler.py
def base_test_parametrizer(cls: Type[BaseTest]):
    """
    Generates a pytest.fixture for a given BaseTest subclass.

    Implementation detail: All spec fixtures must be scoped on test function level to avoid
    leakage between tests.
    """

    @pytest.fixture(
        scope="function",
        name=cls.pytest_parameter_name(),
    )
    def base_test_parametrizer_func(
        request: pytest.FixtureRequest,
        t8n: TransitionTool,
        fork: Fork,
        reference_spec: ReferenceSpec,
        eips: List[int],
        pre: Alloc,
        output_dir: Path,
        dump_dir_parameter_level: Path | None,
        fixture_collector: FixtureCollector,
        fixture_description: str,
        fixture_source_url: str,
    ):
        """
        Fixture used to instantiate an auto-fillable BaseTest object from within
        a test function.

        Every test that defines a test filler must explicitly specify its parameter name
        (see `pytest_parameter_name` in each implementation of BaseTest) in its function
        arguments.

        When parametrize, indirect must be used along with the fixture format as value.
        """
        fixture_format = request.param
        assert isinstance(fixture_format, FixtureFormats)

        class BaseTestWrapper(cls):  # type: ignore
            def __init__(self, *args, **kwargs):
                kwargs["t8n_dump_dir"] = dump_dir_parameter_level
                if "pre" not in kwargs:
                    kwargs["pre"] = pre
                super(BaseTestWrapper, self).__init__(*args, **kwargs)
                fixture = self.generate(
                    t8n=t8n,
                    fork=fork,
                    fixture_format=fixture_format,
                    eips=eips,
                )
                fixture.fill_info(
                    t8n.version(),
                    fixture_description,
                    fixture_source_url=fixture_source_url,
                    ref_spec=reference_spec,
                )

                fixture_path = fixture_collector.add_fixture(
                    node_to_test_info(request.node),
                    fixture,
                )

                # NOTE: Use str for compatibility with pytest-dist
                request.node.config.fixture_path_absolute = str(fixture_path.absolute())
                request.node.config.fixture_path_relative = str(
                    fixture_path.relative_to(output_dir)
                )
                request.node.config.fixture_format = fixture_format.value

        return BaseTestWrapper

    return base_test_parametrizer_func
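A hedged sketch of how a test filler consumes the resulting spec fixture. The parameter name comes from pytest_parameter_name(), e.g. state_test; the env and tx values are elided placeholders rather than real API calls:

def test_example(state_test, pre, fork):
    sender = pre.fund_eoa()                       # funded EOA from the pre-alloc plugin below
    contract = pre.deploy_contract(code=b"\x00")  # a single STOP opcode
    # Instantiating the spec fixture fills the test and registers the
    # generated fixture with the fixture collector.
    state_test(env=..., pre=pre, post={}, tx=...)  # env/tx construction elided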

pytest_generate_tests(metafunc)

Pytest hook used to dynamically generate test cases for each fixture format a given test spec supports.

Source code in src/pytest_plugins/filler/filler.py
def pytest_generate_tests(metafunc: pytest.Metafunc):
    """
    Pytest hook used to dynamically generate test cases for each fixture format a given
    test spec supports.
    """
    for test_type in SPEC_TYPES:
        if test_type.pytest_parameter_name() in metafunc.fixturenames:
            metafunc.parametrize(
                [test_type.pytest_parameter_name()],
                [
                    pytest.param(
                        fixture_format,
                        id=fixture_format.name.lower(),
                        marks=[getattr(pytest.mark, fixture_format.name.lower())],
                    )
                    for fixture_format in test_type.supported_fixture_formats
                ],
                scope="function",
                indirect=True,
            )

pytest_collection_modifyitems(config, items)

Remove pre-Paris tests parametrized to generate engine type fixtures; these can't be used in the Hive Pyspec Simulator.

This can't be handled in this plugin's pytest_generate_tests() because the fork parametrization occurs in the forks plugin.

Source code in src/pytest_plugins/filler/filler.py
def pytest_collection_modifyitems(config, items):
    """
    Remove pre-Paris tests parametrized to generate engine type fixtures; these
    can't be used in the Hive Pyspec Simulator.

    This can't be handled in this plugins pytest_generate_tests() as the fork
    parametrization occurs in the forks plugin.
    """
    for item in items[:]:  # use a copy of the list, as we'll be modifying it
        if isinstance(item, EIPSpecTestItem):
            continue
        if "fork" not in item.callspec.params or item.callspec.params["fork"] is None:
            items.remove(item)
            continue
        if item.callspec.params["fork"] < Paris:
            # Even though the `state_test` test spec does not produce an engine STATE_TEST, it does
            # produce a BLOCKCHAIN_TEST_ENGINE, so we need to remove it here.
            # TODO: Ideally, the logic could be contained in the `FixtureFormat` class, we create
            # a `fork_supported` method that returns True if the fork is supported.
            if ("state_test" in item.callspec.params) and item.callspec.params[
                "state_test"
            ].name.endswith("ENGINE"):
                items.remove(item)
            if ("blockchain_test" in item.callspec.params) and item.callspec.params[
                "blockchain_test"
            ].name.endswith("ENGINE"):
                items.remove(item)
        if "yul" in item.fixturenames:
            item.add_marker(pytest.mark.yul_test)

pytest_make_parametrize_id(config, val, argname)

Pytest hook called when generating test ids. We use this to generate more readable test ids for the generated tests.

Source code in src/pytest_plugins/filler/filler.py
def pytest_make_parametrize_id(config, val, argname):
    """
    Pytest hook called when generating test ids. We use this to generate
    more readable test ids for the generated tests.
    """
    return f"{argname}_{val}"

pytest_runtest_call(item)

Pytest hook called in the context of test execution.

Source code in src/pytest_plugins/filler/filler.py
def pytest_runtest_call(item):
    """
    Pytest hook called in the context of test execution.
    """
    if isinstance(item, EIPSpecTestItem):
        return

    class InvalidFiller(Exception):
        def __init__(self, message):
            super().__init__(message)

    if "state_test" in item.fixturenames and "blockchain_test" in item.fixturenames:
        raise InvalidFiller(
            "A filler should only implement either a state test or " "a blockchain test; not both."
        )

    # Check that the test defines either test type as parameter.
    if not any([i for i in item.funcargs if i in SPEC_TYPES_PARAMETERS]):
        pytest.fail(
            "Test must define either one of the following parameters to "
            + "properly generate a test: "
            + ", ".join(SPEC_TYPES_PARAMETERS)
        )

Pre-alloc specifically conditioned for test filling.

pytest_addoption(parser)

Adds command-line options to pytest.

Source code in src/pytest_plugins/filler/pre_alloc.py
def pytest_addoption(parser: pytest.Parser):
    """
    Adds command-line options to pytest.
    """
    pre_alloc_group = parser.getgroup("pre_alloc", "Arguments defining pre-allocation behavior.")

    pre_alloc_group.addoption(
        "--strict-alloc",
        action="store_true",
        dest="strict_alloc",
        default=False,
        help=("[DEBUG ONLY] Disallows deploying a contract in a predefined address."),
    )
    pre_alloc_group.addoption(
        "--ca-start",
        "--contract-address-start",
        action="store",
        dest="test_contract_start_address",
        default=f"{CONTRACT_START_ADDRESS_DEFAULT}",
        type=str,
        help="The starting address from which tests will deploy contracts.",
    )
    pre_alloc_group.addoption(
        "--ca-incr",
        "--contract-address-increment",
        action="store",
        dest="test_contract_address_increments",
        default=f"{CONTRACT_ADDRESS_INCREMENTS_DEFAULT}",
        type=str,
        help="The address increment value to each deployed contract by a test.",
    )

AllocMode

Bases: IntEnum

Allocation mode for the state.

Source code in src/pytest_plugins/filler/pre_alloc.py
class AllocMode(IntEnum):
    """
    Allocation mode for the state.
    """

    PERMISSIVE = 0
    STRICT = 1

Alloc

Bases: Alloc

Allocation of accounts in the state, pre and post test execution.

Source code in src/pytest_plugins/filler/pre_alloc.py
class Alloc(BaseAlloc):
    """
    Allocation of accounts in the state, pre and post test execution.
    """

    _alloc_mode: AllocMode = PrivateAttr(...)
    _contract_address_iterator: Iterator[Address] = PrivateAttr(...)
    _eoa_iterator: Iterator[EOA] = PrivateAttr(...)

    def __init__(
        self,
        *args,
        alloc_mode: AllocMode,
        contract_address_iterator: Iterator[Address],
        eoa_iterator: Iterator[EOA],
        **kwargs,
    ):
        """
        Initializes the allocation with the given properties.
        """
        super().__init__(*args, **kwargs)
        self._alloc_mode = alloc_mode
        self._contract_address_iterator = contract_address_iterator
        self._eoa_iterator = eoa_iterator

    def __setitem__(self, address: Address | FixedSizeBytesConvertible, account: Account | None):
        """
        Sets the account associated with an address.
        """
        if self._alloc_mode == AllocMode.STRICT:
            raise ValueError("Cannot set items in strict mode")
        super().__setitem__(address, account)

    def deploy_contract(
        self,
        code: BytesConvertible,
        *,
        storage: Storage | StorageRootType = {},
        balance: NumberConvertible = 0,
        nonce: NumberConvertible = 1,
        address: Address | None = None,
        label: str | None = None,
    ) -> Address:
        """
        Deploy a contract to the allocation.

        Warning: `address` parameter is a temporary solution to allow tests to hard-code the
        contract address. Do NOT use in new tests as it will be removed in the future!
        """
        if address is not None:
            assert self._alloc_mode == AllocMode.PERMISSIVE, "address parameter is not supported"
            assert address not in self, f"address {address} already in allocation"
            contract_address = address
        else:
            contract_address = next(self._contract_address_iterator)

        if self._alloc_mode == AllocMode.STRICT:
            assert Number(nonce) >= 1, "impossible to deploy contract with nonce lower than one"

        super().__setitem__(
            contract_address,
            Account(
                nonce=nonce,
                balance=balance,
                code=code,
                storage=storage,
            ),
        )
        if label is None:
            # Try to deduce the label from the code
            frame = inspect.currentframe()
            if frame is not None:
                caller_frame = frame.f_back
                if caller_frame is not None:
                    code_context = inspect.getframeinfo(caller_frame).code_context
                    if code_context is not None:
                        line = code_context[0].strip()
                        if "=" in line:
                            label = line.split("=")[0].strip()

        contract_address.label = label
        return contract_address

    def fund_eoa(self, amount: NumberConvertible = 10**21, label: str | None = None) -> EOA:
        """
        Add a previously unused EOA to the pre-alloc with the balance specified by `amount`.
        """
        eoa = next(self._eoa_iterator)
        super().__setitem__(
            eoa,
            Account(
                nonce=0,
                balance=amount,
            ),
        )
        return eoa

    def fund_address(self, address: Address, amount: NumberConvertible):
        """
        Fund an address with a given amount.

        If the address is already present in the pre-alloc the amount will be
        added to its existing balance.
        """
        if address in self:
            account = self[address]
            if account is not None:
                current_balance = account.balance or 0
                account.balance = ZeroPaddedHexNumber(current_balance + Number(amount))
                return
        super().__setitem__(address, Account(balance=amount))
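Typical use of these methods through the pre fixture inside a test function (an illustrative sketch; amounts and code are arbitrary):

def test_example(pre):
    sender = pre.fund_eoa()                       # fresh EOA with the default 10**21 wei balance
    contract = pre.deploy_contract(code=b"\x00")  # address taken from the contract address iterator
    pre.fund_address(contract, 1)                 # tops up the already-present account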

__init__(*args, alloc_mode, contract_address_iterator, eoa_iterator, **kwargs)

Initializes the allocation with the given properties.

Source code in src/pytest_plugins/filler/pre_alloc.py
def __init__(
    self,
    *args,
    alloc_mode: AllocMode,
    contract_address_iterator: Iterator[Address],
    eoa_iterator: Iterator[EOA],
    **kwargs,
):
    """
    Initializes the allocation with the given properties.
    """
    super().__init__(*args, **kwargs)
    self._alloc_mode = alloc_mode
    self._contract_address_iterator = contract_address_iterator
    self._eoa_iterator = eoa_iterator

__setitem__(address, account)

Sets the account associated with an address.

Source code in src/pytest_plugins/filler/pre_alloc.py
def __setitem__(self, address: Address | FixedSizeBytesConvertible, account: Account | None):
    """
    Sets the account associated with an address.
    """
    if self._alloc_mode == AllocMode.STRICT:
        raise ValueError("Cannot set items in strict mode")
    super().__setitem__(address, account)

deploy_contract(code, *, storage={}, balance=0, nonce=1, address=None, label=None)

Deploy a contract to the allocation.

Warning: address parameter is a temporary solution to allow tests to hard-code the contract address. Do NOT use in new tests as it will be removed in the future!

Source code in src/pytest_plugins/filler/pre_alloc.py
def deploy_contract(
    self,
    code: BytesConvertible,
    *,
    storage: Storage | StorageRootType = {},
    balance: NumberConvertible = 0,
    nonce: NumberConvertible = 1,
    address: Address | None = None,
    label: str | None = None,
) -> Address:
    """
    Deploy a contract to the allocation.

    Warning: `address` parameter is a temporary solution to allow tests to hard-code the
    contract address. Do NOT use in new tests as it will be removed in the future!
    """
    if address is not None:
        assert self._alloc_mode == AllocMode.PERMISSIVE, "address parameter is not supported"
        assert address not in self, f"address {address} already in allocation"
        contract_address = address
    else:
        contract_address = next(self._contract_address_iterator)

    if self._alloc_mode == AllocMode.STRICT:
        assert Number(nonce) >= 1, "impossible to deploy contract with nonce lower than one"

    super().__setitem__(
        contract_address,
        Account(
            nonce=nonce,
            balance=balance,
            code=code,
            storage=storage,
        ),
    )
    if label is None:
        # Try to deduce the label from the code
        frame = inspect.currentframe()
        if frame is not None:
            caller_frame = frame.f_back
            if caller_frame is not None:
                code_context = inspect.getframeinfo(caller_frame).code_context
                if code_context is not None:
                    line = code_context[0].strip()
                    if "=" in line:
                        label = line.split("=")[0].strip()

    contract_address.label = label
    return contract_address

fund_eoa(amount=10 ** 21, label=None)

Add a previously unused EOA to the pre-alloc with the balance specified by amount.

Source code in src/pytest_plugins/filler/pre_alloc.py
def fund_eoa(self, amount: NumberConvertible = 10**21, label: str | None = None) -> EOA:
    """
    Add a previously unused EOA to the pre-alloc with the balance specified by `amount`.
    """
    eoa = next(self._eoa_iterator)
    super().__setitem__(
        eoa,
        Account(
            nonce=0,
            balance=amount,
        ),
    )
    return eoa

fund_address(address, amount)

Fund an address with a given amount.

If the address is already present in the pre-alloc the amount will be added to its existing balance.

Source code in src/pytest_plugins/filler/pre_alloc.py
def fund_address(self, address: Address, amount: NumberConvertible):
    """
    Fund an address with a given amount.

    If the address is already present in the pre-alloc the amount will be
    added to its existing balance.
    """
    if address in self:
        account = self[address]
        if account is not None:
            current_balance = account.balance or 0
            account.balance = ZeroPaddedHexNumber(current_balance + Number(amount))
            return
    super().__setitem__(address, Account(balance=amount))

alloc_mode(request)

Returns the allocation mode for the tests.

Source code in src/pytest_plugins/filler/pre_alloc.py
@pytest.fixture(scope="session")
def alloc_mode(request: pytest.FixtureRequest) -> AllocMode:
    """
    Returns the allocation mode for the tests.
    """
    if request.config.getoption("strict_alloc"):
        return AllocMode.STRICT
    return AllocMode.PERMISSIVE

contract_start_address(request)

Returns the starting address for contract deployment.

Source code in src/pytest_plugins/filler/pre_alloc.py
@pytest.fixture(scope="session")
def contract_start_address(request: pytest.FixtureRequest) -> int:
    """
    Returns the starting address for contract deployment.
    """
    return int(request.config.getoption("test_contract_start_address"), 0)
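Passing base 0 to int() lets the command-line value be given either in decimal or with a 0x prefix, for example:

# Both spellings parse to the same starting address.
assert int("0x1000", 0) == 4096
assert int("4096", 0) == 4096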

contract_address_increments(request)

Returns the address increment for contract deployment.

Source code in src/pytest_plugins/filler/pre_alloc.py
@pytest.fixture(scope="session")
def contract_address_increments(request: pytest.FixtureRequest) -> int:
    """
    Returns the address increment for contract deployment.
    """
    return int(request.config.getoption("test_contract_address_increments"), 0)

contract_address_iterator(contract_start_address, contract_address_increments)

Returns an iterator over contract addresses.

Source code in src/pytest_plugins/filler/pre_alloc.py
@pytest.fixture(scope="function")
def contract_address_iterator(
    contract_start_address: int,
    contract_address_increments: int,
) -> Iterator[Address]:
    """
    Returns an iterator over contract addresses.
    """
    return iter(
        Address(contract_start_address + (i * contract_address_increments)) for i in count()
    )

eoa_by_index(i) cached

Returns an EOA by index.

Source code in src/pytest_plugins/filler/pre_alloc.py
@cache
def eoa_by_index(i: int) -> EOA:
    """
    Returns an EOA by index.
    """
    return EOA(key=TestPrivateKey + i if i != 1 else TestPrivateKey2, nonce=0)

eoa_iterator()

Returns an iterator over copies of EOAs.

Source code in src/pytest_plugins/filler/pre_alloc.py
@pytest.fixture(scope="function")
def eoa_iterator() -> Iterator[EOA]:
    """
    Returns an iterator over EOAs copies.
    """
    return iter(eoa_by_index(i).copy() for i in count())

pre(alloc_mode, contract_address_iterator, eoa_iterator)

Returns the default pre allocation for all tests (Empty alloc).

Source code in src/pytest_plugins/filler/pre_alloc.py
@pytest.fixture(scope="function")
def pre(
    alloc_mode: AllocMode,
    contract_address_iterator: Iterator[Address],
    eoa_iterator: Iterator[EOA],
) -> Alloc:
    """
    Returns the default pre allocation for all tests (Empty alloc).
    """
    return Alloc(
        alloc_mode=alloc_mode,
        contract_address_iterator=contract_address_iterator,
        eoa_iterator=eoa_iterator,
    )