Forks Plugin

A pytest plugin to configure the forks in the test session. It parametrizes tests based on the user-provided fork range and the tests' specified validity markers.

Pytest plugin to enable fork range configuration for the test session.
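
For example, a test can declare its fork validity with these markers and receive the current fork via the plugin's autouse fork fixture. A minimal, hypothetical sketch (the fork names and test body are illustrative, not taken from the repository):

import pytest


@pytest.mark.valid_from("London")
@pytest.mark.valid_until("Shanghai")
def test_example(fork):
    # The plugin generates one test case per fork in the intersection of the
    # command-line fork range and the range declared by the markers above.
    assert fork is not None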

pytest_addoption(parser)

Adds command-line options to pytest.

Source code in src/pytest_plugins/forks/forks.py
def pytest_addoption(parser):
    """
    Adds command-line options to pytest.
    """
    fork_group = parser.getgroup("Forks", "Specify the fork range to generate fixtures for")
    fork_group.addoption(
        "--forks",
        action="store_true",
        dest="show_fork_help",
        default=False,
        help="Display forks supported by the test framework and exit.",
    )
    fork_group.addoption(
        "--fork",
        action="store",
        dest="single_fork",
        default=None,
        help="Only fill tests for the specified fork.",
    )
    fork_group.addoption(
        "--from",
        action="store",
        dest="forks_from",
        default=None,
        help="Fill tests from and including the specified fork.",
    )
    fork_group.addoption(
        "--until",
        action="store",
        dest="forks_until",
        default=None,
        help="Fill tests until and including the specified fork.",
    )
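
The same flags can also be passed to a programmatic pytest invocation; a rough sketch, with placeholder fork names:

import pytest

# Roughly equivalent to `pytest --from London --until Shanghai` on the
# command line; "London" and "Shanghai" are placeholders for supported forks.
pytest.main(["--from", "London", "--until", "Shanghai"])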

get_forks()

Returns a list of all the fork classes implemented by ethereum_test_forks ordered chronologically by deployment.

Source code in src/ethereum_test_forks/helpers.py
def get_forks() -> List[Fork]:
    """
    Returns a list of all the fork classes implemented by
    `ethereum_test_forks` ordered chronologically by deployment.
    """
    all_forks: List[Fork] = []
    for fork_name in forks.__dict__:
        fork = forks.__dict__[fork_name]
        if not isinstance(fork, type):
            continue
        if issubclass(fork, BaseFork) and fork is not BaseFork:
            all_forks.append(fork)
    return all_forks
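
A small usage sketch, assuming the helper is re-exported by the ethereum_test_forks package (otherwise import it from ethereum_test_forks.helpers):

from ethereum_test_forks import get_forks

# Print every implemented fork in deployment order; name() is the same
# class method the plugin uses to build its fork map.
for fork in get_forks():
    print(fork.name())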

get_deployed_forks()

Returns a list of all the fork classes implemented by ethereum_test_forks that have been deployed to mainnet, chronologically ordered by deployment.

Source code in src/ethereum_test_forks/helpers.py
def get_deployed_forks():
    """
    Returns a list of all the fork classes implemented by `ethereum_test_forks`
    that have been deployed to mainnet, chronologically ordered by deployment.
    """
    return [fork for fork in get_forks() if fork.is_deployed()]
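
For example, the difference between the two helpers gives the forks that are implemented but not yet deployed to mainnet (import path assumed as above):

from ethereum_test_forks import get_deployed_forks, get_forks

deployed = set(get_deployed_forks())
in_development = [fork.name() for fork in get_forks() if fork not in deployed]
print(in_development)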

pytest_configure(config)

Register the plugin's custom markers and process command-line options.

Custom marker registration: https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#registering-custom-markers

Source code in src/pytest_plugins/forks/forks.py
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
    """
    Register the plugin's custom markers and process command-line options.

    Custom marker registration:
    https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#registering-custom-markers
    """
    config.addinivalue_line(
        "markers",
        (
            "valid_at_transition_to(fork): specifies a test case is valid "
            "only at fork transition boundary to the specified fork"
        ),
    )
    config.addinivalue_line(
        "markers",
        "valid_from(fork): specifies from which fork a test case is valid",
    )
    config.addinivalue_line(
        "markers",
        "valid_until(fork): specifies until which fork a test case is valid",
    )

    single_fork = config.getoption("single_fork")
    forks_from = config.getoption("forks_from")
    forks_until = config.getoption("forks_until")
    show_fork_help = config.getoption("show_fork_help")

    all_forks = get_forks()
    # TODO: Tricky, this removes the *Glacier forks.
    config.all_forks = forks_from_until(all_forks[0], all_forks[-1])
    config.fork_map = {fork.name(): fork for fork in config.all_forks}
    config.fork_names = list(config.fork_map.keys())

    available_forks_help = textwrap.dedent(
        f"""\
        Available forks:
        {", ".join(config.fork_names)}
        """
    )
    available_forks_help += textwrap.dedent(
        f"""\
        Available transition forks:
        {", ".join([fork.name() for fork in get_transition_forks()])}
        """
    )
    dev_forks_help = textwrap.dedent(
        "To run tests for a fork under active development, it must be "
        "specified explicitly via --forks-until=FORK.\n"
        "Tests are only ran for deployed mainnet forks by default, i.e., "
        f"until {get_deployed_forks()[-1].name()}.\n"
    )
    if show_fork_help:
        print(available_forks_help)
        print(dev_forks_help)
        pytest.exit("After displaying help.", returncode=0)

    if single_fork and single_fork not in config.fork_map.keys():
        print("Error: Unsupported fork provided to --fork:", single_fork, "\n", file=sys.stderr)
        print(available_forks_help, file=sys.stderr)
        pytest.exit("Invalid command-line options.", returncode=pytest.ExitCode.USAGE_ERROR)

    if single_fork and (forks_from or forks_until):
        print(
            "Error: --fork cannot be used in combination with --from or --until", file=sys.stderr
        )
        pytest.exit("Invalid command-line options.", returncode=pytest.ExitCode.USAGE_ERROR)

    if single_fork:
        forks_from = single_fork
        forks_until = single_fork
    else:
        if not forks_from:
            forks_from = config.fork_names[0]
        if not forks_until:
            forks_until = get_deployed_forks()[-1].name()

    if forks_from not in config.fork_map.keys():
        print(f"Error: Unsupported fork provided to --from: {forks_from}\n", file=sys.stderr)
        print(available_forks_help, file=sys.stderr)
        pytest.exit("Invalid command-line options.", returncode=pytest.ExitCode.USAGE_ERROR)

    if forks_until not in config.fork_map.keys():
        print(f"Error: Unsupported fork provided to --until: {forks_until}\n", file=sys.stderr)
        print(available_forks_help, file=sys.stderr)
        pytest.exit("Invalid command-line options.", returncode=pytest.ExitCode.USAGE_ERROR)

    config.fork_range = config.fork_names[
        config.fork_names.index(forks_from) : config.fork_names.index(forks_until) + 1
    ]

    if not config.fork_range:
        print(
            f"Error: --from {forks_from} --until {forks_until} creates an empty fork range.",
            file=sys.stderr,
        )
        pytest.exit(
            "Command-line options produce empty fork range.",
            returncode=pytest.ExitCode.USAGE_ERROR,
        )

    # with --collect-only, we don't have access to these config options
    if config.option.collectonly:
        return
    t8n = EvmTransitionTool(
        binary=config.getoption("evm_bin"),
        trace=config.getoption("evm_collect_traces"),
    )
    unsupported_forks = [
        fork for fork in config.fork_range if not t8n.is_fork_supported(config.fork_map[fork])
    ]
    if unsupported_forks:
        print(
            "Error: The configured evm tool doesn't support the following "
            f"forks: {', '.join(unsupported_forks)}.",
            file=sys.stderr,
        )
        print(
            "\nPlease specify a version of the evm tool which supports these "
            "forks or use --until FORK to specify a supported fork.\n",
            file=sys.stderr,
        )
        pytest.exit(
            "Incompatible evm tool with fork range.", returncode=pytest.ExitCode.USAGE_ERROR
        )
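
The fork range selection at the heart of this hook is an inclusive slice over the chronologically ordered fork names. A standalone sketch with placeholder names (the real list is config.fork_names, built from get_forks()):

fork_names = ["Berlin", "London", "Paris", "Shanghai"]  # placeholder names

forks_from, forks_until = "London", "Shanghai"
fork_range = fork_names[fork_names.index(forks_from) : fork_names.index(forks_until) + 1]
print(fork_range)  # ['London', 'Paris', 'Shanghai']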

get_transition_forks()

Returns all the transition forks.

Source code in src/ethereum_test_forks/helpers.py
def get_transition_forks() -> List[Fork]:
    """
    Returns all the transition forks
    """
    transition_forks: List[Fork] = []

    for fork_name in transition.__dict__:
        fork = transition.__dict__[fork_name]
        if not isinstance(fork, type):
            continue
        if issubclass(fork, TransitionBaseClass) and issubclass(fork, BaseFork):
            transition_forks.append(fork)

    return transition_forks
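
Usage sketch (import path assumed; the printed names depend on which transition forks are currently implemented):

from ethereum_test_forks import get_transition_forks

for transition_fork in get_transition_forks():
    print(transition_fork.name())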

transition_fork_to(fork_to)

Returns the transition fork that transitions to the specified fork.

Source code in src/ethereum_test_forks/helpers.py
def transition_fork_to(fork_to: Fork) -> List[Fork]:
    """
    Returns the transition fork that transitions to the specified fork.
    """
    transition_forks: List[Fork] = []
    for transition_fork in get_transition_forks():
        if not issubclass(transition_fork, TransitionBaseClass):
            continue
        if transition_fork.transitions_to() == fork_to:
            transition_forks.append(transition_fork)

    return transition_forks
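
For example, to find the transition fork(s) that lead into a given fork (the fork class name here is illustrative):

from ethereum_test_forks import Shanghai, transition_fork_to

# Returns a list: there may be zero or several transition forks ending at Shanghai.
for transition_fork in transition_fork_to(Shanghai):
    print(transition_fork.name())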

forks_from_until(fork_from, fork_until)

Returns the specified fork and all forks after it, up to and including the second specified fork.

Source code in src/ethereum_test_forks/helpers.py
def forks_from_until(fork_from: Fork, fork_until: Fork) -> List[Fork]:
    """
    Returns the specified fork and all forks after it until and including the
    second specified fork
    """
    prev_fork = fork_until

    forks: List[Fork] = []

    while prev_fork != BaseFork and prev_fork != fork_from:
        forks.insert(0, prev_fork)

        prev_fork = prev_fork.__base__

    if prev_fork == BaseFork:
        return []

    forks.insert(0, fork_from)

    return forks
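
The helper walks the class hierarchy backwards via __base__, since each fork class subclasses its predecessor. A toy hierarchy, purely illustrative, shows the idea:

# Toy stand-ins for the real fork classes, which chain via subclassing:
# each fork derives from the fork that precedes it.
class Base: ...
class A(Base): ...
class B(A): ...
class C(B): ...

# Reproduce the walk performed by forks_from_until(A, C): start from the last
# fork and follow __base__ until the first fork is reached.
forks, prev = [], C
while prev is not A:
    forks.insert(0, prev)
    prev = prev.__base__
forks.insert(0, A)
print([cls.__name__ for cls in forks])  # ['A', 'B', 'C']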

EvmTransitionTool

Bases: TransitionTool

Go-ethereum evm Transition tool frontend.

Source code in src/evm_transition_tool/__init__.py
class EvmTransitionTool(TransitionTool):
    """
    Go-ethereum `evm` Transition tool frontend.
    """

    binary: Path
    cached_version: Optional[str] = None
    trace: bool

    def __init__(
        self,
        binary: Optional[Path | str] = None,
        trace: bool = False,
    ):
        if binary is None:
            which_path = which("evm")
            if which_path is not None:
                binary = Path(which_path)
        if binary is None or not Path(binary).exists():
            raise Exception(
                """`evm` binary executable is not accessible, please refer to
                https://github.com/ethereum/go-ethereum on how to compile and
                install the full suite of utilities including the `evm` tool"""
            )
        self.binary = Path(binary)
        self.trace = trace
        args = [str(self.binary), "t8n", "--help"]
        try:
            result = subprocess.run(args, capture_output=True, text=True)
        except subprocess.CalledProcessError as e:
            raise Exception("evm process unexpectedly returned a non-zero status code: " f"{e}.")
        except Exception as e:
            raise Exception(f"Unexpected exception calling evm tool: {e}.")
        self.help_string = result.stdout

    def evaluate(
        self,
        alloc: Any,
        txs: Any,
        env: Any,
        fork: Fork,
        chain_id: int = 1,
        reward: int = 0,
        eips: Optional[List[int]] = None,
    ) -> Tuple[Dict[str, Any], Dict[str, Any], str]:
        """
        Executes `evm t8n` with the specified arguments.
        """
        fork_name = fork.name()
        if eips is not None:
            fork_name = "+".join([fork_name] + [str(eip) for eip in eips])

        temp_dir = tempfile.TemporaryDirectory()

        args = [
            str(self.binary),
            "t8n",
            "--input.alloc=stdin",
            "--input.txs=stdin",
            "--input.env=stdin",
            "--output.result=stdout",
            "--output.alloc=stdout",
            "--output.body=txs.rlp",
            f"--output.basedir={temp_dir.name}",
            f"--state.fork={fork_name}",
            f"--state.chainid={chain_id}",
            f"--state.reward={reward}",
        ]

        if self.trace:
            args.append("--trace")

        stdin = {
            "alloc": alloc,
            "txs": txs,
            "env": env,
        }

        encoded_input = str.encode(json.dumps(stdin))
        result = subprocess.run(
            args,
            input=encoded_input,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

        if result.returncode != 0:
            raise Exception("failed to evaluate: " + result.stderr.decode())

        output = json.loads(result.stdout)

        if "alloc" not in output or "result" not in output:
            raise Exception("malformed result")

        with open(os.path.join(temp_dir.name, "txs.rlp"), "r") as txs_rlp_file:
            txs_rlp = txs_rlp_file.read().strip('"')

        if self.trace:
            receipts: List[Any] = output["result"]["receipts"]
            traces: List[List[Dict]] = []
            for i, r in enumerate(receipts):
                h = r["transactionHash"]
                trace_file_name = f"trace-{i}-{h}.jsonl"
                with open(os.path.join(temp_dir.name, trace_file_name), "r") as trace_file:
                    tx_traces: List[Dict] = []
                    for trace_line in trace_file.readlines():
                        tx_traces.append(json.loads(trace_line))
                    traces.append(tx_traces)
            self.append_traces(traces)

        temp_dir.cleanup()

        return (output["alloc"], output["result"], txs_rlp)

    def version(self) -> str:
        """
        Gets `evm` binary version.
        """
        if self.cached_version is None:
            result = subprocess.run(
                [str(self.binary), "-v"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )

            if result.returncode != 0:
                raise Exception("failed to evaluate: " + result.stderr.decode())

            self.cached_version = result.stdout.decode().strip()

        return self.cached_version

    def is_fork_supported(self, fork: Fork) -> bool:
        """
        Returns True if the fork is supported by the tool
        """
        return fork().name() in self.help_string
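
A minimal usage sketch, assuming a go-ethereum evm binary is available on the PATH and that Shanghai is one of the implemented fork classes (used here purely for illustration):

from ethereum_test_forks import Shanghai
from evm_transition_tool import EvmTransitionTool

t8n = EvmTransitionTool()  # locates `evm` via the PATH, or raises
print(t8n.version())
print(t8n.is_fork_supported(Shanghai))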

evaluate(alloc, txs, env, fork, chain_id=1, reward=0, eips=None)

Executes evm t8n with the specified arguments.

Source code in src/evm_transition_tool/__init__.py
def evaluate(
    self,
    alloc: Any,
    txs: Any,
    env: Any,
    fork: Fork,
    chain_id: int = 1,
    reward: int = 0,
    eips: Optional[List[int]] = None,
) -> Tuple[Dict[str, Any], Dict[str, Any], str]:
    """
    Executes `evm t8n` with the specified arguments.
    """
    fork_name = fork.name()
    if eips is not None:
        fork_name = "+".join([fork_name] + [str(eip) for eip in eips])

    temp_dir = tempfile.TemporaryDirectory()

    args = [
        str(self.binary),
        "t8n",
        "--input.alloc=stdin",
        "--input.txs=stdin",
        "--input.env=stdin",
        "--output.result=stdout",
        "--output.alloc=stdout",
        "--output.body=txs.rlp",
        f"--output.basedir={temp_dir.name}",
        f"--state.fork={fork_name}",
        f"--state.chainid={chain_id}",
        f"--state.reward={reward}",
    ]

    if self.trace:
        args.append("--trace")

    stdin = {
        "alloc": alloc,
        "txs": txs,
        "env": env,
    }

    encoded_input = str.encode(json.dumps(stdin))
    result = subprocess.run(
        args,
        input=encoded_input,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )

    if result.returncode != 0:
        raise Exception("failed to evaluate: " + result.stderr.decode())

    output = json.loads(result.stdout)

    if "alloc" not in output or "result" not in output:
        raise Exception("malformed result")

    with open(os.path.join(temp_dir.name, "txs.rlp"), "r") as txs_rlp_file:
        txs_rlp = txs_rlp_file.read().strip('"')

    if self.trace:
        receipts: List[Any] = output["result"]["receipts"]
        traces: List[List[Dict]] = []
        for i, r in enumerate(receipts):
            h = r["transactionHash"]
            trace_file_name = f"trace-{i}-{h}.jsonl"
            with open(os.path.join(temp_dir.name, trace_file_name), "r") as trace_file:
                tx_traces: List[Dict] = []
                for trace_line in trace_file.readlines():
                    tx_traces.append(json.loads(trace_line))
                traces.append(tx_traces)
        self.append_traces(traces)

    temp_dir.cleanup()

    return (output["alloc"], output["result"], txs_rlp)
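
A rough sketch of a direct call, reusing the t8n instance from the sketch above. Real tests build these inputs through the framework's test types; the dictionary keys follow the usual evm t8n conventions, and the exact fields required vary by fork (later forks also need e.g. a base fee), so treat this purely as an illustration:

from ethereum_test_forks import Berlin  # illustrative pre-London fork

alloc = {}  # empty pre-state
txs = []    # no transactions
env = {     # minimal, illustrative environment for a pre-London fork
    "currentCoinbase": "0x" + "00" * 20,
    "currentDifficulty": "0x20000",
    "currentGasLimit": "0x1c9c380",
    "currentNumber": "0x1",
    "currentTimestamp": "0x3e8",
}

post_alloc, result, txs_rlp = t8n.evaluate(alloc, txs, env, fork=Berlin)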

version()

Gets evm binary version.

Source code in src/evm_transition_tool/__init__.py
def version(self) -> str:
    """
    Gets `evm` binary version.
    """
    if self.cached_version is None:
        result = subprocess.run(
            [str(self.binary), "-v"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

        if result.returncode != 0:
            raise Exception("failed to evaluate: " + result.stderr.decode())

        self.cached_version = result.stdout.decode().strip()

    return self.cached_version

is_fork_supported(fork)

Returns True if the fork is supported by the tool.

Source code in src/evm_transition_tool/__init__.py
def is_fork_supported(self, fork: Fork) -> bool:
    """
    Returns True if the fork is supported by the tool
    """
    return fork().name() in self.help_string

pytest_report_header(config, start_path)

A pytest hook called to obtain the report header.

Source code in src/pytest_plugins/forks/forks.py
@pytest.hookimpl(trylast=True)
def pytest_report_header(config, start_path):
    """A pytest hook called to obtain the report header."""
    bold = "\033[1m"
    warning = "\033[93m"
    reset = "\033[39;49m"
    header = [
        (bold + f"Executing tests for: {', '.join(config.fork_range)} " + reset),
    ]
    if config.getoption("forks_until") is None:
        header += [
            (
                bold + warning + "Only executing tests with stable/deployed forks: "
                "Specify an upcoming fork via --until=fork to "
                "add forks under development." + reset
            )
        ]
    return header

fork(request)

Parametrize test cases by fork.

Source code in src/pytest_plugins/forks/forks.py
@pytest.fixture(autouse=True)
def fork(request):
    """
    Parametrize test cases by fork.
    """
    pass

get_validity_marker_args(metafunc, validity_marker_name, test_name)

Check and return the arguments specified to validity markers.

Check that the validity markers:

  • pytest.mark.valid_from
  • pytest.mark.valid_until
  • pytest.mark.valid_at_transition_to

are applied at most once and have been provided with exactly one argument which is a valid fork name.

Parameters:

  • metafunc (Metafunc, required): Pytest's metafunc object.
  • validity_marker_name (str, required): Name of the validity marker to validate and return.
  • test_name (str, required): The name of the test being parametrized by pytest_generate_tests.

Returns:

  • str | None: The name of the fork specified to the validity marker.

Source code in src/pytest_plugins/forks/forks.py
def get_validity_marker_args(
    metafunc: Metafunc,
    validity_marker_name: str,
    test_name: str,
) -> str | None:
    """Check and return the arguments specified to validity markers.

    Check that the validity markers:

    - `pytest.mark.valid_from`
    - `pytest.mark.valid_until`
    - `pytest.mark.valid_at_transition_to`

    are applied at most once and have been provided with exactly one
    argument which is a valid fork name.

    Args:
        metafunc: Pytest's metafunc object.
        validity_marker_name: Name of the validity marker to validate
            and return.
        test_name: The name of the test being parametrized by
            `pytest_generate_tests`.

    Returns:
        The name of the fork specified to the validity marker.
    """
    validity_markers = [
        marker for marker in metafunc.definition.iter_markers(validity_marker_name)
    ]
    if not validity_markers:
        return None
    if len(validity_markers) > 1:
        pytest.fail(f"'{test_name}': Too many '{validity_marker_name}' markers applied to test. ")
    if len(validity_markers[0].args) == 0:
        pytest.fail(f"'{test_name}': Missing fork argument with '{validity_marker_name}' marker. ")
    if len(validity_markers[0].args) > 1:
        pytest.fail(
            f"'{test_name}': Too many arguments specified to '{validity_marker_name}' marker. "
        )
    fork_name = validity_markers[0].args[0]
    if fork_name not in metafunc.config.fork_names:  # type: ignore
        pytest.fail(
            f"'{test_name}' specifies an invalid fork '{fork_name}' to the "
            f"'{validity_marker_name}'. "
            f"List of valid forks: {', '.join(metafunc.config.fork_names)}"  # type: ignore
        )

    return fork_name

pytest_generate_tests(metafunc)

Pytest hook used to dynamically generate test cases.

Source code in src/pytest_plugins/forks/forks.py
def pytest_generate_tests(metafunc):
    """
    Pytest hook used to dynamically generate test cases.
    """
    test_name = metafunc.function.__name__
    valid_at_transition_to = get_validity_marker_args(
        metafunc, "valid_at_transition_to", test_name
    )
    valid_from = get_validity_marker_args(metafunc, "valid_from", test_name)
    valid_until = get_validity_marker_args(metafunc, "valid_until", test_name)

    if valid_at_transition_to and valid_from:
        pytest.fail(
            f"'{test_name}': "
            "The markers 'valid_from' and 'valid_at_transition_to' can't be combined. "
        )
    if valid_at_transition_to and valid_until:
        pytest.fail(
            f"'{test_name}': "
            "The markers 'valid_until' and 'valid_at_transition_to' can't be combined. "
        )

    intersection_range = []

    if valid_at_transition_to:
        if valid_at_transition_to in metafunc.config.fork_range:
            to_fork = metafunc.config.fork_map[valid_at_transition_to]
            intersection_range = transition_fork_to(to_fork)

    else:
        if not valid_from:
            valid_from = metafunc.config.fork_names[0]

        if not valid_until:
            valid_until = metafunc.config.fork_names[-1]

        test_fork_range = set(
            metafunc.config.fork_names[
                metafunc.config.fork_names.index(valid_from) : metafunc.config.fork_names.index(
                    valid_until
                )
                + 1
            ]
        )

        if not test_fork_range:
            pytest.fail(
                "The test function's "
                f"'{test_name}' fork validity markers generate "
                "an empty fork range. Please check the arguments to its "
                f"markers:  @pytest.mark.valid_from ({valid_from}) and "
                f"@pytest.mark.valid_until ({valid_until})."
            )

        intersection_range = list(set(metafunc.config.fork_range) & test_fork_range)

        intersection_range.sort(key=metafunc.config.fork_range.index)
        intersection_range = [metafunc.config.fork_map[fork] for fork in intersection_range]

    if "fork" in metafunc.fixturenames:
        if not intersection_range:
            pytest.skip(  # this reason is not reported on the command-line
                f"{test_name} is not valid for any any of forks specified on the command-line."
            )
        else:
            metafunc.parametrize("fork", intersection_range, scope="function")
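
Putting the pieces together: the hook above converts validity markers into fork parameters. A test marked as valid only at a transition boundary, for instance, is parametrized with the corresponding transition fork(s) rather than a plain fork range (fork name illustrative):

import pytest


@pytest.mark.valid_at_transition_to("Shanghai")
def test_transition_example(fork):
    # Parametrized only with the transition fork(s) returned by
    # transition_fork_to(Shanghai); the test is skipped if Shanghai lies
    # outside the configured fork range.
    assert fork is not None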