"""Run a full tracking benchmark: simulation -> reconstruction -> metrics.

Fires a particle gun upstream of the straw tube spectrometer (T1-T4),
runs digitisation and reconstruction with template matching pattern
recognition, then computes tracking performance metrics.

Each phase (sim, reco) runs as a subprocess because FairRoot singletons
prevent creating multiple FairRunSim instances in the same process.

Usage:
    python macro/run_tracking_benchmark.py -n 200 --seed 42 --tag test
    python macro/run_tracking_benchmark.py -n 1000 --nTracks 5 --tag multi
"""
19from __future__ import annotations
24from argparse import ArgumentParser
# Command-line interface for the benchmark driver.
parser = ArgumentParser(description="Tracking performance benchmark for straw tube spectrometer")
parser.add_argument("-n", "--nEvents", type=int, default=1000, help="Number of events (default: 1000)")
parser.add_argument("--pID", type=int, default=13, help="Particle PDG ID (default: 13, mu-)")
parser.add_argument("--Estart", type=float, default=1.0, help="Start of energy range in GeV (default: 1)")
parser.add_argument("--Eend", type=float, default=100.0, help="End of energy range in GeV (default: 100)")
parser.add_argument("--Vz", type=float, default=8300.0, help="Gun z-position in cm (default: 8300, ~1m upstream of T1)")
parser.add_argument("--Dx", type=float, default=200.0, help="Position spread in x [cm] (default: 200)")
parser.add_argument("--Dy", type=float, default=300.0, help="Position spread in y [cm] (default: 300)")
parser.add_argument("--nTracks", type=int, default=1, help="Tracks per event (default: 1)")
parser.add_argument("--tag", default="benchmark", help="Output file tag (default: benchmark)")
parser.add_argument("--output-json", default=None, help="JSON metrics output path")
parser.add_argument("--seed", type=int, default=42, help="Random seed (default: 42)")
parser.add_argument("-o", "--outputDir", default=".", help="Output directory (default: .)")
parser.add_argument(
    # NOTE(review): the option name and its type/default were lost from the
    # damaged source; "--debug" matches the later `reco_cmd.append("--Debug")`
    # guard — TODO confirm against the original script.
    "--debug",
    type=int,
    default=0,
    help="FairLogger verbosity: 0=info, 1=debug, 2=debug1, 3=debug2",
)
options = parser.parse_args()
# Create the output directory up front so every phase can write into it.
# (exist_ok avoids the check-then-create race of the original pattern.)
os.makedirs(options.outputDir, exist_ok=True)

# NOTE(review): the definition of `tag` sat on lines lost from the damaged
# source; it is almost certainly just the CLI tag — TODO confirm.
tag = options.tag

# Derived artefact paths shared by the sim, reco and metrics phases.
sim_file = f"{options.outputDir}/sim_{tag}.root"
geo_file = f"{options.outputDir}/geo_{tag}.root"
reco_file = f"{options.outputDir}/sim_{tag}_rec.root"
json_file = options.output_json or f"{options.outputDir}/tracking_metrics.json"
histo_file = f"{options.outputDir}/tracking_benchmark_histos.root"

# FairShip installation root; empty string falls back to relative macro paths.
fairship = os.environ.get("FAIRSHIP", "")
def run_phase(description: str, cmd: list[str]) -> None:
    """Run a subprocess phase, exiting the script on failure.

    Each phase runs as a child process because FairRoot singletons prevent
    multiple FairRunSim instances in one interpreter (see module docstring).

    Args:
        description: Human-readable phase label printed before and on failure.
        cmd: Argument vector passed verbatim to subprocess.run (no shell).

    Raises:
        SystemExit: with the child's exit code when it returns non-zero.
    """
    print(f"{description}")
    # check=False: we handle the failure ourselves to propagate the child's
    # exact exit code rather than raising CalledProcessError.
    result = subprocess.run(cmd, check=False)
    if result.returncode != 0:
        print(f"FAILED: {description} (exit code {result.returncode})")
        sys.exit(result.returncode)
# Phase 1 entry point: prefer the installed FairShip macro when $FAIRSHIP is
# set, otherwise fall back to the repo-relative path.
sim_script = (
    os.path.join(fairship, "macro", "run_simScript.py")
    if fairship
    else "macro/run_simScript.py"
)
# Abort early if the simulation phase produced no output file.
if not os.path.exists(sim_file):
    print(f"ERROR: Simulation output {sim_file} not found")
    # NOTE(review): the exit statement was lost from the damaged source;
    # a non-zero exit is the only sensible behaviour here — TODO confirm code.
    sys.exit(1)
# Phase 2 entry point, resolved the same way as sim_script above.
reco_script = (
    os.path.join(fairship, "macro", "ShipReco.py")
    if fairship
    else "macro/ShipReco.py"
)
# NOTE(review): most of the reconstruction command assembly was lost from the
# damaged source; only `str(options.nEvents)` and the conditional
# `reco_cmd.append("--Debug")` survive. Reconstructed minimally — TODO confirm
# the full argument list against ShipReco.py's CLI.
reco_cmd = [
    sys.executable,
    reco_script,
    "-f", sim_file,
    "-g", geo_file,
    "-n", str(options.nEvents),
]
if options.debug:
    reco_cmd.append("--Debug")

run_phase("Phase 2: Reconstruction", reco_cmd)
# Abort if reconstruction produced no output file.
if not os.path.exists(reco_file):
    print(f"ERROR: Reconstruction output {reco_file} not found")
    # NOTE(review): exit statement lost from the damaged source — TODO confirm.
    sys.exit(1)
print("Phase 3: Benchmark Metrics")

# Imported here (not at the top) so the sim/reco subprocess phases run before
# this module is loaded in the parent process.
import tracking_benchmark

# NOTE(review): construction of `bench` sat on lines lost from the damaged
# source; reconstructed from the surviving method calls — TODO confirm the
# class name and constructor arguments in tracking_benchmark.
bench = tracking_benchmark.TrackingBenchmark(reco_file, geo_file)
bench.compute_metrics()
bench.save_json(json_file)
bench.save_histograms(histo_file)

print("Tracking benchmark complete.")
print(f"  Metrics: {json_file}")
print(f"  Histograms: {histo_file}")