mirror of
https://github.com/saymrwulf/QuantumLearning.git
synced 2026-05-14 20:58:00 +00:00
2630 lines
131 KiB
Python
#!/usr/bin/env python3
from __future__ import annotations

import hashlib
import json
from pathlib import Path
from textwrap import dedent


ROOT = Path(__file__).resolve().parents[1]
NOTEBOOKS = ROOT / "notebooks"
MODULE_01_DIR = NOTEBOOKS / "professional" / "module_01_qiskit_patterns"
MODULE_02_DIR = NOTEBOOKS / "professional" / "module_02_hardware_aware_redesign"
MODULE_03_DIR = NOTEBOOKS / "professional" / "module_03_noise_aware_verification"
MODULE_04_DIR = NOTEBOOKS / "professional" / "module_04_capstone_design_review"


def markdown_cell(text: str) -> dict:
    cleaned = "\n".join(line.rstrip() for line in dedent(text).strip("\n").splitlines())
    return {
        "cell_type": "markdown",
        "metadata": {},
        "source": [line + "\n" for line in cleaned.splitlines()],
    }


def code_cell(source: str) -> dict:
    cleaned = dedent(source).strip("\n")
    return {
        "cell_type": "code",
        "execution_count": None,
        "metadata": {},
        "outputs": [],
        "source": [line + "\n" for line in cleaned.splitlines()],
    }


def _cell_id(index: int, cell: dict) -> str:
    source = "".join(cell.get("source", []))
    digest = hashlib.sha1(f"{cell.get('cell_type', 'cell')}:{index}:{source}".encode()).hexdigest()
    return digest[:8]


def notebook(cells: list[dict]) -> dict:
    normalized_cells = []
    for index, cell in enumerate(cells):
        payload = dict(cell)
        payload.setdefault("id", _cell_id(index, payload))
        normalized_cells.append(payload)
    return {
        "cells": normalized_cells,
        "metadata": {
            "kernelspec": {
                "display_name": "QuantumLearning (.venv)",
                "language": "python",
                "name": "quantum-learning",
            },
            "language_info": {
                "name": "python",
                "version": "3.12",
            },
        },
        "nbformat": 4,
        "nbformat_minor": 5,
    }


def write_notebook(path: Path, payload: dict) -> None:
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(json.dumps(payload, indent=2) + "\n")
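

# Hedged usage sketch (not called by the generator): how the cell factories and
# notebook() compose into a written file. The demo path and cell contents are
# illustrative only; the real module definitions below drive write_notebook with
# much larger cell lists.
def _example_write_demo() -> None:
    cells = [
        markdown_cell("# Demo\nOne markdown cell and one code cell."),
        code_cell("print('hello, notebook')"),
    ]
    write_notebook(NOTEBOOKS / "_demo" / "demo.ipynb", notebook(cells))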


def quiz_code(questions: list[dict], heading: str) -> dict:
    return code_cell(f"quiz_block({questions!r}, heading={heading!r})")


def reflection_code(prompt: str) -> dict:
    return code_cell(f"reflection_box({prompt!r})")


def compose_code(*blocks: str) -> str:
    cleaned_blocks = []
    for block in blocks:
        cleaned = dedent(block).strip("\n")
        if cleaned:
            cleaned_blocks.append(cleaned)
    return "\n\n".join(cleaned_blocks)


def rubric_code(rubric_id: str, title: str) -> dict:
    return code_cell(
        f"""
        assessment_blueprint = load_assessment_blueprint()
        rubric_scorecard(
            assessment_blueprint.get_rubric({rubric_id!r}),
            title={title!r},
        )
        """
    )


def feedback_panel_code(title: str, prompt: str) -> dict:
    return code_cell(
        f"feedback_iteration_panel(title={title!r}, prompt={prompt!r})"
    )


def checklist_code(items: list[str], title: str) -> dict:
    return code_cell(f"evidence_checklist({items!r}, title={title!r})")


def editable_lab_code(
    initial_code: str,
    *,
    title: str,
    instructions: str,
    context_source: str = "",
    context_name: str = "simulate_counts",
    shots: int = 256,
) -> dict:
    return code_cell(
        compose_code(
            context_source,
            f"""
            editable_code = {initial_code!r}
            editable_circuit_lab(
                initial_code=editable_code,
                context={{"QuantumCircuit": QuantumCircuit, "simulate_counts": {context_name}}},
                title={title!r},
                instructions={instructions!r},
                shots={shots},
            )
            """,
        )
    )
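

# Worked example (hedged): for a small input, editable_lab_code emits a code cell
# whose dedented source looks roughly like the lines below. The exact literals
# depend on repr(), so quoting in a real generated cell may differ.
#
#     editable_code = 'circuit = QuantumCircuit(1, 1)'
#     editable_circuit_lab(
#         initial_code=editable_code,
#         context={"QuantumCircuit": QuantumCircuit, "simulate_counts": simulate_counts},
#         title="Demo",
#         instructions="Edit and rerun.",
#         shots=256,
#     )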


SETUP = """
from pathlib import Path
import sys

project_root = Path.cwd().resolve()
while not (project_root / "pyproject.toml").exists():
    if project_root.parent == project_root:
        raise RuntimeError("Could not locate the project root from this notebook.")
    project_root = project_root.parent

src_path = project_root / "src"
if str(src_path) not in sys.path:
    sys.path.insert(0, str(src_path))
"""


COMMON_IMPORTS = """
from quantum_learning import (
    build_demo_noise_model,
    counts_to_probabilities,
    draw_circuit,
    editable_circuit_lab,
    evidence_checklist,
    feedback_iteration_panel,
    line_coupling_map,
    load_assessment_blueprint,
    plot_counts,
    plot_probabilities,
    quiz_block,
    reflection_box,
    rubric_scorecard,
    simulate_counts,
    statevector_probabilities,
    step_reference_table,
    transpile_summary,
)
from qiskit import QuantumCircuit
from qiskit.providers.basic_provider import BasicSimulator
"""


GENERAL_REVIEW_CHECKLIST = [
    "The notebook states the objective or decision clearly.",
    "The relevant constraints are explicit and stable.",
    "Evidence is tied to a concrete circuit, output, or metric.",
    "At least one uncertainty or risk is named explicitly.",
    "The notebook ends with a next action or defended judgement.",
]


CAPSTONE_REVIEW_CHECKLIST = [
    "The design brief states both objective and local constraints.",
    "At least two plausible candidate circuits are compared.",
    "Ideal evidence is shown before constrained claims are made.",
    "Compiled burden is compared under a fixed local constraint model.",
    "Noise-aware evidence is compared under a declared local model.",
    "The notebook ends with an explicit recommendation.",
    "Residual risk or limits are named honestly.",
    "Another engineer could reproduce the comparison from the notebook.",
]


PATTERNS_STEP_REFS = [
    {
        "marker": "[1]",
        "code_focus": "Treat the question being asked of the circuit as configuration, not as a hidden side effect.",
        "diagram_effect": "The rendered circuit becomes one phase of a workflow rather than the whole workflow.",
        "why_it_matters": "Professional notebooks separate design brief, circuit body, and evaluation record.",
    },
    {
        "marker": "[2]",
        "code_focus": "Keep basis adaptation or other query-specific logic in a clearly named layer.",
        "diagram_effect": "The middle of the circuit shows which part is core routine and which part customizes the question.",
        "why_it_matters": "Patterns become reusable only when stable and variable regions are visibly distinct.",
    },
    {
        "marker": "[3]",
        "code_focus": "Make the reporting contract explicit instead of relying on later guesswork.",
        "diagram_effect": "The diagram ends with an auditable readout layer.",
        "why_it_matters": "Workflow quality includes trustworthy evidence, not only a correct quantum body.",
    },
    {
        "marker": "[4]",
        "code_focus": "Pair the circuit with post-processing that produces a reviewable record rather than an isolated histogram.",
        "diagram_effect": "The circuit is now visibly part of a larger map-optimize-execute-post-process cycle.",
        "why_it_matters": "Patterns matter when circuits live inside applications, experiments, and reports.",
    },
]


PATTERNS_ANCHOR = """
from qiskit import QuantumCircuit

def build_agreement_probe(basis: str = "z") -> QuantumCircuit:
    circuit = QuantumCircuit(2, 2)
    # [1] The workflow chooses the question before the circuit is built.
    circuit.h(0)
    circuit.cx(0, 1)
    # [2] Basis adaptation belongs in its own visible layer.
    if basis == "x":
        circuit.h([0, 1])
    elif basis != "z":
        raise ValueError("basis must be 'z' or 'x'")
    # [3] Keep the reporting contract explicit.
    circuit.measure([0, 1], [0, 1])
    return circuit

circuit = build_agreement_probe(basis="x")
"""


PATTERNS_CONFIG_EDITABLE = """
from qiskit import QuantumCircuit

def build_probe(basis: str = "z", add_barrier: bool = True) -> QuantumCircuit:
    circuit = QuantumCircuit(2, 2)
    circuit.h(0)
    circuit.cx(0, 1)
    if add_barrier:
        circuit.barrier()
    if basis == "x":
        circuit.h([0, 1])
    elif basis != "z":
        raise ValueError("basis must be 'z' or 'x'")
    circuit.measure([0, 1], [0, 1])
    return circuit

circuit = build_probe(basis="z", add_barrier=True)
"""


PATTERNS_REPORTING_EDITABLE = """
from qiskit import QuantumCircuit

def build_probe(swapped_report: bool = False) -> QuantumCircuit:
    circuit = QuantumCircuit(2, 2)
    circuit.h(0)
    circuit.cx(0, 1)
    if swapped_report:
        circuit.measure([0, 1], [1, 0])
    else:
        circuit.measure([0, 1], [0, 1])
    return circuit

circuit = build_probe(swapped_report=False)
"""


LINE_CONTEXT = """
LOCAL_BASIS = ["rz", "sx", "x", "cx"]

def simulate_line_counts(circuit, shots=256):
    return simulate_counts(
        circuit,
        shots=shots,
        basis_gates=LOCAL_BASIS,
        coupling_map=line_coupling_map(circuit.num_qubits),
        optimization_level=1,
    )
"""


HARDWARE_STEP_REFS = [
    {
        "marker": "[1]",
        "code_focus": "Write the abstract target circuit clearly before worrying about rescue by the transpiler.",
        "diagram_effect": "The first diagram states the ideal intent with no attempt to hide topology pressure.",
        "why_it_matters": "Human redesign starts by knowing what the unpressured circuit was trying to express.",
    },
    {
        "marker": "[2]",
        "code_focus": "Expose basis-gate and coupling-map assumptions explicitly.",
        "diagram_effect": "The device pressure becomes a visible part of the experiment rather than a hidden environment fact.",
        "why_it_matters": "You cannot reason about hardware cost if the constraints remain implicit.",
    },
    {
        "marker": "[3]",
        "code_focus": "Inspect the compiled rewrite and decide which costs were caused by your abstract layout.",
        "diagram_effect": "The compiled version shows extra structure, depth, or routing burden.",
        "why_it_matters": "Professional redesign begins where default compilation becomes expensive.",
    },
    {
        "marker": "[4]",
        "code_focus": "Create and compare a human-aware alternative instead of treating the compiler as the final word.",
        "diagram_effect": "A second circuit appears whose structure anticipates the topology rather than suffering under it.",
        "why_it_matters": "Hardware awareness is redesign, not passive observation of transpiler output.",
    },
]


HARDWARE_ANCHOR = """
from qiskit import QuantumCircuit

def naive_star_ghz() -> QuantumCircuit:
    circuit = QuantumCircuit(4, 4)
    # [1] Express the ideal entangling target directly.
    circuit.h(0)
    circuit.cx(0, 1)
    circuit.cx(0, 2)
    circuit.cx(0, 3)
    # [2] Keep the evidence path stable while topology pressure is studied elsewhere.
    circuit.measure([0, 1, 2, 3], [0, 1, 2, 3])
    return circuit

circuit = naive_star_ghz()
"""


HARDWARE_MANUAL_EDITABLE = """
from qiskit import QuantumCircuit

def line_friendly_ghz() -> QuantumCircuit:
    circuit = QuantumCircuit(4, 4)
    circuit.h(1)
    circuit.cx(1, 0)
    circuit.cx(1, 2)
    circuit.cx(2, 3)
    circuit.measure([0, 1, 2, 3], [0, 1, 2, 3])
    return circuit

circuit = line_friendly_ghz()
"""


HARDWARE_COMPARE_EDITABLE = """
from qiskit import QuantumCircuit

def candidate(style: str = "middle_root") -> QuantumCircuit:
    circuit = QuantumCircuit(4, 4)
    if style == "naive":
        circuit.h(0)
        circuit.cx(0, 1)
        circuit.cx(0, 2)
        circuit.cx(0, 3)
    elif style == "chain":
        circuit.h(0)
        circuit.cx(0, 1)
        circuit.cx(1, 2)
        circuit.cx(2, 3)
    elif style == "middle_root":
        circuit.h(1)
        circuit.cx(1, 0)
        circuit.cx(1, 2)
        circuit.cx(2, 3)
    else:
        raise ValueError("style must be naive, chain, or middle_root")
    circuit.measure([0, 1, 2, 3], [0, 1, 2, 3])
    return circuit

circuit = candidate(style="middle_root")
"""


NOISE_CONTEXT = """
demo_noise = build_demo_noise_model(
    single_qubit_error=0.01,
    two_qubit_error=0.05,
    readout_error=0.03,
)

def simulate_noisy_counts(circuit, shots=256):
    return simulate_counts(circuit, shots=shots, noise_model=demo_noise)
"""


NOISE_STEP_REFS = [
    {
        "marker": "[1]",
        "code_focus": "State an invariant or expected signature before you look at noisy data.",
        "diagram_effect": "The circuit is linked to a claim, not only to a plot.",
        "why_it_matters": "Verification begins with a falsifiable expectation, not with vibes about whether the histogram looks plausible.",
    },
    {
        "marker": "[2]",
        "code_focus": "Create an ideal reference case with a clear evidence path.",
        "diagram_effect": "The clean diagram becomes the baseline against which distortion is interpreted.",
        "why_it_matters": "Without an ideal baseline, noise and design bugs get mixed together.",
    },
    {
        "marker": "[3]",
        "code_focus": "Inject or study noise while preserving the same reporting contract.",
        "diagram_effect": "The circuit body stays recognizably the same while the output distribution degrades.",
        "why_it_matters": "Good diagnosis compares like with like.",
    },
    {
        "marker": "[4]",
        "code_focus": "Use invariants, filters, or postselection to separate structural failure from expected physical distortion.",
        "diagram_effect": "The notebook now includes verification logic alongside execution.",
        "why_it_matters": "Mitigation thinking starts with diagnosis, not magical hope.",
    },
]


NOISE_ANCHOR = """
from qiskit import QuantumCircuit

def bell_candidate(bug: bool = False) -> QuantumCircuit:
    circuit = QuantumCircuit(2, 2)
    # [1] Toggle only one potential design defect at a time.
    if not bug:
        circuit.h(0)
    # [2] Correlate the second wire so the intended support is {00, 11}.
    circuit.cx(0, 1)
    # [3] Keep the reporting layer explicit and stable.
    circuit.barrier()
    # [4] Measure both outputs so invariants can inspect the evidence.
    circuit.measure([0, 1], [0, 1])
    return circuit

circuit = bell_candidate(bug=False)
"""


NOISE_BUG_EDITABLE = """
from qiskit import QuantumCircuit

def bell_candidate(bug: bool = True) -> QuantumCircuit:
    circuit = QuantumCircuit(2, 2)
    if not bug:
        circuit.h(0)
    circuit.cx(0, 1)
    circuit.measure([0, 1], [0, 1])
    return circuit

circuit = bell_candidate(bug=True)
"""


NOISE_FILTER_EDITABLE = """
from qiskit import QuantumCircuit

def bell_candidate() -> QuantumCircuit:
    circuit = QuantumCircuit(2, 2)
    circuit.h(0)
    circuit.cx(0, 1)
    circuit.measure([0, 1], [0, 1])
    return circuit

circuit = bell_candidate()
"""


CAPSTONE_CONTEXT = """
LOCAL_BASIS = ["rz", "sx", "x", "cx"]
capstone_noise = build_demo_noise_model(
    single_qubit_error=0.01,
    two_qubit_error=0.04,
    readout_error=0.02,
)

def simulate_capstone_counts(circuit, shots=256):
    return simulate_counts(
        circuit,
        shots=shots,
        noise_model=capstone_noise,
        basis_gates=LOCAL_BASIS,
        coupling_map=line_coupling_map(circuit.num_qubits),
        optimization_level=1,
    )
"""


CAPSTONE_STEP_REFS = [
    {
        "marker": "[1]",
        "code_focus": "State the design brief and the constraints before generating any candidate.",
        "diagram_effect": "Every candidate diagram is read as an answer to a shared constrained question.",
        "why_it_matters": "Capstone work starts from an explicit objective, not from attachment to a favorite circuit shape.",
    },
    {
        "marker": "[2]",
        "code_focus": "Generate more than one plausible candidate family.",
        "diagram_effect": "The notebook contains alternatives instead of a single self-congratulatory path.",
        "why_it_matters": "Professional design requires comparison, not only construction.",
    },
    {
        "marker": "[3]",
        "code_focus": "Benchmark ideal, compiled, and noisy behavior with the same reporting contract.",
        "diagram_effect": "Each candidate can be judged across several evidence layers.",
        "why_it_matters": "Recommendations are credible only when they survive more than one lens.",
    },
    {
        "marker": "[4]",
        "code_focus": "Write a recommendation that names tradeoffs, risks, and why one candidate wins.",
        "diagram_effect": "The notebook ends as a design review, not a gallery.",
        "why_it_matters": "The capstone is about defended judgement under constraints.",
    },
]


CAPSTONE_ANCHOR = """
from qiskit import QuantumCircuit

def ghz_candidate(style: str = "middle_root") -> QuantumCircuit:
    circuit = QuantumCircuit(3, 3)
    if style == "naive":
        circuit.h(0)
        circuit.cx(0, 1)
        circuit.cx(0, 2)
    elif style == "chain":
        circuit.h(0)
        circuit.cx(0, 1)
        circuit.cx(1, 2)
    elif style == "middle_root":
        circuit.h(1)
        circuit.cx(1, 0)
        circuit.cx(1, 2)
    else:
        raise ValueError("style must be naive, chain, or middle_root")
    circuit.measure([0, 1, 2], [0, 1, 2])
    return circuit

circuit = ghz_candidate(style="middle_root")
"""


CAPSTONE_COMPARE_EDITABLE = """
from qiskit import QuantumCircuit

def ghz_candidate(style: str = "chain") -> QuantumCircuit:
    circuit = QuantumCircuit(3, 3)
    if style == "naive":
        circuit.h(0)
        circuit.cx(0, 1)
        circuit.cx(0, 2)
    elif style == "chain":
        circuit.h(0)
        circuit.cx(0, 1)
        circuit.cx(1, 2)
    elif style == "middle_root":
        circuit.h(1)
        circuit.cx(1, 0)
        circuit.cx(1, 2)
    else:
        raise ValueError("style must be naive, chain, or middle_root")
    circuit.measure([0, 1, 2], [0, 1, 2])
    return circuit

circuit = ghz_candidate(style="chain")
"""


CAPSTONE_REVIEW_EDITABLE = """
from qiskit import QuantumCircuit

def ghz_candidate(style: str = "middle_root", add_extra_layer: bool = False) -> QuantumCircuit:
    circuit = QuantumCircuit(3, 3)
    if style == "chain":
        circuit.h(0)
        circuit.cx(0, 1)
        circuit.cx(1, 2)
    else:
        circuit.h(1)
        circuit.cx(1, 0)
        circuit.cx(1, 2)
    if add_extra_layer:
        circuit.cz(0, 2)
    circuit.measure([0, 1, 2], [0, 1, 2])
    return circuit

circuit = ghz_candidate(style="middle_root", add_extra_layer=False)
"""


PATTERNS_QUIZ_A = [
    {
        "prompt": "What is the main reason to separate workflow configuration from the circuit builder?",
        "options": [
            "So the stable quantum routine and the changing experimental question can be reviewed independently",
            "So the circuit always uses fewer gates",
            "So Qiskit can avoid classical data structures",
        ],
        "correct_index": 0,
        "explanation": "Pattern design depends on making stable and variable responsibilities visible.",
    },
    {
        "prompt": "Why is an explicit reporting contract part of a pattern notebook?",
        "options": [
            "Because the evidence layer is part of the design, not a last-minute add-on",
            "Because all patterns must measure every qubit twice",
            "Because plotting replaces analysis",
        ],
        "correct_index": 0,
        "explanation": "Professional workflow design includes trustworthy readout and post-processing.",
    },
    {
        "prompt": "What makes a notebook pattern reusable?",
        "options": [
            "It keeps intent, circuit body, and evaluation record distinct enough to vary inputs without losing the story",
            "It hides the circuit inside a large helper",
            "It avoids any post-processing code",
        ],
        "correct_index": 0,
        "explanation": "Reusable patterns survive variation because their boundaries are clear.",
    },
]


PATTERNS_QUIZ_B = [
    {
        "prompt": "What should a workflow record contain beyond raw counts?",
        "options": [
            "The design brief, circuit or compile context, and a derived decision or score",
            "Only the screenshot of the circuit",
            "Only the token URL of the notebook server",
        ],
        "correct_index": 0,
        "explanation": "Pattern notebooks need enough context to be reviewed later.",
    },
    {
        "prompt": "Why is 'the code runs' a weak endpoint for this module?",
        "options": [
            "Because workflow design is about repeatable evidence and interfaces, not only execution success",
            "Because workflows should never execute locally",
            "Because classical preprocessing is irrelevant",
        ],
        "correct_index": 0,
        "explanation": "A runnable cell is not yet a professional workflow artifact.",
    },
]


PATTERNS_LAB_QUIZ_A = [
    {
        "prompt": "What is the safest way to edit a workflow-pattern notebook?",
        "options": [
            "Change one boundary at a time and check what remained stable in the result record",
            "Change the builder, scoring, and measurement map all at once",
            "Ignore the report and watch only the circuit drawing",
        ],
        "correct_index": 0,
        "explanation": "Controlled edits are how workflow boundaries become understandable.",
    },
    {
        "prompt": "What does a swapped measurement map threaten first?",
        "options": [
            "The interface between the circuit and its post-processing logic",
            "The existence of the entangling gates",
            "The ability to render the circuit",
        ],
        "correct_index": 0,
        "explanation": "Reporting order is part of the workflow contract.",
    },
]


PATTERNS_LAB_QUIZ_B = [
    {
        "prompt": "Why compare a lean pattern and a more structured pattern if both run?",
        "options": [
            "To judge which one keeps intent, evidence, and variation pressure most legible",
            "Because one of them must be mathematically wrong",
            "Because Qiskit requires barriers in all workflows",
        ],
        "correct_index": 0,
        "explanation": "Pattern quality is largely about reviewability under change.",
    },
    {
        "prompt": "What would make a pattern helper too clever?",
        "options": [
            "It hides the question being asked and the reporting contract behind opaque defaults",
            "It includes one short helper function",
            "It uses a dataclass for configuration",
        ],
        "correct_index": 0,
        "explanation": "Pattern abstraction should reduce clutter without erasing the design brief.",
    },
]


PATTERNS_PROBLEM_SETS = [
    (
        "Workflow Boundaries",
        [
            {
                "prompt": "Which comment is strongest in a workflow review?",
                "options": [
                    "The pattern hides where the question changes and where the circuit stays stable",
                    "The circuit is quantum enough",
                    "Please make the code shorter regardless of meaning",
                ],
                "correct_index": 0,
                "explanation": "Workflow patterns are judged by the clarity of their boundaries.",
            },
            {
                "prompt": "What belongs outside the quantum routine in a good pattern?",
                "options": [
                    "Configuration of the experimental question and downstream scoring choices",
                    "All measurement logic",
                    "Every explanation cell",
                ],
                "correct_index": 0,
                "explanation": "The routine should stay stable while the surrounding workflow varies.",
            },
        ],
    ),
    (
        "Evidence Records",
        [
            {
                "prompt": "Why is a derived score often more useful than raw counts alone?",
                "options": [
                    "It makes the decision rule explicit and reviewable",
                    "It removes the need to keep counts",
                    "It always improves accuracy",
                ],
                "correct_index": 0,
                "explanation": "Good post-processing clarifies what the workflow is claiming.",
            },
            {
                "prompt": "What is the danger of no recorded compile or execution context?",
                "options": [
                    "A later reader cannot reconstruct which environment assumptions shaped the result",
                    "The circuit stops being unitary",
                    "The notebook cannot include plots",
                ],
                "correct_index": 0,
                "explanation": "Pattern notebooks should preserve enough context for re-interpretation and critique.",
            },
        ],
    ),
    (
        "Variation Pressure",
        [
            {
                "prompt": "What tests whether a pattern is real rather than accidental?",
                "options": [
                    "Changing a small input condition and seeing whether the stable routine still makes sense",
                    "Adding more notebook themes",
                    "Removing every helper function",
                ],
                "correct_index": 0,
                "explanation": "Patterns become visible under controlled variation.",
            },
            {
                "prompt": "Why do pattern notebooks belong in the professional band?",
                "options": [
                    "Because circuits in real projects live inside larger workflows, not alone",
                    "Because basic gates no longer matter",
                    "Because algorithms are no longer relevant",
                ],
                "correct_index": 0,
                "explanation": "Professional work embeds quantum routines inside a broader engineering loop.",
            },
        ],
    ),
    (
        "Review Language",
        [
            {
                "prompt": "Which review statement is strongest?",
                "options": [
                    "The scoring logic depends on a readout order that the builder never states explicitly",
                    "The diagram looks nice",
                    "The notebook should remove every markdown explanation",
                ],
                "correct_index": 0,
                "explanation": "The issue is the hidden interface assumption between circuit and workflow record.",
            },
            {
                "prompt": "What should a good workflow pattern survive?",
                "options": [
                    "Controlled changes in inputs, basis choice, and scoring emphasis without losing clarity",
                    "Only the exact original notebook cell order",
                    "The absence of any documentation",
                ],
                "correct_index": 0,
                "explanation": "Patterns are valuable because they handle variation gracefully.",
            },
        ],
    ),
]


PATTERNS_STUDIO_QUIZ = [
    {
        "prompt": "What is a strong studio deliverable for this module?",
        "options": [
            "A small, configurable workflow notebook with a clear circuit builder and a reviewable result record",
            "A single screenshot of one circuit",
            "A notebook with no post-processing because counts are enough",
        ],
        "correct_index": 0,
        "explanation": "The studio should produce a reusable local-first pattern artifact.",
    },
    {
        "prompt": "Why is explicit workflow prose still necessary here?",
        "options": [
            "Because the professional value of the notebook lies in the interfaces and decision logic as much as in the circuit",
            "Because the circuit is unimportant",
            "Because prose replaces execution",
        ],
        "correct_index": 0,
        "explanation": "The prose tells reviewers how the pattern is meant to be used and judged.",
    },
]


HARDWARE_QUIZ_A = [
    {
        "prompt": "What is the first mistake hardware-aware redesign tries to prevent?",
        "options": [
            "Letting the transpiler quietly rescue a topology-hostile abstract circuit without inspecting the cost",
            "Using any two-qubit gate at all",
            "Running local simulation before cloud execution",
        ],
        "correct_index": 0,
        "explanation": "Hardware awareness begins by exposing compilation pressure, not ignoring it.",
    },
    {
        "prompt": "Why must coupling maps be explicit in a redesign notebook?",
        "options": [
            "Because the constraints shape what counts as a good circuit",
            "Because they increase the number of qubits in the circuit",
            "Because Qiskit cannot run without them",
        ],
        "correct_index": 0,
        "explanation": "The whole redesign question depends on visible constraints.",
    },
]


HARDWARE_QUIZ_B = [
    {
        "prompt": "What makes a manual redesign better than passive transpiler inspection?",
        "options": [
            "It anticipates the topology in the abstract design instead of paying routing cost after the fact",
            "It removes the need to benchmark",
            "It always lowers gate counts to the theoretical minimum",
        ],
        "correct_index": 0,
        "explanation": "Human redesign aims to reduce avoidable compiler burden.",
    },
    {
        "prompt": "Why keep the reporting layer stable while redesigning the entangling body?",
        "options": [
            "So behavioral comparisons stay fair while structure changes",
            "Because classical bits cannot be moved in Qiskit",
            "Because metrics only work on measured circuits",
        ],
        "correct_index": 0,
        "explanation": "A stable objective is necessary for meaningful redesign comparison.",
    },
]


HARDWARE_LAB_QUIZ_A = [
    {
        "prompt": "What should you inspect first when a compiled circuit gets much deeper?",
        "options": [
            "Which abstract interactions violated the declared topology",
            "The notebook font",
            "Whether the simulator seed changed",
        ],
        "correct_index": 0,
        "explanation": "Depth inflation usually traces back to topology or basis pressure.",
    },
    {
        "prompt": "Why compare a naive star GHZ to a middle-root or chain version?",
        "options": [
            "To see whether human-aware structure can reduce routing cost before compilation",
            "Because only one can be entangling",
            "Because the transpiler refuses to compile chain circuits",
        ],
        "correct_index": 0,
        "explanation": "The comparison reveals which costs were self-inflicted by the abstract layout.",
    },
]


HARDWARE_LAB_QUIZ_B = [
    {
        "prompt": "What is a strong redesign note?",
        "options": [
            "I moved the root to the middle because the line topology can host those interactions more directly",
            "I changed the circuit because it looked simpler",
            "I let the compiler decide and stopped reading",
        ],
        "correct_index": 0,
        "explanation": "Good redesign language ties structure to topology-aware intent.",
    },
    {
        "prompt": "Why is it useful to keep basis gates fixed during candidate comparison?",
        "options": [
            "So differences in cost are more attributable to circuit design than to a moving target backend model",
            "Because basis gates never matter",
            "Because fixed basis gates guarantee identical counts",
        ],
        "correct_index": 0,
        "explanation": "Stable constraints make comparison more interpretable.",
    },
]


HARDWARE_PROBLEM_SETS = [
    (
        "Constraint Reading",
        [
            {
                "prompt": "Which question belongs first in a hardware-aware review?",
                "options": [
                    "Which interactions in the abstract circuit stress the stated topology?",
                    "Which color theme does the notebook use?",
                    "How many markdown cells are present?",
                ],
                "correct_index": 0,
                "explanation": "Redesign starts with locating the structural source of cost.",
            },
            {
                "prompt": "Why is a coupling map a design object here?",
                "options": [
                    "Because it changes what circuit structure is cheap, awkward, or compiler-rescued",
                    "Because it determines the correct measurement result directly",
                    "Because it replaces basis gates",
                ],
                "correct_index": 0,
                "explanation": "The topology reshapes the engineering trade space.",
            },
        ],
    ),
    (
        "Compiler Versus Designer",
        [
            {
                "prompt": "What is the weak attitude this module opposes?",
                "options": [
                    "The transpiler will fix everything, so I do not need to read the compiled circuit",
                    "I should compare alternative layouts",
                    "I should measure cost with the same constraints across candidates",
                ],
                "correct_index": 0,
                "explanation": "Passive trust in compilation is not hardware-aware design.",
            },
            {
                "prompt": "What is a strong reason to inspect `depth_after` and `ops_after` together?",
                "options": [
                    "They tell a more complete story about route-induced structural cost",
                    "They are interchangeable numbers",
                    "They eliminate the need to see the compiled circuit",
                ],
                "correct_index": 0,
                "explanation": "Multiple metrics help connect design moves to concrete burden.",
            },
        ],
    ),
    (
        "Manual Alternatives",
        [
            {
                "prompt": "What makes a manual alternative worth keeping?",
                "options": [
                    "It preserves the objective while reducing avoidable compile-time pressure",
                    "It simply has fewer lines of Python",
                    "It hides more logic inside helpers",
                ],
                "correct_index": 0,
                "explanation": "The best redesigns keep behavior while improving structural fit.",
            },
            {
                "prompt": "Why might a middle-root GHZ be better than a star-root GHZ on a line?",
                "options": [
                    "Because the central qubit can reach both sides with less routing strain",
                    "Because middle qubits are more quantum",
                    "Because it avoids measurement entirely",
                ],
                "correct_index": 0,
                "explanation": "Positioning the root can align the abstract circuit with the topology.",
            },
        ],
    ),
    (
        "Review Language",
        [
            {
                "prompt": "Which review statement is strongest?",
                "options": [
                    "The abstract layout asks one qubit to talk nonlocally to two distant neighbors, so the compile cost is largely self-created",
                    "The transpiler output is ugly",
                    "The circuit should use more barriers",
                ],
                "correct_index": 0,
                "explanation": "Good review language names the structural reason for the cost.",
            },
            {
                "prompt": "What should support a final redesign claim?",
                "options": [
                    "Stable constraints, candidate metrics, and a narrative linking the better structure to those numbers",
                    "Only personal preference",
                    "Only the ideal-state intuition",
                ],
                "correct_index": 0,
                "explanation": "A redesign recommendation should be evidence-backed and constraint-aware.",
            },
        ],
    ),
]


HARDWARE_STUDIO_QUIZ = [
    {
        "prompt": "What is the right studio target for this module?",
        "options": [
            "A constraint-aware redesign memo comparing multiple candidate circuits under the same local topology assumptions",
            "A single ideal circuit with no compile study",
            "A notebook that treats the coupling map as background trivia",
        ],
        "correct_index": 0,
        "explanation": "The studio should feel like local hardware-aware engineering.",
    },
    {
        "prompt": "Why is human critique still necessary after compilation?",
        "options": [
            "Because the compiler shows what happened, but the designer still decides whether the original layout was wise",
            "Because compiled circuits are never useful",
            "Because metrics disappear after transpilation",
        ],
        "correct_index": 0,
        "explanation": "Compilation evidence is necessary, not sufficient, for redesign judgement.",
    },
]


NOISE_QUIZ_A = [
    {
        "prompt": "What should come before noisy execution in a verification notebook?",
        "options": [
            "A stated invariant or expected signature that can later be tested",
            "A guess that the histogram will be messy",
            "A transpiler seed with no explanation",
        ],
        "correct_index": 0,
        "explanation": "Verification needs a falsifiable expectation before it can diagnose anything.",
    },
    {
        "prompt": "Why is an ideal baseline essential?",
        "options": [
            "It separates design defects from expected physical distortion",
            "It eliminates the need for noise models",
            "It guarantees balanced counts",
        ],
        "correct_index": 0,
        "explanation": "Without a clean reference, every failure mode gets mixed together.",
    },
]


NOISE_QUIZ_B = [
    {
        "prompt": "What makes mitigation thinking professional rather than magical?",
        "options": [
            "It starts from diagnosis and explicit invariants instead of assuming every bad result can be rescued",
            "It always restores exact ideal behavior",
            "It never uses post-processing",
        ],
        "correct_index": 0,
        "explanation": "Mitigation is bounded, evidence-driven repair work.",
    },
    {
        "prompt": "Why is a buggy ideal circuit not the same as a noisy correct circuit?",
        "options": [
            "Because one fails the intended mechanism and the other distorts it while preserving its overall target",
            "Because both are equally random",
            "Because noise models cannot affect entangled states",
        ],
        "correct_index": 0,
        "explanation": "The distinction matters if you want to fix the right problem.",
    },
]


NOISE_LAB_QUIZ_A = [
    {
        "prompt": "What is the best use of a noisy preview in this lab?",
        "options": [
            "To compare it directly to the ideal baseline while keeping the same measurement contract",
            "To replace ideal simulation completely",
            "To prove the circuit is wrong whenever the counts change",
        ],
        "correct_index": 0,
        "explanation": "Noise is informative only relative to a stable baseline.",
    },
    {
        "prompt": "What distinguishes a missing-H bug from mild noise in the Bell example?",
        "options": [
            "The bug destroys the intended 00/11 balance mechanism itself",
            "The bug only changes the transpiler seed",
            "Nothing, they are equivalent",
        ],
        "correct_index": 0,
        "explanation": "The design defect changes the causal story, not just the output quality.",
    },
]


NOISE_LAB_QUIZ_B = [
    {
        "prompt": "Why use a simple correlated-outcome filter or postselection step?",
        "options": [
            "To inspect whether the main failure is leakage outside the intended support or a deeper design mismatch",
            "To guarantee perfect reconstruction",
            "To remove the need to state invariants",
        ],
        "correct_index": 0,
        "explanation": "Filtering is most useful when tied to a clear diagnostic question.",
    },
    {
        "prompt": "What would make a mitigation claim weak?",
        "options": [
            "It never says what invariant improved or what error mode it targeted",
            "It compares ideal and noisy counts side by side",
            "It preserves the reporting contract during diagnosis",
        ],
        "correct_index": 0,
        "explanation": "Mitigation needs targeted evidence, not generic optimism.",
    },
]


NOISE_PROBLEM_SETS = [
    (
        "Invariants",
        [
            {
                "prompt": "Which invariant is useful for a Bell-style correctness check?",
                "options": [
                    "Support concentrated on correlated outcomes with limited imbalance between 00 and 11",
                    "Only total shot count",
                    "The number of markdown cells",
                ],
                "correct_index": 0,
                "explanation": "A good invariant captures the intended structure of the state.",
            },
            {
                "prompt": "Why is 'looks entangled' weak evidence?",
                "options": [
                    "Because verification needs explicit criteria, not aesthetic impressions",
                    "Because entanglement can never be visualized",
                    "Because only hardware data matters",
                ],
                "correct_index": 0,
                "explanation": "A professional verifier states what will count as passing evidence.",
            },
        ],
    ),
    (
        "Bug Versus Noise",
        [
            {
                "prompt": "What is the strongest sign that a result reflects a design bug rather than modest noise?",
                "options": [
                    "The ideal circuit already fails the intended invariant",
                    "The noisy histogram is not perfectly sharp",
                    "The transpiler changed gate names",
                ],
                "correct_index": 0,
                "explanation": "If the clean mechanism is broken, noise is not the primary story.",
            },
            {
                "prompt": "Why run both ideal and noisy variants of the same circuit?",
                "options": [
                    "To separate structural correctness from robustness",
                    "To avoid writing explanations",
                    "To guarantee mitigation works",
                ],
                "correct_index": 0,
                "explanation": "The comparison reveals which failure mode you are actually facing.",
            },
        ],
    ),
    (
        "Mitigation Reasoning",
        [
            {
                "prompt": "What is a strong mitigation sentence?",
                "options": [
                    "Postselection raised the correlated-support rate, which suggests leakage was a major issue, but it did not repair the underlying circuit if the ideal invariant already failed",
                    "Postselection fixed quantum computing",
                    "Filtering makes every circuit correct",
                ],
                "correct_index": 0,
                "explanation": "Mitigation claims should stay bounded and mechanism-aware.",
            },
            {
                "prompt": "Why keep mitigation local-first in this project?",
                "options": [
                    "Because diagnosis and repair habits should be learnable without cloud dependencies",
                    "Because real hardware never matters",
                    "Because noise cannot be simulated locally",
                ],
                "correct_index": 0,
                "explanation": "Local-first makes the reasoning loop short and reproducible.",
            },
        ],
    ),
    (
        "Review Language",
        [
            {
                "prompt": "Which review note is strongest?",
                "options": [
                    "The notebook compares noisy data to no stated invariant, so the diagnosis is under-specified",
                    "The histogram is ugly",
                    "Please remove the baseline because it is redundant",
                ],
                "correct_index": 0,
                "explanation": "The real issue is the missing diagnostic standard.",
            },
            {
                "prompt": "What should support a final verification claim?",
                "options": [
                    "Ideal reference, noisy comparison, explicit invariant, and a reasoned distinction between defect and distortion",
                    "Only a single noisy run",
                    "Only intuition about what should happen",
                ],
                "correct_index": 0,
                "explanation": "Verification requires a multi-layer evidence chain.",
            },
        ],
    ),
]


NOISE_STUDIO_QUIZ = [
    {
        "prompt": "What is a strong studio outcome for this module?",
        "options": [
            "A verification notebook that states an invariant, compares clean and noisy behavior, and writes a bounded mitigation note",
            "A notebook that only declares the result noisy",
            "A notebook that assumes any deviation is a design bug",
        ],
        "correct_index": 0,
        "explanation": "The studio should culminate in disciplined diagnosis, not generic complaint.",
    },
    {
        "prompt": "Why does this module belong in the professional band?",
        "options": [
            "Because serious circuit work requires debugging and falsification habits, not only construction skill",
            "Because noise makes design impossible",
            "Because ideal simulation is obsolete",
        ],
        "correct_index": 0,
        "explanation": "Professional competence includes knowing how to test and challenge a design.",
    },
]


CAPSTONE_QUIZ_A = [
    {
        "prompt": "What distinguishes a capstone design notebook from an ordinary lab?",
        "options": [
            "It must compare multiple plausible candidates under explicit constraints and end with a recommendation",
            "It must avoid all metrics",
            "It must use the largest circuit in the repo",
        ],
        "correct_index": 0,
        "explanation": "Capstone work is defined by comparative judgement and defended choice.",
    },
    {
        "prompt": "Why is a design brief necessary before candidate generation?",
        "options": [
            "Because the winner depends on the stated objective and constraints, not on taste alone",
            "Because it guarantees the first candidate is best",
            "Because Qiskit requires one",
        ],
        "correct_index": 0,
        "explanation": "Without a brief, comparison becomes arbitrary.",
    },
]


CAPSTONE_QUIZ_B = [
    {
        "prompt": "What should a final recommendation include?",
        "options": [
            "The winning candidate, the metrics and observations that favored it, and the risks that remain",
            "Only the prettiest circuit diagram",
            "Only the lowest ideal depth regardless of constraints",
        ],
        "correct_index": 0,
        "explanation": "Professional recommendations name both evidence and residual uncertainty.",
    },
    {
        "prompt": "Why benchmark ideal, compiled, and noisy behavior together?",
        "options": [
            "Because a candidate can look good in one lens and weak in another",
            "Because one lens always tells the whole story",
            "Because the extra layers remove the need for prose",
        ],
        "correct_index": 0,
        "explanation": "Capstone judgement is multi-criteria, not single-metric worship.",
    },
]


CAPSTONE_LAB_QUIZ_A = [
    {
        "prompt": "What is the right mindset when comparing candidate families?",
        "options": [
            "Keep the brief fixed and ask which candidate best serves it under the same constraints",
            "Change the objective whenever a favorite candidate looks weak",
            "Ignore transpilation because the capstone is conceptual",
        ],
        "correct_index": 0,
        "explanation": "A fixed brief is what makes candidate comparison honest.",
    },
    {
        "prompt": "Why compare naive, chain, and middle-root GHZ candidates locally?",
        "options": [
            "To see which structure survives line-topology and noise pressure most credibly",
            "Because only one of them creates entanglement",
            "Because local simulation can replace final judgement",
        ],
        "correct_index": 0,
        "explanation": "The local study gives a concrete engineering comparison under declared constraints.",
    },
]


CAPSTONE_LAB_QUIZ_B = [
    {
        "prompt": "What would make a capstone comparison weak?",
        "options": [
            "Selecting a winner before compiling or benchmarking the alternatives",
            "Keeping the objective and constraints fixed",
            "Comparing noisy-success support across candidates",
        ],
        "correct_index": 0,
        "explanation": "Premature attachment is the enemy of serious review.",
    },
    {
        "prompt": "What is a useful residual-risk sentence?",
        "options": [
            "The middle-root design wins under the current line model, but the ranking could shift if the error profile changes materially",
            "There are no remaining risks because one candidate won",
            "Risk does not belong in a design recommendation",
        ],
        "correct_index": 0,
        "explanation": "Capstone recommendations should acknowledge scope and uncertainty.",
    },
]


CAPSTONE_PROBLEM_SETS = [
    (
        "Design Brief",
        [
            {
                "prompt": "Which statement best starts a capstone review?",
                "options": [
                    "The objective is to prepare a GHZ-style resource on a line topology with strong noisy support and reasonable compile cost",
                    "I like this circuit shape more than the others",
                    "Any entangled state will do",
                ],
                "correct_index": 0,
                "explanation": "The brief defines what the later comparison actually means.",
            },
            {
                "prompt": "Why must constraints be stated rather than implied?",
                "options": [
                    "Because they decide which tradeoffs are acceptable",
                    "Because constraints never affect rankings",
                    "Because metrics replace constraints",
                ],
                "correct_index": 0,
                "explanation": "A good recommendation is always recommendation-under-constraints.",
            },
        ],
    ),
    (
        "Candidate Benchmarks",
        [
            {
                "prompt": "What is a strong reason to keep multiple candidates alive for a while?",
                "options": [
                    "Different candidates may dominate under different evidence layers",
                    "Because picking one early is more professional",
                    "Because capstones should avoid decisions",
                ],
                "correct_index": 0,
                "explanation": "Benchmarking exists because early intuition is often incomplete.",
            },
            {
                "prompt": "What does compiled depth tell you in this module?",
                "options": [
                    "Part of the implementation burden under the declared topology and basis set",
                    "The exact final fidelity",
                    "Whether the candidate is mathematically valid",
                ],
                "correct_index": 0,
                "explanation": "Compiled depth is one lens on cost, not the entire verdict.",
            },
        ],
    ),
    (
        "Recommendation Writing",
        [
            {
                "prompt": "Which recommendation sentence is strongest?",
                "options": [
                    "I recommend the middle-root candidate because it preserves the target behavior while reducing compile burden and maintaining the best noisy support under the current model",
                    "I recommend the middle-root candidate because it feels elegant",
                    "I recommend the first candidate because it appeared first in the notebook",
                ],
                "correct_index": 0,
                "explanation": "The strongest sentence links candidate choice to explicit evidence and scope.",
            },
            {
                "prompt": "Why mention residual risk after naming a winner?",
                "options": [
                    "Because a professional review distinguishes current evidence from universal truth",
                    "Because it weakens the recommendation",
                    "Because risks only matter in cloud execution",
                ],
                "correct_index": 0,
                "explanation": "Honest recommendations are conditional on model and evidence.",
            },
        ],
    ),
    (
        "Review Judgement",
        [
            {
                "prompt": "Which review note is strongest?",
                "options": [
                    "The notebook names a winner but never explains the objective function used to rank the candidates",
                    "The notebook should have more animation",
                    "The notebook uses too many metrics by definition",
                ],
                "correct_index": 0,
                "explanation": "A ranking without a criterion is not yet a serious review.",
            },
            {
                "prompt": "What should a capstone notebook leave behind for another engineer?",
                "options": [
                    "A clear brief, comparable evidence, and a defensible recommendation",
                    "Only the final circuit",
                    "Only a celebratory conclusion",
                ],
                "correct_index": 0,
                "explanation": "The deliverable is a reviewable decision artifact, not just a result.",
            },
        ],
    ),
]


CAPSTONE_STUDIO_QUIZ = [
    {
        "prompt": "What is the right studio goal for the capstone module?",
        "options": [
            "Produce a compact design review that compares candidates and defends one under explicit constraints",
            "Produce the largest notebook in the repository",
            "Avoid final judgement to stay neutral",
        ],
        "correct_index": 0,
        "explanation": "The capstone should culminate in argued choice, not indefinite comparison.",
    },
    {
        "prompt": "Why is the capstone still local-first here?",
        "options": [
            "Because the aim is to train judgement and workflow discipline without outsourcing the reasoning loop",
            "Because hardware-aware thinking is unnecessary",
            "Because local simulation gives exact hardware truth",
        ],
        "correct_index": 0,
        "explanation": "Local-first keeps the course controllable while still training real design habits.",
    },
]


MODULES = [
    {
        "dir": MODULE_01_DIR,
        "title": "Qiskit Patterns and Workflow Design",
        "lecture_intro": """
        The first professional-design module changes the unit of thinking. Up to this point, the course has trained you to understand circuits, engineer them cleanly, and reason about algorithms as reusable motifs. That is necessary, but it is still not the full shape of professional work. In a real project, a circuit almost never lives alone. It sits inside a broader workflow: a design brief defines what question is being asked, configuration or preprocessing shapes the inputs, the circuit is built and executed under explicit assumptions, and a downstream step interprets the outputs into a decision, score, or report. This module teaches that larger unit.
        """,
        "learning_objective": """
        By the end of this lecture you should be able to explain why a Qiskit pattern is more than a reusable circuit, how the map-optimize-execute-post-process cycle changes notebook design, and how to structure a local-first workflow so that another engineer can tell what the stable routine is, what the variable experimental question is, and how the final recommendation is being derived from the raw evidence.
        """,
        "lecture_sections": [
            """
            The word pattern is easy to misuse. In weak notebook culture, a pattern can mean nothing more than a favorite code idiom or a circuit shape copied into multiple files. In this course the word is stricter. A pattern is a reusable arrangement of responsibilities. It says where configuration belongs, where the quantum body belongs, where the compile or execution context belongs, and where the reporting or decision rule belongs. That definition matters because it turns a runnable notebook into something you can vary, extend, and review without losing the causal story.
            """,
            """
            The best way to see the need for patterns is to notice how many notebook failures are really workflow failures. A beginner might put configuration, circuit construction, execution, plotting, and interpretation in one long cell. The cell runs, but the roles are mixed together. A later reader cannot tell which parts are stable, which parts are experimental knobs, and which assumptions the final interpretation is silently relying on. The purpose of this module is to prevent that kind of muddle before it hardens into habit.
            """,
            """
            The map-optimize-execute-post-process language helps because it gives the notebook a sequence of burdens. Map means: what question or data is being translated into a circuit-ready form? Optimize means: what compilation or structural decisions are shaping the circuit before execution? Execute means: under what local conditions is the circuit being sampled or simulated? Post-process means: what derived quantity or decision rule is being computed from the result? None of these stages is optional in serious work, even when some of them remain small in a teaching notebook. A compact sketch of the four stages appears below.
            """,
|
|
"""
|
|
One subtle benefit of pattern thinking is that it improves the quality of explanation as much as the quality of code. When the workflow is well structured, the prose can name each layer cleanly. You can tell a reviewer where the question is configured, where the stable quantum routine lives, and where the score or judgment is produced. That kind of narratability is not fluff. It is the difference between a notebook that merely runs today and a notebook that another engineer could trust, critique, or adapt later.
|
|
""",
|
|
"""
|
|
Pattern design is also where local-first discipline becomes especially valuable. If every stage of the workflow lives inside the same project and can be rerun locally, then debugging becomes much less theatrical. A bad score can be traced back through the record: was the circuit itself wrong, did the measurement contract drift, did the post-processing assume a readout order that the builder never stated, or did the question configuration silently change? These are concrete engineering questions, and they are easiest to learn when the whole loop stays close at hand.
|
|
""",
|
|
"""
|
|
The point of the module is therefore not to make the notebook feel more corporate. It is to give circuits a professional habitat. Once you can build a small local pattern that survives variation and yields a reviewable record, the later modules on redesign, verification, and capstone decision-making become much more natural. They stop feeling like add-ons and start feeling like the next responsibilities inside the same overall workflow.
|
|
""",
|
|
],
|
|
"anchor_intro": """
|
|
The anchor pattern is intentionally small. Its purpose is to make the workflow layers visible, not to impress with circuit complexity. Read the reference table first. Then inspect how the same small routine can be embedded inside a larger record that states the question being asked and the decision derived from the raw counts.
|
|
""",
|
|
"step_refs": PATTERNS_STEP_REFS,
|
|
"anchor_code": PATTERNS_ANCHOR,
|
|
"analysis_code": """
|
|
def build_agreement_probe(basis: str = "z") -> QuantumCircuit:
|
|
circuit = QuantumCircuit(2, 2)
|
|
circuit.h(0)
|
|
circuit.cx(0, 1)
|
|
if basis == "x":
|
|
circuit.h([0, 1])
|
|
elif basis != "z":
|
|
raise ValueError("basis must be 'z' or 'x'")
|
|
circuit.measure([0, 1], [0, 1])
|
|
return circuit
|
|
|
|
def run_pattern(basis: str, shots: int = 256) -> dict[str, object]:
|
|
circuit = build_agreement_probe(basis)
|
|
compiled = transpile_summary(
|
|
circuit,
|
|
BasicSimulator(),
|
|
basis_gates=["rz", "sx", "x", "cx"],
|
|
)
|
|
counts = simulate_counts(circuit, shots=shots)
|
|
probabilities = counts_to_probabilities(counts)
|
|
agreement = round(probabilities.get("00", 0.0) + probabilities.get("11", 0.0), 3)
|
|
return {
|
|
"basis": basis,
|
|
"depth_after": compiled["depth_after"],
|
|
"agreement_rate": agreement,
|
|
"counts": counts,
|
|
}

        [run_pattern(basis) for basis in ["z", "x"]]
        """,
        "lecture_quiz_a": PATTERNS_QUIZ_A,
        "lecture_quiz_b": PATTERNS_QUIZ_B,
        "lecture_review_sections": [
            """
            A strong self-check for this module is whether you can point to each stage of the workflow and say what would count as a mistake there. If the question configuration is wrong, the circuit may still run while answering the wrong question. If the circuit body is wrong, the result record may be beautifully structured and still meaningless. If the measurement contract drifts, the post-processing may produce a polished lie. Pattern thinking helps because it lets you localize those risks instead of treating the whole notebook as one opaque blob.
            """,
            """
            Another self-check is whether your prose tracks the code boundaries honestly. If the notebook says the builder is reusable, can you name which parameters are genuinely meant to vary? If it says the output score is meaningful, can you say exactly how that score is derived from the counts? Professional workflow design is not only about code refactoring. It is about making the notebook legible as an engineering argument.
            """,
        ],
        "lecture_reflections": [
            "Write a paragraph explaining why a circuit builder and a workflow pattern are not the same thing.",
            "Describe one hidden assumption between execution and post-processing that you would want a professional notebook to state explicitly.",
        ],
        "lab_intro": """
        The lab treats workflow boundaries as things you can deliberately stress. You will vary the question layer, the structural presentation of the routine, and the reporting contract while keeping the same small local task in view.
        """,
        "lab_protocol": """
        Before every edit, say which workflow layer you are changing and which layers should remain stable. Then inspect the circuit, the counts preview, and the implied result record together. The point is not merely that the circuit still runs. The point is whether the workflow remains reviewable.
        """,
        "lab_sections": [
            {
                "heading": "Lab 1: Question Layer And Basis Choice",
                "intro": """
                Start with the smallest meaningful variation: keep the core routine recognizable while changing the question the workflow asks of it. This is the fastest way to see why configuration belongs outside the stable body of the pattern.
                """,
                "step_refs": PATTERNS_STEP_REFS,
                "editable_code": PATTERNS_ANCHOR,
                "editable_title": "Lab 1: Workflow Question Layer",
                "editable_instructions": "Switch the basis between z and x, and explain which part of the notebook changed and which part remained the reusable core.",
            },
            {
                "heading": "Lab 2: Structured Pattern Versus Flat Cell",
                "intro": """
                Next compare a lean pattern with one that adds deliberate structure markers such as barriers or clearer layer boundaries. The important question is not which version is shorter. It is which one tells the workflow story more clearly.
                """,
                "editable_code": PATTERNS_CONFIG_EDITABLE,
                "editable_title": "Lab 2: Structured Pattern Boundaries",
                "editable_instructions": "Toggle the barrier and basis choices, then decide whether the structure is clarifying the workflow or merely decorating it.",
            },
            {
                "heading": "Lab 3: Reporting Contract Stress Test",
                "intro": """
                Finally, attack the interface between the circuit and its post-processing by changing the reporting order. This is where many workflow notebooks quietly fail even when the circuit body is fine.
                """,
                "editable_code": PATTERNS_REPORTING_EDITABLE,
                "editable_title": "Lab 3: Reporting Contract",
                "editable_instructions": "Change the measurement mapping only if you can also explain how the downstream interpretation would have to change.",
            },
        ],
        "lab_quiz_a": PATTERNS_LAB_QUIZ_A,
        "lab_quiz_b": PATTERNS_LAB_QUIZ_B,
        "lab_debrief": """
        The main lesson of the lab is that workflow quality can be damaged without changing the core quantum idea at all. A notebook can keep the same circuit body and still become worse if the question configuration becomes hidden, the reporting contract becomes implicit, or the downstream score stops being traceable. That is why the professional band begins here. It changes what you think the notebook itself is responsible for.
        """,
        "lab_reflections": [
            "Which workflow boundary became most visible to you in the lab, and why?",
            "Write a short review note about a notebook whose circuit is fine but whose reporting contract is ambiguous.",
        ],
        "problem_intro": """
        These problems test whether you can reason about patterns as engineering artifacts rather than only as circuit snippets.
        """,
        "problem_how": """
        When an answer sounds attractive because it compresses several responsibilities into one vague statement, treat that as a warning sign. The strong answers in this module preserve clear boundaries between intent, circuit body, and evidence.
        """,
        "problem_sets": PATTERNS_PROBLEM_SETS,
        "problem_case": """
        A very common failure in notebook-heavy projects is the illusion of reuse. A cell is copied a few times, a parameter or two is exposed, and the writer calls the result a pattern. But the pattern is not real if the question being asked is still implicit, the output interpretation depends on unstated conventions, or a future reader cannot tell which parts are meant to vary. These problems are trying to inoculate you against that weak standard.
        """,
        "problem_reflections": [
            "Explain why an execution record with no design brief is weaker than it first appears.",
            "Describe the minimum information a local-first workflow notebook should record so another engineer can rerun and critique it.",
        ],
        "problem_exit": """
        Move on when you can read a notebook as a workflow pattern with responsibilities, not merely as a series of runnable cells.
        """,
        "studio_intro": """
        The studio turns the module into a design exercise: create a small workflow artifact that you would actually trust and hand to another engineer.
        """,
        "studio_brief": """
        Build a compact local-first pattern notebook that exposes configuration, quantum routine, execution assumptions, and result interpretation clearly enough that a reviewer could modify one layer without guessing what the other layers are doing.
        """,
        "studio_sections": [
            {
                "heading": "Studio Prompt 1: Reusable Workflow Shell",
                "intro": """
                Create a small pattern shell that makes the stable routine visible and parameterizes the question being asked. The challenge is clarity, not size.
                """,
                "editable_code": PATTERNS_ANCHOR,
                "editable_title": "Studio 1: Workflow Shell",
                "editable_instructions": "Refine the builder until the question layer and the stable circuit layer are visibly separate.",
            },
            {
                "heading": "Studio Prompt 2: Explicit Result Record",
                "intro": """
                Turn the same pattern into a notebook that would still make sense a week later by recording what was asked and how the result should be interpreted.
                """,
                "editable_code": PATTERNS_CONFIG_EDITABLE,
                "editable_title": "Studio 2: Result Record Pattern",
                "editable_instructions": "Structure the pattern so a downstream result record could be attached without guessing hidden assumptions.",
            },
            {
                "heading": "Studio Prompt 3: Interface Audit",
                "intro": """
                Use the reporting-contract variant to write a short interface audit. If the reporting order changes, what else in the workflow must change with it?
                """,
                "editable_code": PATTERNS_REPORTING_EDITABLE,
                "editable_title": "Studio 3: Interface Audit",
                "editable_instructions": "Treat the reporting map as an API decision and explain what a reviewer would need to know before trusting it.",
            },
        ],
        "studio_quiz": PATTERNS_STUDIO_QUIZ,
        "studio_debrief": """
        A strong studio notebook in this module feels like a small internal tool. It does not only show a circuit. It states how the circuit is being asked to act inside a wider local-first workflow and how the notebook arrives at its interpretation of the result.
        """,
        "studio_reflections": [
            "Which part of your workflow shell would another engineer most likely need to change first?",
            "How did you make the reporting contract explicit in the final studio version?",
            "What information did you decide belonged in the final result record?",
            "Name one workflow habit from this module that should survive all later capstone work.",
        ],
    },
    {
        "dir": MODULE_02_DIR,
        "title": "Hardware-Aware Redesign Studio",
        "lecture_intro": """
        The earlier transpilation module taught you how to read compiled consequences. This professional module raises the standard. It is no longer enough to observe that the transpiler inserted extra work. You now need to decide when that burden was self-created by the abstract circuit and when a human redesign could have reduced it. This is the module where topology and basis constraints become something you actively design against rather than something you merely endure.
        """,
        "learning_objective": """
        By the end of this lecture you should be able to explain how a topology or basis-gate model changes the meaning of a good abstract circuit, inspect a compiled rewrite and identify where the burden came from, and defend a manual redesign that keeps the objective stable while fitting the declared local constraints more intelligently.
        """,
        "lecture_sections": [
            """
            A hardware-aware notebook begins with intellectual honesty about the abstract circuit. There is nothing wrong with writing the clean ideal version first. In fact, doing so is useful because it makes the original intention explicit. The mistake is pretending that the ideal structure is still automatically a good design once line topology, basis restrictions, or routing pressure enter the picture. This module is about learning when the gap between abstract intention and constrained implementation becomes large enough that a human should intervene.
            """,
            """
            Transpilation is often introduced as a convenience layer, but in serious practice it is also a diagnostic instrument. The compiled circuit is evidence. It tells you how expensive your abstract layout became under the declared constraints. But evidence is not yet judgement. A professional designer still has to ask why the compiled circuit became expensive. Did the abstract routine force a nonlocal interaction pattern? Did the choice of control qubit create unnecessary route length? Did a reusable block remain elegant in theory but become painful on the given coupling map? Those are design questions, not compiler questions.
            """,
            """
            The coupling map is especially important because it turns geometry into cost. A line topology makes some interactions cheap and others awkward. That does not merely change the numbers in a summary table. It changes which circuit shapes are responsible and which are careless. A star-like entangling pattern may be perfectly readable in ideal mode and yet become an invitation to routing overhead under a line. Once you understand that, redesign stops feeling like a secondary optimization pass and starts feeling like part of the original design duty.
            """,
            """
            Manual redesign should still be disciplined, not heroic. The goal is not to outsmart the compiler at every step or to assume that human intervention is always superior. The goal is to propose a small number of plausible alternatives whose structural relationship to the topology is clearer. If one alternative reduces compile cost while preserving the objective and the evidence path, that is useful. If not, the comparison still teaches you something about when default compilation is already doing reasonable work.
            """,
            """
            This module also strengthens review language. Saying that a circuit is more hardware-aware because it looks less busy is weak. A better sentence says that a middle-root layout aligns the entangling structure more closely with the line topology, reducing the need for route-inducing rewrites. That kind of sentence is stronger because it names the structural reason, not only the observed outcome. The notebook should increasingly sound like engineering review, not like subjective taste.
            """,
            """
            Hardware-aware redesign is therefore the first fully mature module in the course. It expects you to juggle ideal intention, explicit local constraints, compile evidence, and candidate comparison at once. If that feels more demanding than earlier modules, that is exactly right. Professional circuit design lives in that pressure field.
            """,
        ],
        "anchor_intro": """
        The anchor circuit is intentionally topology-hostile on a line. That is not a bug in the lesson. It is what makes the redesign problem visible. Read the reference table, inspect the abstract circuit, and then compare it to a human-aware alternative under the same local constraint model.
        """,
        "step_refs": HARDWARE_STEP_REFS,
        "anchor_code": HARDWARE_ANCHOR,
        "anchor_context_source": LINE_CONTEXT,
        "anchor_context_name": "simulate_line_counts",
        "analysis_code": """
        LOCAL_BASIS = ["rz", "sx", "x", "cx"]

        def naive_star_ghz() -> QuantumCircuit:
            circuit = QuantumCircuit(4, 4)
            circuit.h(0)
            circuit.cx(0, 1)
            circuit.cx(0, 2)
            circuit.cx(0, 3)
            circuit.measure([0, 1, 2, 3], [0, 1, 2, 3])
            return circuit

        def middle_root_ghz() -> QuantumCircuit:
            circuit = QuantumCircuit(4, 4)
            circuit.h(1)
            circuit.cx(1, 0)
            circuit.cx(1, 2)
            circuit.cx(2, 3)
            circuit.measure([0, 1, 2, 3], [0, 1, 2, 3])
            return circuit

        results = []
        for name, builder in [("naive", naive_star_ghz), ("middle_root", middle_root_ghz)]:
            circuit = builder()
            summary = transpile_summary(
                circuit,
                BasicSimulator(),
                basis_gates=LOCAL_BASIS,
                coupling_map=line_coupling_map(4),
                optimization_level=1,
            )
            results.append(
                {
                    "candidate": name,
                    "depth_after": summary["depth_after"],
                    "size_after": summary["size_after"],
                    "ops_after": summary["ops_after"],
                }
            )
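
        # Hedged add-on (assumes transpile_summary's "ops_after" maps gate
        # names to counts, as the rows above suggest): the post-routing cx
        # budget is often a sharper topology-pressure signal than depth alone.
        for row in results:
            row["cx_after"] = row["ops_after"].get("cx", 0)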

        results
        """,
        "lecture_quiz_a": HARDWARE_QUIZ_A,
        "lecture_quiz_b": HARDWARE_QUIZ_B,
        "lecture_review_sections": [
            """
            A strong self-check in this module is whether you can explain compile inflation in structural terms. If all you can say is that the transpiled circuit got deeper, you have observed a symptom but not yet analyzed a cause. Professional redesign begins when you can name the abstract interaction pattern that invited the cost.
            """,
            """
            Another self-check is whether your redesign keeps the objective stable. It is easy to make a circuit cheaper by quietly changing what it is trying to do. That is not redesign. That is drift. The discipline of this module is to keep the intended behavior and reporting contract fixed while the internal structure is reconsidered under constraints.
            """,
        ],
        "lecture_reflections": [
            "Explain why a compiled circuit is evidence but not yet a final design verdict.",
            "Describe one structural feature that can make an abstract circuit needlessly hostile to a line topology.",
        ],
        "lab_intro": """
        The lab turns topology pressure into something concrete. You will stress an abstract circuit, inspect the cost, and then edit alternative layouts that try to preserve the same objective more intelligently.
        """,
        "lab_protocol": """
        Keep the constraints fixed while you compare candidates. Use the same basis-gate set, the same coupling map, and the same reporting contract. Only then do the structural differences mean what you think they mean.
        """,
        "lab_sections": [
            {
                "heading": "Lab 1: Topology Stress Test",
                "intro": """
                Start with the naive abstract circuit and let the line-topology preview expose its pressure. The purpose is not to shame the ideal circuit. The purpose is to learn how the mismatch shows up.
                """,
                "step_refs": HARDWARE_STEP_REFS,
                "editable_code": HARDWARE_ANCHOR,
                "editable_title": "Lab 1: Topology Stress",
                "editable_instructions": "Edit the abstract entangling pattern and watch how the line-constrained preview reacts. Explain which interaction is creating the pressure.",
                "context_source": LINE_CONTEXT,
                "context_name": "simulate_line_counts",
            },
            {
                "heading": "Lab 2: Manual Redesign",
                "intro": """
                Now try a line-friendlier candidate whose abstract structure already anticipates the coupling map. The lesson is not that one circuit shape always wins. The lesson is that you should be able to propose a plausible alternative and explain why it fits the hardware story better.
                """,
                "editable_code": HARDWARE_MANUAL_EDITABLE,
                "editable_title": "Lab 2: Manual Redesign",
                "editable_instructions": "Refine the line-aware candidate while keeping the objective and readout stable. Explain why the new layout should ask less of the compiler.",
                "context_source": LINE_CONTEXT,
                "context_name": "simulate_line_counts",
            },
            {
                "heading": "Lab 3: Candidate Comparison",
                "intro": """
                Compare several candidate families under the same local constraints. This is the closest the module gets to real redesign practice: more than one plausible answer, one fixed constraint model, and a need for explicit judgement.
                """,
                "editable_code": HARDWARE_COMPARE_EDITABLE,
                "editable_title": "Lab 3: Candidate Comparison",
                "editable_instructions": "Switch among naive, chain, and middle_root candidates. Say which topology pressure you expect each one to create before trusting the counts or metrics.",
                "context_source": LINE_CONTEXT,
                "context_name": "simulate_line_counts",
            },
        ],
        "lab_quiz_a": HARDWARE_LAB_QUIZ_A,
        "lab_quiz_b": HARDWARE_LAB_QUIZ_B,
        "lab_debrief": """
        The key change after this lab should be that topology no longer feels like an external annoyance. It should feel like part of the circuit-design problem itself. Once that shift happens, the difference between observing transpilation and redesigning for it becomes much clearer.
        """,
        "lab_reflections": [
            "Which candidate in the lab most clearly demonstrated self-inflicted compile cost, and why?",
            "Write a short redesign note defending one manual alternative under the fixed line-topology assumptions.",
        ],
        "problem_intro": """
        These problems check whether you can turn compile evidence into design judgement instead of stopping at surface observations.
        """,
        "problem_how": """
        Prefer answers that locate cost in structure and keep the objective stable. Reject answers that talk about hardware awareness as though it were a mood instead of a concrete constraint-and-redesign practice.
        """,
        "problem_sets": HARDWARE_PROBLEM_SETS,
        "problem_case": """
        A weak hardware-aware notebook often has the right ingredients but the wrong posture. It shows an abstract circuit, a compiled circuit, and maybe a couple of metrics, but it never really asks whether the original layout was a wise thing to ask of the hardware in the first place. This problems notebook pushes against that passivity. The point is not to admire compiler output. The point is to decide whether a human should have designed differently.
        """,
        "problem_reflections": [
            "Explain why a topology-aware alternative is not automatically better unless the objective is kept stable.",
            "Describe the minimum evidence you would want before accepting a hardware-aware redesign recommendation.",
        ],
        "problem_exit": """
        Move on when you can name the structural source of compile cost and propose a plausible human-aware alternative under fixed constraints.
        """,
        "studio_intro": """
        The studio makes the redesign standard explicit: build, compare, justify.
        """,
        "studio_brief": """
        Create a small local redesign memo that begins with an abstract target, benchmarks at least two constrained candidates, and ends with a clear recommendation tied to the declared topology and basis assumptions.
        """,
        "studio_sections": [
            {
                "heading": "Studio Prompt 1: Stress A Clean Ideal Circuit",
                "intro": """
                Start from a clean abstract target and let the local constraint model expose where it becomes awkward.
                """,
                "editable_code": HARDWARE_ANCHOR,
                "editable_title": "Studio 1: Ideal Target Under Pressure",
                "editable_instructions": "Use the naive target as a reference point, not as the automatic winner.",
                "context_source": LINE_CONTEXT,
                "context_name": "simulate_line_counts",
            },
            {
                "heading": "Studio Prompt 2: Propose A Human Alternative",
                "intro": """
                Produce an alternative whose structure anticipates the line rather than waiting for the transpiler to rescue it.
                """,
                "editable_code": HARDWARE_MANUAL_EDITABLE,
                "editable_title": "Studio 2: Human-Aware Alternative",
                "editable_instructions": "Refine a line-aware candidate and prepare to defend it with metrics and structural reasoning.",
                "context_source": LINE_CONTEXT,
                "context_name": "simulate_line_counts",
            },
            {
                "heading": "Studio Prompt 3: Write The Recommendation",
                "intro": """
                Compare the candidate family and make a choice like an engineer rather than a spectator.
                """,
                "editable_code": HARDWARE_COMPARE_EDITABLE,
                "editable_title": "Studio 3: Recommendation Candidate Family",
                "editable_instructions": "Use candidate comparison to support a recommendation that explicitly names the topology pressure and the chosen response.",
                "context_source": LINE_CONTEXT,
                "context_name": "simulate_line_counts",
            },
        ],
        "studio_quiz": HARDWARE_STUDIO_QUIZ,
        "studio_debrief": """
        A successful studio notebook here reads like a redesign memo, not like a complaint about compilation. It should show what the abstract circuit was trying to do, how the local constraints punished it, and why the recommended alternative better fits the declared hardware story.
        """,
        "studio_reflections": [
            "Which structural feature turned out to matter most in your final redesign recommendation?",
            "What evidence from the constrained comparison most influenced your choice?",
            "How did you keep the reporting contract stable across the candidates?",
            "Name one redesign habit from this module that should carry into the capstone.",
        ],
    },
    {
        "dir": MODULE_03_DIR,
        "title": "Noise-Aware Verification and Mitigation",
        "lecture_intro": """
        By the time a learner reaches this module, they often know enough to be dangerous. They can build nontrivial circuits, they can transpile them, and they can run noisy simulations. What they still may not know how to do is diagnose failure cleanly. Was the design broken in ideal form? Did the correct design merely degrade under noise? Is the evidence strong enough to tell the difference? And if a simple mitigation idea helps, what exactly did it help with? This module turns those questions into a professional workflow.
        """,
        "learning_objective": """
        By the end of this lecture you should be able to state meaningful verification invariants, compare ideal and noisy behavior without confusing the two, distinguish a design defect from expected physical distortion, and write a bounded mitigation note that says what improved, what did not, and why the conclusion is not magical.
        """,
        "lecture_sections": [
            """
            Verification begins before execution. That sentence sounds simple, but it changes everything. A weak notebook executes first and only later squints at the histogram. A strong notebook states what it intends to see and why. Invariants, expected support patterns, balance conditions, and other criteria are not bureaucratic overhead. They are what make later diagnosis possible. Without them, every surprising result looks equally mysterious and every attempted fix becomes guesswork.
            """,
            """
            The next crucial distinction is between design error and physical distortion. A design error is present in the ideal circuit already. A physical distortion is what happens when a conceptually correct design is placed under imperfect execution conditions. The two can combine, which is exactly why the notebook needs to keep the ideal baseline alive. The ideal baseline is not there to comfort you. It is there to tell you whether the intended mechanism existed before noise ever entered the picture.
            """,
            """
            Noise-aware reasoning also requires a better attitude toward mitigation. Mitigation is not a magical layer that transforms any disappointing output into success. It is a targeted attempt to compensate for identifiable failure modes. That means a mitigation step should be linked to a diagnosis. If postselection on correlated outcomes improves a Bell-style experiment, that suggests leakage outside the intended support was important. It does not prove that the underlying design is correct in every deeper sense, and it certainly does not justify skipping the ideal baseline.
            """,
            """
            Local-first tooling is a serious advantage here because it shortens the diagnostic loop. You can define a circuit, run it ideally, run it with a local noise model, inspect the difference, and then test a bounded mitigation idea without waiting on external systems or shifting machine state. That repeatability matters. Verification skill grows through many small comparisons, not through one dramatic experiment.
            """,
            """
            A professional notebook should therefore sound more like a case file than like a performance. It should state the invariant, present the ideal reference, show the noisy comparison, and say what conclusion follows. If a mitigation is tried, the notebook should say which failure mode it was targeting and what residual uncertainty remains. This style may feel less flashy, but it is much more powerful. It creates notebooks that can support real debugging and review.
            """,
            """
            The broader lesson of the module is that professional confidence comes from the ability to falsify your own story. If you can say what would count as a failing ideal baseline, what would count as acceptable noisy degradation, and what evidence would justify a limited mitigation claim, then you are no longer treating quantum results as theatrical surprises. You are treating them as engineering evidence.
            """,
        ],
        "anchor_intro": """
        The anchor example is intentionally simple because the diagnostic logic is the point. A Bell-style circuit gives you a clear ideal signature, a clear noisy degradation story, and a plausible place to test bounded mitigation ideas.
        """,
        "step_refs": NOISE_STEP_REFS,
        "anchor_code": NOISE_ANCHOR,
        "anchor_context_source": NOISE_CONTEXT,
        "anchor_context_name": "simulate_noisy_counts",
        "analysis_code": """
        demo_noise = build_demo_noise_model(
            single_qubit_error=0.01,
            two_qubit_error=0.05,
            readout_error=0.03,
        )

        def bell_candidate(bug: bool = False) -> QuantumCircuit:
            circuit = QuantumCircuit(2, 2)
            if not bug:
                circuit.h(0)
                circuit.cx(0, 1)
            circuit.measure([0, 1], [0, 1])
            return circuit

        def bell_report(bug: bool, noisy: bool) -> dict[str, object]:
            counts = simulate_counts(
                bell_candidate(bug),
                shots=256,
                noise_model=demo_noise if noisy else None,
            )
            probabilities = counts_to_probabilities(counts)
            correlated_support = round(
                probabilities.get("00", 0.0) + probabilities.get("11", 0.0),
                3,
            )
            balance_gap = round(
                abs(probabilities.get("00", 0.0) - probabilities.get("11", 0.0)),
                3,
            )
            return {
                "bug": bug,
                "noisy": noisy,
                "correlated_support": correlated_support,
                "balance_gap": balance_gap,
                "counts": counts,
            }
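
        # Hedged mitigation sketch (a new helper, illustration only): postselect
        # onto the intended correlated support. This is a bounded diagnostic
        # move; improving a filtered statistic does not certify the design.
        def postselect_correlated(counts: dict[str, int]) -> dict[str, int]:
            return {key: value for key, value in counts.items() if key in {"00", "11"}}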

        [
            bell_report(False, False),
            bell_report(False, True),
            bell_report(True, False),
            bell_report(True, True),
        ]
        """,
        "lecture_quiz_a": NOISE_QUIZ_A,
        "lecture_quiz_b": NOISE_QUIZ_B,
        "lecture_review_sections": [
            """
            A strong self-check for this module is whether you can tell a tight diagnostic story in one paragraph. Can you say what the invariant was, what the ideal baseline showed, what the noisy comparison changed, and whether the main failure still looks like noise, design, or both? If not, the notebook may still be gathering data but it is not yet performing verification.
            """,
            """
            Another self-check is whether your mitigation language stays bounded. If a local filter improves a summary statistic, can you say exactly which statistic improved and why that does not automatically prove global correctness? This boundedness is not pessimism. It is what makes the notebook professionally trustworthy.
            """,
        ],
        "lecture_reflections": [
            "Explain why a verification notebook should state an invariant before executing a noisy simulation.",
            "Describe the difference between a design defect and a noise effect using one concrete example.",
        ],
        "lab_intro": """
        The lab turns diagnosis into a series of controlled comparisons: correct versus buggy, ideal versus noisy, and raw versus filtered evidence.
        """,
        "lab_protocol": """
        Keep the reporting contract fixed while you compare cases. The point is to isolate why the outcome changed, not to change the whole experiment and call the result insight.
        """,
        "lab_sections": [
            {
                "heading": "Lab 1: Ideal And Noisy Baselines",
                "intro": """
                Start by comparing the same correct circuit in ideal and noisy form. This establishes what the expected distortion looks like when the underlying mechanism is intact.
                """,
                "step_refs": NOISE_STEP_REFS,
                "editable_code": NOISE_ANCHOR,
                "editable_title": "Lab 1: Ideal Versus Noisy Baseline",
                "editable_instructions": "Keep the correct Bell-style mechanism intact and compare how the same evidence path behaves under local noise.",
                "context_source": NOISE_CONTEXT,
                "context_name": "simulate_noisy_counts",
            },
            {
                "heading": "Lab 2: Bug Versus Noise",
                "intro": """
                Now introduce a deliberate design defect and compare that story to mere noisy degradation. The goal is to make the difference between broken mechanism and distorted mechanism feel concrete.
                """,
                "editable_code": NOISE_BUG_EDITABLE,
                "editable_title": "Lab 2: Design Bug Stress Test",
                "editable_instructions": "Toggle the bug switch and explain why the resulting failure mode is not the same as mild noise on a correct design.",
            },
            {
                "heading": "Lab 3: Bounded Filtering And Mitigation",
                "intro": """
                Finally, use a simple filtering idea to see what can and cannot be improved after the fact. This is not a miracle stage. It is a disciplined diagnostic stage.
                """,
                "editable_code": NOISE_FILTER_EDITABLE,
                "editable_title": "Lab 3: Filtering And Mitigation",
                "editable_instructions": "Keep the circuit fixed and think about what a correlated-outcome filter would reveal, rather than promising that it fixes everything.",
                "context_source": NOISE_CONTEXT,
                "context_name": "simulate_noisy_counts",
            },
        ],
        "lab_quiz_a": NOISE_LAB_QUIZ_A,
        "lab_quiz_b": NOISE_LAB_QUIZ_B,
        "lab_debrief": """
        The lab should leave you less tolerant of vague debugging. You now have a cleaner language for saying whether a problem is already visible ideally, whether noise is merely degrading a correct mechanism, and whether a simple mitigation step is clarifying a specific failure mode or merely masking confusion.
        """,
        "lab_reflections": [
            "Which comparison in the lab most clarified the difference between a defect and a distortion?",
            "Write a short mitigation note that improves one statistic without overstating what was fixed.",
        ],
        "problem_intro": """
        These problems test whether your verification language has become sharp enough to survive small changes in wording and evidence context.
        """,
        "problem_how": """
        Prefer answers that separate mechanism, evidence, and conclusion. Reject answers that use the words noise or mitigation as if they explained themselves.
        """,
        "problem_sets": NOISE_PROBLEM_SETS,
        "problem_case": """
        Many weak notebooks reach for the word noise too quickly. Something went wrong, the histogram looks imperfect, therefore the explanation must be noise. That move is dangerous because it can hide design defects behind physical-sounding language. These problems are designed to slow you down and make you earn every diagnostic conclusion.
        """,
        "problem_reflections": [
            "Explain why a noisy result without an ideal comparison is diagnostically weak.",
            "Describe one verification invariant you would want before trusting a more advanced circuit family.",
        ],
        "problem_exit": """
        Move on when you can write a short diagnosis that distinguishes defect, distortion, and bounded mitigation cleanly.
        """,
        "studio_intro": """
        The studio turns diagnosis into a compact verification case study.
        """,
        "studio_brief": """
        Produce a local verification notebook that states the intended invariant, compares clean and noisy behavior, and writes a mitigation note that is useful precisely because it is limited and evidence-backed.
        """,
        "studio_sections": [
            {
                "heading": "Studio Prompt 1: Baseline Case File",
                "intro": """
                Start with a correct baseline and make the invariant as explicit as the circuit itself.
                """,
                "editable_code": NOISE_ANCHOR,
                "editable_title": "Studio 1: Baseline Case File",
                "editable_instructions": "Use the correct circuit as a baseline and write the invariant you want the later noisy comparison to respect.",
                "context_source": NOISE_CONTEXT,
                "context_name": "simulate_noisy_counts",
            },
            {
                "heading": "Studio Prompt 2: Defect Comparison",
                "intro": """
                Add a deliberate bug so your case file contains a real contrast between broken mechanism and noisy degradation.
                """,
                "editable_code": NOISE_BUG_EDITABLE,
                "editable_title": "Studio 2: Defect Comparison",
                "editable_instructions": "Use the bug toggle to construct a contrast case and explain what diagnostic conclusion the comparison supports.",
            },
            {
                "heading": "Studio Prompt 3: Bounded Mitigation Note",
                "intro": """
                Finish by writing the most disciplined mitigation note you can: specific, useful, and limited.
                """,
                "editable_code": NOISE_FILTER_EDITABLE,
                "editable_title": "Studio 3: Mitigation Note",
                "editable_instructions": "Treat filtering or postselection as a diagnostic tool and say exactly what it improves and what it leaves unresolved.",
                "context_source": NOISE_CONTEXT,
                "context_name": "simulate_noisy_counts",
            },
        ],
        "studio_quiz": NOISE_STUDIO_QUIZ,
        "studio_debrief": """
        A strong studio notebook in this module reads like a careful case file. It says what should happen, what actually happened ideally, what changed under noise, and why any mitigation claim remains bounded by explicit evidence.
        """,
        "studio_reflections": [
            "Which invariant anchored your final verification case study?",
            "What evidence let you separate defect from distortion most confidently?",
            "How did you keep your mitigation note bounded and honest?",
            "Name one verification habit from this module that must survive into the capstone.",
        ],
    },
    {
        "dir": MODULE_04_DIR,
        "title": "Capstone Circuit Design Review",
        "lecture_intro": """
        The capstone is where the whole curriculum finally behaves like professional engineering. Earlier modules taught pieces of the job: literacy, circuit construction, algorithmic reasoning, workflow design, topology-aware redesign, and noise-aware verification. The capstone asks you to combine them into a single act of judgement. A design brief is stated. Multiple candidates are created. Each candidate is benchmarked under the declared local assumptions. A winner is recommended. Risks are named. The notebook becomes a design review rather than a guided tour.
        """,
        "learning_objective": """
        By the end of this lecture you should be able to define a constrained design brief, keep several plausible candidate circuits alive long enough to compare them credibly, benchmark them across ideal, compiled, and noisy lenses, and write a recommendation that names the chosen candidate, the evidence that supports it, and the conditions under which the conclusion might change.
        """,
        "lecture_sections": [
            """
            The first capstone skill is resisting premature attachment. Designers often fall in love with a candidate before the evidence exists. They like a particular circuit shape, a certain abstraction, or a favored intuition, and then the notebook becomes a search for confirmation. This module is built to oppose that habit. The brief comes first. The candidates come second. The recommendation comes last. That order is what protects the integrity of the comparison.
            """,
            """
            A good design brief is tighter than a vague goal but looser than a predetermined winner. It says what the circuit should accomplish, what environment or constraint model matters, and what evidence will be used to compare success. In a local-first capstone, that often means ideal behavior, compiled burden under a declared topology, and noisy support under a local noise model. Those lenses do not tell the same story, which is precisely why the capstone needs all of them.
            """,
            """
            Candidate generation is also more disciplined than it first appears. The point is not to invent random variants and hope one looks impressive. The point is to propose alternatives that are plausible enough to deserve comparison and distinct enough to reveal tradeoffs. In this module the candidate family is intentionally small so the review burden stays visible. But the pattern is the same one you would use on larger work: define a brief, create multiple credible candidates, and let the evidence eliminate or elevate them.
            """,
            """
            Benchmarking across several lenses is what turns the notebook into a review artifact. Ideal behavior tells you whether the intended mechanism exists. Compiled cost under fixed constraints tells you what the implementation burden looks like. Noisy support tells you whether a candidate that seems elegant on paper survives a more realistic local model. None of these lenses alone is a sufficient verdict. The capstone teaches you how to hold them together without becoming vague.
            """,
            """
            Recommendation writing is the final skill. A weak recommendation says which candidate won. A strong recommendation says why it won under the declared brief and what risks remain. That risk sentence matters. It prevents the common capstone failure in which a local comparison is quietly treated as a universal theorem. Professional reviews are stronger when they are conditional in the right way. They say: under these assumptions, with this evidence, this is the candidate I recommend.
            """,
            """
            The capstone therefore closes the course on the right standard. It does not ask whether you can produce a circuit. It asks whether you can think like the person responsible for choosing and defending one. That is the difference between notebook completion and professional design judgement.
            """,
        ],
        "anchor_intro": """
        The capstone anchor uses a small GHZ-style design brief under line-topology and noisy local assumptions. The circuit family is intentionally limited. The goal is not breadth of search. The goal is depth of comparison and clarity of recommendation.
        """,
        "step_refs": CAPSTONE_STEP_REFS,
        "anchor_code": CAPSTONE_ANCHOR,
        "anchor_context_source": CAPSTONE_CONTEXT,
        "anchor_context_name": "simulate_capstone_counts",
        "analysis_code": """
        LOCAL_BASIS = ["rz", "sx", "x", "cx"]
        capstone_noise = build_demo_noise_model(
            single_qubit_error=0.01,
            two_qubit_error=0.04,
            readout_error=0.02,
        )

        def ghz_candidate(style: str) -> QuantumCircuit:
            circuit = QuantumCircuit(3, 3)
            if style == "naive":
                circuit.h(0)
                circuit.cx(0, 1)
                circuit.cx(0, 2)
            elif style == "chain":
                circuit.h(0)
                circuit.cx(0, 1)
                circuit.cx(1, 2)
            elif style == "middle_root":
                circuit.h(1)
                circuit.cx(1, 0)
                circuit.cx(1, 2)
            else:
                raise ValueError("style must be naive, chain, or middle_root")
            circuit.measure([0, 1, 2], [0, 1, 2])
            return circuit

        def success_rate(counts: dict[str, int]) -> float:
            probabilities = counts_to_probabilities(counts)
            return round(probabilities.get("000", 0.0) + probabilities.get("111", 0.0), 3)

        def benchmark(style: str) -> dict[str, object]:
            circuit = ghz_candidate(style)
            compiled = transpile_summary(
                circuit,
                BasicSimulator(),
                basis_gates=LOCAL_BASIS,
                coupling_map=line_coupling_map(3),
                optimization_level=1,
            )
            noisy_counts = simulate_counts(
                circuit,
                shots=256,
                noise_model=capstone_noise,
                basis_gates=LOCAL_BASIS,
                coupling_map=line_coupling_map(3),
                optimization_level=1,
            )
            return {
                "style": style,
                "depth_after": compiled["depth_after"],
                "size_after": compiled["size_after"],
                "noisy_success": success_rate(noisy_counts),
                "noisy_counts": noisy_counts,
            }
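
        # Hedged add-ons (new names, illustration only): the brief names three
        # lenses, so record the ideal lens too, and keep ranking an explicit,
        # criterion-first step rather than an implicit glance at the table.
        def ideal_success(style: str) -> float:
            return success_rate(simulate_counts(ghz_candidate(style), shots=256))

        def rank_candidates(rows: list[dict[str, object]]) -> list[dict[str, object]]:
            # Sort by the declared brief metric first, compiled depth as tiebreaker.
            return sorted(rows, key=lambda row: (-row["noisy_success"], row["depth_after"]))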

        [benchmark(style) for style in ["naive", "chain", "middle_root"]]
        """,
        "lecture_quiz_a": CAPSTONE_QUIZ_A,
        "lecture_quiz_b": CAPSTONE_QUIZ_B,
        "lecture_review_sections": [
            """
            A strong self-check for the capstone is whether you can say what would have changed your mind. If a recommendation survives only because the notebook never states what competing evidence would matter, then the recommendation is weaker than it looks. Professional judgement is strongest when it names the conditions under which it would shift.
            """,
            """
            Another self-check is whether your recommendation still sounds defensible when the circuit drawing is removed. If the only thing left is a preference for one diagram, the review is not done. A complete design review should remain legible as a brief, a set of candidates, a body of evidence, and a final recommendation with risks.
            """,
        ],
        "lecture_reflections": [
            "Explain why a capstone recommendation must come after candidate comparison rather than before it.",
            "Describe one way a local comparison can still be professionally useful without claiming universal truth.",
        ],
        "lab_intro": """
        The lab makes the capstone mechanics concrete: define a fixed brief, compare a candidate family, and practice recommendation writing under stable constraints.
        """,
        "lab_protocol": """
        Do not move the brief while comparing candidates. Keep the topology, basis model, and noisy lens fixed. Let the candidates change, not the rules of the game.
        """,
        "lab_sections": [
            {
                "heading": "Lab 1: Candidate Family",
                "intro": """
                Start by inspecting the candidate family under the same local constraint model. The challenge is not to pick instantly. The challenge is to keep all plausible candidates alive long enough to compare them honestly.
                """,
                "step_refs": CAPSTONE_STEP_REFS,
                "editable_code": CAPSTONE_ANCHOR,
                "editable_title": "Lab 1: Candidate Family",
                "editable_instructions": "Switch between naive, chain, and middle_root candidates and explain what brief they are all trying to satisfy.",
                "context_source": CAPSTONE_CONTEXT,
                "context_name": "simulate_capstone_counts",
            },
            {
                "heading": "Lab 2: Benchmark Under Constraints",
                "intro": """
                Now compare how the candidates behave once compiled cost and noisy pressure are taken seriously. This is the point where intuition must start sharing authority with evidence.
                """,
                "editable_code": CAPSTONE_COMPARE_EDITABLE,
                "editable_title": "Lab 2: Benchmark Under Constraints",
                "editable_instructions": "Keep the brief fixed while you compare candidates under the line-topology noisy preview. Do not anoint a winner until you can name the evidence.",
                "context_source": CAPSTONE_CONTEXT,
                "context_name": "simulate_capstone_counts",
            },
            {
                "heading": "Lab 3: Recommendation Stress Test",
                "intro": """
                Finally, edit a review-oriented candidate and decide what would count as a responsible recommendation. The point is not to eliminate uncertainty. The point is to write with the right kind of conditional confidence.
                """,
                "editable_code": CAPSTONE_REVIEW_EDITABLE,
                "editable_title": "Lab 3: Recommendation Stress Test",
                "editable_instructions": "Use the extra-layer toggle and candidate choice to test how quickly a recommendation can become fragile when the brief is ignored.",
                "context_source": CAPSTONE_CONTEXT,
                "context_name": "simulate_capstone_counts",
            },
        ],
        "lab_quiz_a": CAPSTONE_LAB_QUIZ_A,
        "lab_quiz_b": CAPSTONE_LAB_QUIZ_B,
        "lab_debrief": """
        After this lab, a design recommendation should feel less like a declaration and more like a compact argument. You now have practice keeping a brief fixed, comparing multiple candidates, and naming both the evidence and the scope limits of the conclusion.
        """,
        "lab_reflections": [
            "Which candidate stayed plausible the longest in your comparison, and why?",
            "Write a short recommendation sentence that names both a winner and one residual risk.",
        ],
        "problem_intro": """
        These problems test whether your design-review language is strong enough to survive small shifts in the brief, the evidence, and the final recommendation wording.
        """,
        "problem_how": """
        Prefer answers that keep the brief explicit and tie the winner to evidence. Reject answers that jump straight from a favorite circuit to a conclusion.
        """,
        "problem_sets": CAPSTONE_PROBLEM_SETS,
        "problem_case": """
        Capstone notebooks often fail in one of two ways. Either they present only one candidate and call the result a review, or they compare several candidates but never define the criterion by which one should win. Both failures are forms of weak judgement. These problems are designed to make that weakness uncomfortable enough that you stop accepting it.
        """,
        "problem_reflections": [
            "Explain why a candidate ranking with no explicit criterion is not yet a professional review.",
            "Describe the minimum evidence you would want before accepting a capstone recommendation written by someone else.",
        ],
        "problem_exit": """
        Move on when you can explain the capstone as a brief-plus-candidates-plus-evidence-plus-recommendation workflow rather than as a final flashy notebook.
        """,
        "studio_intro": """
        The final studio asks for the closest thing in the repository to a professional design memo.
        """,
        "studio_brief": """
        Produce a compact local design review that states a brief, compares at least two plausible candidates under the same local constraints, and ends with a recommendation whose confidence and limits are both explicit.
        """,
        "studio_sections": [
            {
                "heading": "Studio Prompt 1: Freeze The Brief",
                "intro": """
                State the objective and constraints clearly enough that candidate comparison can stay honest.
                """,
                "editable_code": CAPSTONE_ANCHOR,
                "editable_title": "Studio 1: Freeze The Brief",
                "editable_instructions": "Use the candidate family as answers to one shared brief, not as unrelated demos.",
                "context_source": CAPSTONE_CONTEXT,
                "context_name": "simulate_capstone_counts",
            },
            {
                "heading": "Studio Prompt 2: Compare The Evidence",
                "intro": """
                Keep at least two candidates alive and compare them through the same ideal, compiled, and noisy lenses.
                """,
                "editable_code": CAPSTONE_COMPARE_EDITABLE,
                "editable_title": "Studio 2: Compare The Evidence",
                "editable_instructions": "Do not choose a winner until the same evidence layers have been inspected for each plausible candidate.",
                "context_source": CAPSTONE_CONTEXT,
                "context_name": "simulate_capstone_counts",
            },
            {
                "heading": "Studio Prompt 3: Write The Final Review",
                "intro": """
                End with a recommendation that another engineer could read, challenge, and still respect.
                """,
                "editable_code": CAPSTONE_REVIEW_EDITABLE,
                "editable_title": "Studio 3: Final Review",
                "editable_instructions": "Use the editable candidate to stress-test the final recommendation and write down what would change your mind.",
                "context_source": CAPSTONE_CONTEXT,
                "context_name": "simulate_capstone_counts",
            },
        ],
        "studio_quiz": CAPSTONE_STUDIO_QUIZ,
        "studio_debrief": """
        A successful final studio notebook should feel like a design review someone could actually use. It should communicate the brief, the candidate family, the evidence, the recommendation, and the residual uncertainty without hiding behind either hype or vagueness.
        """,
        "studio_reflections": [
            "What brief did you ultimately freeze for the final review, and why was it appropriate?",
            "Which evidence layer most influenced the final ranking of your candidates?",
            "What residual risk did you include in the recommendation, and why did it matter?",
            "Name the single most important course habit that now feels non-negotiable in professional circuit design.",
        ],
    },
]
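

# Hedged schema sketch (hypothetical helper; nothing in this script calls it):
# the build_* functions below assume each module dict carries at least these
# keys, so a quick audit like this can fail fast before any notebook is built.
REQUIRED_MODULE_KEYS = {
    "dir",
    "title",
    "lecture_intro",
    "learning_objective",
    "lecture_sections",
    "anchor_intro",
    "step_refs",
    "anchor_code",
    "analysis_code",
}


def check_module_schema(modules: list[dict]) -> None:
    for module in modules:
        missing = REQUIRED_MODULE_KEYS - module.keys()
        if missing:
            raise KeyError(f"{module.get('title', 'unnamed module')} is missing {sorted(missing)}")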


def build_lecture(module: dict) -> dict:
    cells = [
        markdown_cell(f"# {module['title']} Lecture"),
        markdown_cell(module["lecture_intro"]),
        markdown_cell(f"## Learning Objective\n\n{module['learning_objective']}"),
    ]
    for section in module["lecture_sections"]:
        cells.append(markdown_cell(section))

    cells.extend(
        [
            code_cell(SETUP),
            code_cell(COMMON_IMPORTS),
            markdown_cell(f"## Code-To-Diagram Anchor\n\n{module['anchor_intro']}"),
            code_cell(f"step_reference_table({module['step_refs']!r})"),
            editable_lab_code(
                module["anchor_code"],
                title=f"{module['title']} Anchor",
                instructions="Edit one structural burden at a time and use the reference table to keep the code and the engineering story aligned.",
                context_source=module.get("anchor_context_source", ""),
                context_name=module.get("anchor_context_name", "simulate_counts"),
            ),
            code_cell(module["analysis_code"]),
            quiz_code(module["lecture_quiz_a"], "Lecture Checkpoint A"),
        ]
    )

    for section in module["lecture_review_sections"]:
        cells.append(markdown_cell(section))

    cells.extend(
        [
            markdown_cell(
                """
                ## Reading Discipline For This Module

                The professional band demands slower reading than the earlier bands because the unit of judgement is larger. You are no longer inspecting only a circuit body. You are inspecting a design brief, a constraint model, a verification story, or a recommendation workflow. That means every notebook should be read with questions like these in mind: what burden is this stage carrying, what evidence would justify it, and what kind of failure would falsify the current explanation? If those questions stay active while you read, the notebook becomes training. If they disappear, the notebook becomes performance.

                Another discipline worth installing here is conditional confidence. Professional engineering rarely says only "this works." It says "under these assumptions, with this evidence, this is the choice I recommend." That conditional phrasing is not weakness. It is rigor. The purpose of the final band is to make that rigor normal in both your code and your prose.
                """
            ),
            markdown_cell(
                """
                ## Professional Review Habit

                Another habit worth building here is review-minded reading. Do not read these notebooks only as the author of the current code cell. Read them as the future reviewer who must decide whether the workflow, redesign, diagnosis, or recommendation is trustworthy. That reviewer wants to know what assumptions were fixed, what evidence was gathered, what remained uncertain, and what could still break if the surrounding constraints changed. Practicing that perspective now is what turns the final band into professional training instead of advanced entertainment.
                """
            ),
            markdown_cell(
                """
                ## Forward Link

                Every later notebook artifact in a real project will inherit the standards practiced here. Workflow patterns affect how experiments are reproduced. Hardware-aware redesign affects whether ideal elegance survives implementation. Verification determines whether bad results are diagnosed honestly. And capstone review determines whether a chosen circuit can actually be defended. The point of this band is not to add optional polish. It is to make the entire project behave like professional engineering work.
                """
            ),
            quiz_code(module["lecture_quiz_b"], "Lecture Checkpoint B"),
            reflection_code(module["lecture_reflections"][0]),
            reflection_code(module["lecture_reflections"][1]),
            feedback_panel_code(
                title=f"{module['title']} Lecture Revision Loop",
                prompt=(
                    "State the judgement this lecture is training, the strongest evidence that would justify it, "
                    "the current weakness in your own explanation, and the next revision you should make."
                ),
            ),
            rubric_code("module_self_review", f"{module['title']} Lecture Self-Grading"),
            markdown_cell(
                "## Mastery Gate\n\nLeave this lecture only when you can explain what evidence would justify the final professional judgement and what evidence would force you to revise it."
            ),
        ]
    )
    return notebook(cells)


def build_lab(module: dict) -> dict:
    cells = [
        markdown_cell(f"# {module['title']} Lab"),
        markdown_cell(module["lab_intro"]),
        markdown_cell(f"## Lab Protocol\n\n{module['lab_protocol']}"),
        code_cell(SETUP),
        code_cell(COMMON_IMPORTS),
    ]

    first_lab = True
    for section in module["lab_sections"]:
        cells.append(markdown_cell(f"## {section['heading']}\n\n{section['intro']}"))
        if section.get("step_refs") is not None:
            cells.append(code_cell(f"step_reference_table({section['step_refs']!r})"))
        cells.append(
            editable_lab_code(
                section["editable_code"],
                title=section["editable_title"],
                instructions=section["editable_instructions"],
                context_source=section.get("context_source", ""),
                context_name=section.get("context_name", "simulate_counts"),
            )
        )
        if first_lab:
            cells.append(quiz_code(module["lab_quiz_a"], "Lab Checkpoint A"))
            cells.append(reflection_code(module["lab_reflections"][0]))
            first_lab = False

    cells.extend(
        [
            quiz_code(module["lab_quiz_b"], "Lab Checkpoint B"),
            markdown_cell(f"## Lab Debrief\n\n{module['lab_debrief']}"),
            markdown_cell(
                """
                ## Why The Lab Is Slower Than A Demo

                These labs are built to slow down the moment where many learners usually rush. In professional work, the difference between a good decision and a weak one often depends on whether you changed one variable at a time, whether you kept the objective fixed, and whether you wrote down what result you expected before running the next cell. That is why the labs here are not just demonstrations. They are rehearsals for disciplined engineering comparison.
                """
            ),
            markdown_cell(
                """
                ## Prediction Ledger

                If the comparison starts to blur, return to a prediction ledger. Write down what should stay invariant, what metric or observation should move, and what conclusion would follow if it does. That simple habit will make your later capstone work far stronger because it converts trial-and-error into interpretable evidence.
                """
            ),
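            code_cell(
                """
                # Hedged ledger sketch (illustrative field names, not a course API):
                # write the expectation down before the next run so the comparison
                # described above stays honest.
                prediction_ledger = [
                    {
                        "invariant": "the reporting contract stays fixed",
                        "expected_move": "one named metric, in one named direction",
                        "conclusion_if_wrong": "revisit the mechanism before blaming noise",
                    }
                ]
                prediction_ledger
                """
            ),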
|
|
reflection_code(module["lab_reflections"][1]),
|
|
reflection_code("Write one prediction habit from this lab that you want to preserve in later professional work."),
|
|
feedback_panel_code(
|
|
title=f"{module['title']} Lab Feedback Loop",
|
|
prompt=(
|
|
"Turn the lab into a review note: state the current claim, cite the strongest evidence, "
|
|
"name the main remaining risk, and write the next comparison you would run."
|
|
),
|
|
),
|
|
rubric_code("module_self_review", f"{module['title']} Lab Self-Grading"),
|
|
]
|
|
)
|
|
return notebook(cells)
|
|
|
|
|
|
def build_problems(module: dict) -> dict:
|
|
cells = [
|
|
markdown_cell(f"# {module['title']} Problems"),
|
|
markdown_cell(module["problem_intro"]),
|
|
markdown_cell(f"## How To Use This Notebook\n\n{module['problem_how']}"),
|
|
code_cell(SETUP),
|
|
code_cell(COMMON_IMPORTS),
|
|
]
|
|
|
|
for heading, questions in module["problem_sets"]:
|
|
cells.append(
|
|
markdown_cell(
|
|
f"## {heading}\n\nTreat these questions as miniature review situations rather than as trivia. The correct answer should survive a serious engineering conversation."
|
|
)
|
|
)
|
|
cells.append(quiz_code(questions, heading))
|
|
|
|
cells.extend(
        [
            markdown_cell(f"## Mini Case\n\n{module['problem_case']}"),
            markdown_cell(
                """
                ## What These Questions Are Really Testing

                The multiple-choice format is only the surface. Underneath it, the notebook is testing whether you can preserve the right burdens while the wording shifts. If your understanding is strong, a changed phrase still points you back to the same structure, constraint, invariant, or recommendation logic. If your understanding is weak, the wording change will tempt you into vague or prestige-based answers. That is why these problem sets matter.
                """
            ),
            markdown_cell(
                """
                ## Common Failure Mode

                A common failure mode in advanced notebook work is broad correctness with local vagueness: a learner says something true at a high level, yet still fails to name what this particular workflow, redesign, diagnosis, or recommendation is actually doing. The problems notebook is meant to squeeze that vagueness out. It prefers circuit-specific or review-specific sentences over broadly true but vague ones.
                """
            ),
            markdown_cell(
                """
                ## Why Written Justification Matters

                The written prompts at the end of this notebook are not filler. They are where you prove that the right distinctions survive contact with your own prose. Multiple-choice blocks can tell you whether a distinction still feels recognizable. A written answer tells you whether you can actually deploy that distinction in a design note, verification memo, or review comment. Professional skill depends on that second ability.
                """
            ),
            markdown_cell("## Written Checks\n\nUse the prompts below to practice full-sentence engineering judgment."),
            reflection_code(module["problem_reflections"][0]),
            reflection_code(module["problem_reflections"][1]),
            feedback_panel_code(
                title=f"{module['title']} Problem-Set Review Loop",
                prompt=(
                    "After the quizzes, write the claim you now trust most, the evidence pattern behind it, "
                    "the phrasing that still feels weak, and the next rewrite you owe yourself."
                ),
            ),
            rubric_code("module_self_review", f"{module['title']} Problem Self-Grading"),
            markdown_cell(f"## Exit Condition\n\n{module['problem_exit']}"),
        ]
    )
    return notebook(cells)


def build_studio(module: dict) -> dict:
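    """Assemble the studio notebook for one module.

    Mirrors the lab structure but opens with a design brief and closes with
    the evidence checklist and rubric appropriate to the module.
    """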
    cells = [
        markdown_cell(f"# {module['title']} Studio"),
        markdown_cell(module["studio_intro"]),
        markdown_cell(f"## Design Brief\n\n{module['studio_brief']}"),
        code_cell(SETUP),
        code_cell(COMMON_IMPORTS),
    ]

    for section in module["studio_sections"]:
        cells.append(markdown_cell(f"## {section['heading']}\n\n{section['intro']}"))
        cells.append(
            editable_lab_code(
                section["editable_code"],
                title=section["editable_title"],
                instructions=section["editable_instructions"],
                context_source=section.get("context_source", ""),
                context_name=section.get("context_name", "simulate_counts"),
            )
        )

    cells.extend(
        [
            quiz_code(module["studio_quiz"], "Studio Design Check"),
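            # The capstone module (module 04) uses its dedicated review
            # checklist; every other module shares the general checklist.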
            checklist_code(
                CAPSTONE_REVIEW_CHECKLIST
                if module["dir"] == MODULE_04_DIR
                else GENERAL_REVIEW_CHECKLIST,
                f"{module['title']} Evidence Checklist",
            ),
            markdown_cell(f"## Studio Debrief\n\n{module['studio_debrief']}"),
            markdown_cell(
                """
                ## Studio Standard

                A strong studio notebook is compact, explicit, and reviewable. It does not hide behind volume. It makes the objective clear, the candidate or case structure visible, the evidence traceable, and the final judgment conditional in the right way. If those things are not yet present, the notebook is not finished, no matter how many cells it contains.
                """
            ),
            markdown_cell(
                """
                ## What A Finished Studio Should Feel Like

                The finished notebook should feel like something another engineer could open and use. They should be able to understand what the notebook is trying to decide, how the circuits were compared, what evidence was gathered, and why the recommendation or diagnosis ended where it did. That is the practical definition of "world-class" in this project: not theatrical polish, but concentrated clarity under real engineering burdens.
                """
            ),
            markdown_cell(
                """
                ## Final Check Before You Stop

                Before you leave a studio notebook, ask one last question: if another engineer disagreed with my conclusion, would the notebook give them enough material to locate the disagreement precisely? If the answer is yes, the studio is doing its job.
                """
            ),
            reflection_code(module["studio_reflections"][0]),
            reflection_code(module["studio_reflections"][1]),
            reflection_code(module["studio_reflections"][2]),
            reflection_code(module["studio_reflections"][3]),
            feedback_panel_code(
                title=f"{module['title']} Studio Revision Loop",
                prompt=(
                    "Write the decision or diagnosis the studio currently supports, the evidence carrying that weight, "
                    "the main remaining uncertainty, and the next revision that would sharpen the notebook."
                ),
            ),
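            # Likewise, the capstone studio is graded against the dedicated
            # design-review rubric rather than the shared self-review rubric.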
            rubric_code(
                "capstone_design_review"
                if module["dir"] == MODULE_04_DIR
                else "module_self_review",
                f"{module['title']} Studio Self-Grading",
            ),
        ]
    )
    return notebook(cells)


def main() -> None:
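    """Regenerate the lecture, lab, problems, and studio notebooks for every
    module in MODULES, then write them under each module's directory."""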
    outputs: dict[Path, dict] = {}
    for module in MODULES:
        outputs[module["dir"] / "lecture.ipynb"] = build_lecture(module)
        outputs[module["dir"] / "lab.ipynb"] = build_lab(module)
        outputs[module["dir"] / "problems.ipynb"] = build_problems(module)
        outputs[module["dir"] / "studio.ipynb"] = build_studio(module)
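    # Every notebook is built before any file is written, so a failure in
    # one builder leaves the existing files on disk untouched.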
    for path, payload in outputs.items():
        write_notebook(path, payload)


if __name__ == "__main__":
    main()