// Endpoint data — single source of truth for every /docs/* page.
// Each entry feeds the EndpointPage template.

const ENDPOINTS = {
  'video/best-frames': {
    group: 'video',
    title: 'best-frames',
    path: '/v1/video/best-frames',
    method: 'POST',
    short: 'Return the clearest, most informative frames in a video — ranked.',
    when: 'Use when you need still images for thumbnails, RAG indexing, or visual summaries — and want the API to pick the best ones rather than sampling at fixed intervals.',
    price: '$0.04 / video min',
    params: [
      ['media_url',     'string',         'Public or signed URL to the source video.', true],
      ['max_frames',    'integer',        'Maximum number of frames to return (default 8, max 64).'],
      ['start',         'number',         'Window start in seconds. Default 0.'],
      ['end',           'number',         'Window end in seconds. Default media duration.'],
      ['min_gap',       'number',         'Minimum seconds between selected frames (default 4).'],
      ['quality',       '"low"|"std"|"high"', 'Processing quality. Default "std".'],
    ],
    responseFields: [
      ['frames',        'Frame[]',        'Ordered list of selected frames, best first.'],
      ['frames[].t',    'number',         'Timestamp of the frame in seconds.'],
      ['frames[].url',  'string',         'Signed URL to the JPEG (24h).'],
      ['frames[].score','number',         'Composite quality + informativeness score (0–1).'],
      ['media_seconds', 'number',         'Duration of the analyzed media in seconds.'],
      ['price_usd',     'number',         'Final price for this call.'],
    ],
    requestExample: {
      'media_url': 'https://cdn.momentiq.dev/demo/lecture-08.mp4',
      'max_frames': 8,
      'min_gap': 6,
    },
    responseExample: {
      'endpoint': 'video/best-frames',
      'media_seconds': 1742,
      'frames': [
        { 't': 312.4, 'score': 0.94 },
        { 't': 618.0, 'score': 0.91 },
        { 't': 901.7, 'score': 0.88 },
      ],
      'price_usd': 1.16,
    },
    related: ['video/best-frame-near','video/dedupe-frames','video/thumbnail-score','video/text-frames'],
  },

  'video/best-frame-near': {
    group: 'video', title: 'best-frame-near', path: '/v1/video/best-frame-near', method: 'POST',
    short: 'Return the single best still image near a target timestamp.',
    when: 'Use to grab a clean cover frame near an event you already know about — a laugh, a slide change, a chapter mark.',
    price: '$0.03 / video min',
    params: [
      ['media_url','string','Public or signed URL to the source video.', true],
      ['t','number','Target timestamp in seconds.', true],
      ['window','number','Search window in seconds around t (default 4).'],
      ['min_score','number','Reject if best score below this threshold (default 0.4).'],
    ],
    responseFields: [
      ['frame.t','number','Timestamp of the chosen frame.'],
      ['frame.url','string','Signed URL to the JPEG.'],
      ['frame.score','number','Composite quality score (0–1).'],
      ['price_usd','number','Final price for this call.'],
    ],
    requestExample: { 'media_url':'https://cdn.../ep-42.mp4', 't': 862.4, 'window': 3 },
    responseExample: { 'frame': { 't': 861.8, 'score': 0.86 }, 'price_usd': 0.03 },
    related: ['video/best-frames','video/thumbnail-score','audio/detect-laughter'],
  },

  'video/text-frames': {
    group: 'video', title: 'text-frames', path: '/v1/video/text-frames', method: 'POST',
    short: 'Find frames that contain readable text — slides, whiteboards, dashboards, terminals.',
    when: 'Use to extract slide-aligned material from lectures, demos, screen recordings — perfect for RAG indexing or building lecture notes.',
    price: '$0.04 / video min',
    params: [
      ['media_url','string','Public or signed URL to the source video.', true],
      ['min_text_density','number','Minimum fraction of frame area covered by text (0–1, default 0.05).'],
      ['dedupe','boolean','Drop near-duplicate frames (default true).'],
      ['ocr','boolean','Include OCR text in response (default false).'],
    ],
    responseFields: [
      ['frames','Frame[]','List of frames with readable text.'],
      ['frames[].t','number','Timestamp.'],
      ['frames[].text','string','OCR text (if ocr=true).'],
      ['frames[].kind','"slide"|"whiteboard"|"screen"|"other"','Inferred surface type.'],
    ],
    requestExample: { 'media_url':'https://cdn.../lecture.mp4', 'ocr': true, 'dedupe': true },
    responseExample: {
      'frames': [
        { 't': 184.2, 'kind': 'slide', 'text': 'Lecture 8 — Differentiation under the integral sign' },
        { 't': 642.8, 'kind': 'slide', 'text': 'Leibniz rule: d/dx ∫_a(x)^b(x) f(x,t) dt = …' },
      ],
      'price_usd': 1.16,
    },
    related: ['video/dedupe-frames','audio/semantic-chunks','timeline/merge'],
  },

  'video/detect-cuts': {
    group: 'video', title: 'detect-cuts', path: '/v1/video/detect-cuts', method: 'POST',
    short: 'Detect hard visual changes — shot boundaries, slide swaps, scene jumps.',
    when: 'Use to chapter long content, build scrubbable previews, or feed video chapters into your RAG index.',
    price: '$0.02 / video min',
    params: [
      ['media_url','string','Public or signed URL to the source video.', true],
      ['sensitivity','number','How aggressive cut detection is (0–1, default 0.55).'],
      ['min_segment','number','Minimum seconds between adjacent cuts (default 0.4).'],
    ],
    responseFields: [
      ['cuts','number[]','Timestamps of cuts in seconds.'],
      ['segments','{start,end}[]','Convenience segments derived from cuts.'],
    ],
    requestExample: { 'media_url':'https://cdn.../keynote.mp4', 'sensitivity': 0.6 },
    responseExample: {
      'cuts': [12.4, 47.1, 102.6, 188.3, 244.9],
      'segments': [{ 'start':0,'end':12.4 }, { 'start':12.4,'end':47.1 }],
      'price_usd': 0.34,
    },
    related: ['video/text-frames','audio/suggest-cut-points','timeline/merge'],
  },

  'video/dedupe-frames': {
    group: 'video', title: 'dedupe-frames', path: '/v1/video/dedupe-frames', method: 'POST',
    short: 'Remove visually-repetitive frames from a list — keep one representative per cluster.',
    when: 'Use after best-frames or text-frames to ensure unique, non-redundant outputs for thumbnails or notes.',
    price: '$0.02 / video min',
    params: [
      ['frames','Frame[]','Frames to dedupe.', true],
      ['threshold','number','Similarity threshold (0–1, default 0.92).'],
    ],
    responseFields: [
      ['frames','Frame[]','Deduplicated frames, ordered by timestamp.'],
      ['removed','integer','Number of frames removed.'],
    ],
    requestExample: { 'frames': [{ 't': 12.4 }, { 't': 12.6 }, { 't': 88.0 }], 'threshold': 0.9 },
    responseExample: { 'frames': [{ 't': 12.4 }, { 't': 88.0 }], 'removed': 1, 'price_usd': 0.02 },
    related: ['video/best-frames','video/text-frames','video/thumbnail-score'],
  },

  'video/thumbnail-score': {
    group: 'video', title: 'thumbnail-score', path: '/v1/video/thumbnail-score', method: 'POST',
    short: 'Score frames for use as thumbnails — face presence, expression, contrast, focal interest.',
    when: 'Use after best-frames to pick the most click-worthy thumbnail from a candidate set.',
    price: '$0.04 / video min',
    params: [
      ['frames','Frame[]','Frames to score.', true],
      ['target','"clip"|"video"|"chapter"','Tunes the scorer for context.'],
    ],
    responseFields: [
      ['frames','Frame[]','Frames sorted by descending thumbnail_score.'],
      ['frames[].thumbnail_score','number','0–1 thumbnail suitability.'],
      ['frames[].reasons','string[]','Short why-list ("face_present", "high_contrast").'],
    ],
    requestExample: { 'frames':[{ 't': 312 },{ 't': 901 }], 'target': 'clip' },
    responseExample: {
      'frames': [
        { 't': 901, 'thumbnail_score': 0.91, 'reasons': ['face_present','direct_gaze','high_contrast'] },
        { 't': 312, 'thumbnail_score': 0.62, 'reasons': ['high_contrast'] },
      ],
      'price_usd': 0.04,
    },
    related: ['video/best-frames','video/best-frame-near','video/dedupe-frames'],
  },

  'video/clip-window': {
    group: 'video', title: 'clip-window', path: '/v1/video/clip-window', method: 'POST',
    short: 'Cut a clip from start to end. Returns a new media URL.',
    when: 'Use after suggest-ranges or your own logic to render a real, playable clip.',
    price: '$0.05 / processed min',
    params: [
      ['media_url','string','Source media URL.', true],
      ['start','number','Start in seconds.', true],
      ['end','number','End in seconds.', true],
      ['format','"mp4"|"mov"|"mp3"','Output format. Default mp4.'],
    ],
    responseFields: [
      ['url','string','Signed URL to the rendered clip.'],
      ['duration','number','Clip duration in seconds.'],
    ],
    requestExample: { 'media_url':'https://cdn.../ep-42.mp4', 'start': 862, 'end': 894 },
    responseExample: { 'url':'https://cdn.momentiq.dev/clips/abc123.mp4', 'duration': 32, 'price_usd': 0.03 },
    related: ['video/clip-near','timeline/suggest-ranges','audio/isolate-speech'],
  },

  'video/clip-near': {
    group: 'video', title: 'clip-near', path: '/v1/video/clip-near', method: 'POST',
    short: 'Cut a clip centered on a timestamp, with smart boundary alignment to silence and cuts.',
    when: 'Use to turn a single moment (a laugh, a quote) into a render-ready clip without writing boundary logic.',
    price: '$0.05 / processed min',
    params: [
      ['media_url','string','Source media URL.', true],
      ['t','number','Target timestamp in seconds.', true],
      ['target_duration','number','Desired clip length in seconds (default 30).'],
      ['snap','boolean','Snap boundaries to nearby silence/cuts (default true).'],
    ],
    responseFields: [
      ['url','string','Signed URL to the rendered clip.'],
      ['start','number','Final start.'],
      ['end','number','Final end.'],
    ],
    requestExample: { 'media_url':'https://cdn.../ep-42.mp4', 't': 862.4, 'target_duration': 30 },
    responseExample: { 'url':'https://cdn.../clips/x.mp4', 'start': 850.1, 'end': 882.6, 'price_usd': 0.03 },
    related: ['video/clip-window','audio/detect-silence','timeline/find-nearest'],
  },

  'audio/detect-silence': {
    group: 'audio', title: 'detect-silence', path: '/v1/audio/detect-silence', method: 'POST',
    short: 'Find silence regions — natural cut points, breath pauses, gaps.',
    when: 'Use to align edits to natural pauses, trim dead air, or feed boundaries into clip-near.',
    price: '$0.01 / audio min',
    params: [
      ['media_url','string','Public or signed URL to the source media.', true],
      ['min_duration','number','Minimum silence length in seconds (default 0.3).'],
      ['threshold_db','number','Silence threshold in dB below mean (default -35).'],
    ],
    responseFields: [
      ['silences','{start,end}[]','Silence regions in seconds.'],
    ],
    requestExample: { 'media_url':'https://cdn.../ep-42.mp4', 'min_duration': 0.4 },
    responseExample: {
      'silences':[{ 'start':12.1,'end':12.6 },{ 'start':88.4,'end':89.0 }],
      'price_usd': 0.08,
    },
    related: ['audio/suggest-cut-points','audio/detect-energy','timeline/merge'],
  },

  'audio/detect-laughter': {
    group: 'audio', title: 'detect-laughter', path: '/v1/audio/detect-laughter', method: 'POST',
    short: 'Detect laughter moments with confidence scores.',
    when: 'Use to find the funny parts of a podcast or stream — feed them into clip-near to ship clips.',
    price: '$0.04 / audio min',
    params: [
      ['media_url','string','Public or signed URL to the source media.', true],
      ['sensitivity','number','How eager the detector is (0–1, default 0.55).'],
      ['min_duration','number','Minimum laughter length in seconds (default 0.4).'],
    ],
    responseFields: [
      ['results','{t,dur,conf}[]','Laughter events with timestamp, duration, confidence.'],
    ],
    requestExample: { 'media_url':'https://cdn.../ep-42.mp4', 'sensitivity': 0.62 },
    responseExample: {
      'results':[
        { 't': 862.4, 'dur': 1.8, 'conf': 0.94 },
        { 't': 948.1, 'dur': 0.9, 'conf': 0.81 },
      ],
      'price_usd': 0.21,
    },
    related: ['video/clip-near','audio/detect-silence','timeline/suggest-ranges'],
  },

  'audio/detect-energy': {
    group: 'audio', title: 'detect-energy', path: '/v1/audio/detect-energy', method: 'POST',
    short: 'Score audio energy over time — find the loud, intense, or quiet parts.',
    when: 'Use to surface emotionally-charged moments, music drops, intense Q&A, or for an energy heatmap.',
    price: '$0.02 / audio min',
    params: [
      ['media_url','string','Public or signed URL.', true],
      ['resolution','number','Sampling resolution in seconds (default 1).'],
    ],
    responseFields: [
      ['energy','number[]','Energy values 0–1 per resolution step.'],
      ['peaks','{t,score}[]','Local energy peaks.'],
    ],
    requestExample: { 'media_url':'https://cdn.../ep-42.mp4', 'resolution': 0.5 },
    responseExample: { 'peaks':[{ 't': 312.5, 'score': 0.92 }], 'price_usd': 0.18 },
    related: ['audio/detect-laughter','audio/detect-music','timeline/merge'],
  },

  'audio/detect-speakers': {
    group: 'audio', title: 'detect-speakers', path: '/v1/audio/detect-speakers', method: 'POST',
    short: 'Speaker diarization — who spoke when, with optional overlap detection.',
    when: 'Use for meeting memory, podcast transcripts with speaker tags, or call analytics.',
    price: '$0.05 / audio min',
    params: [
      ['media_url','string','Public or signed URL.', true],
      ['estimated_speakers','integer','Optional hint for the number of speakers.'],
      ['detect_overlap','boolean','Return overlap regions (default true).'],
    ],
    responseFields: [
      ['speakers','{id,share,segments}[]','Per-speaker totals.'],
      ['segments','{t,dur,id}[]','Per-segment diarization.'],
      ['overlaps','{t,dur,ids}[]','Overlap regions.'],
    ],
    requestExample: { 'media_url':'https://cdn.../meeting.mp4', 'estimated_speakers': 3 },
    responseExample: {
      'speakers':[{ 'id':'A','share':0.38,'segments':14 },{ 'id':'B','share':0.31,'segments':11 },{ 'id':'C','share':0.28,'segments':9 }],
      'overlaps':[{ 't': 412.7, 'dur': 1.2, 'ids':['A','B'] }],
      'price_usd': 0.89,
    },
    related: ['audio/semantic-chunks','audio/isolate-speech','timeline/merge'],
  },

  'audio/detect-music': {
    group: 'audio', title: 'detect-music', path: '/v1/audio/detect-music', method: 'POST',
    short: 'Detect music sections and speech-vs-music overlap.',
    when: 'Use to skip music in podcast intros, find scored sections in trailers, or align cuts to music drops.',
    price: '$0.03 / audio min',
    params: [
      ['media_url','string','Public or signed URL.', true],
      ['min_duration','number','Minimum music section in seconds (default 1.5).'],
    ],
    responseFields: [
      ['music','{start,end,confidence}[]','Music regions.'],
      ['mixed','{start,end}[]','Regions with overlapping speech + music.'],
    ],
    requestExample: { 'media_url':'https://cdn.../ep-42.mp4' },
    responseExample: { 'music':[{ 'start':0,'end':24.8,'confidence':0.97 }], 'mixed':[{ 'start':22.4,'end':24.8 }], 'price_usd': 0.13 },
    related: ['audio/isolate-speech','audio/detect-energy','audio/suggest-cut-points'],
  },

  'audio/isolate-speech': {
    group: 'audio', title: 'isolate-speech', path: '/v1/audio/isolate-speech', method: 'POST',
    short: 'Return a clean speech-only audio track with noise and music removed.',
    when: 'Use as preprocessing for transcription, podcast cleanup, or before feeding to your LLM.',
    price: '$0.06 / processed min',
    params: [
      ['media_url','string','Public or signed URL.', true],
      ['format','"mp3"|"wav"','Output format. Default mp3.'],
      ['preserve_levels','boolean','Keep relative loudness instead of normalizing.'],
    ],
    responseFields: [
      ['url','string','Signed URL to the cleaned audio.'],
      ['duration','number','Output duration in seconds.'],
    ],
    requestExample: { 'media_url':'https://cdn.../ep-42.mp4', 'format': 'mp3' },
    responseExample: { 'url':'https://cdn.../clean/abc.mp3', 'duration': 2892, 'price_usd': 2.89 },
    related: ['audio/detect-music','audio/detect-speakers','video/clip-window'],
  },

  'audio/suggest-cut-points': {
    group: 'audio', title: 'suggest-cut-points', path: '/v1/audio/suggest-cut-points', method: 'POST',
    short: 'Suggest natural edit boundaries — silence + speaker changes + topic shifts.',
    when: 'Use to render clips that start/end at clean, natural points — never mid-syllable.',
    price: '$0.03 / audio min',
    params: [
      ['media_url','string','Public or signed URL.', true],
      ['around','number[]','Optional list of timestamps to search near.'],
    ],
    responseFields: [
      ['cuts','{t,quality,reason}[]','Suggested cut points.'],
    ],
    requestExample: { 'media_url':'https://cdn.../ep-42.mp4', 'around':[862.4] },
    responseExample: {
      'cuts':[{ 't': 850.1, 'quality':0.92, 'reason':'silence_after' },{ 't': 882.6, 'quality':0.88, 'reason':'speaker_change' }],
      'price_usd': 0.10,
    },
    related: ['audio/detect-silence','video/clip-near','timeline/suggest-ranges'],
  },

  'audio/semantic-chunks': {
    group: 'audio', title: 'semantic-chunks', path: '/v1/audio/semantic-chunks', method: 'POST',
    short: 'Split audio by topic and speaker — not by fixed seconds.',
    when: 'Use to index podcasts, lectures, and meetings into RAG with chunks that respect topic boundaries.',
    price: '$0.05 / audio min',
    params: [
      ['media_url','string','Public or signed URL.', true],
      ['target_chunk_seconds','number','Soft target chunk length in seconds (default 90).'],
    ],
    responseFields: [
      ['chunks','{start,end,topic,speakers}[]','Topic-aware chunks.'],
    ],
    requestExample: { 'media_url':'https://cdn.../lecture.mp4', 'target_chunk_seconds': 60 },
    responseExample: {
      'chunks':[
        { 'start':0,'end':62.1,'topic':'intro','speakers':['A'] },
        { 'start':62.1,'end':148.4,'topic':'leibniz_rule','speakers':['A'] },
      ],
      'price_usd': 1.45,
    },
    related: ['audio/detect-speakers','video/text-frames','timeline/merge'],
  },

  'timeline/merge': {
    group: 'timeline', title: 'merge', path: '/v1/timeline/merge', method: 'POST',
    short: 'Combine results from many endpoints into a single shared timeline.',
    when: 'Use as the canonical "what happened in this media" object — feed it to your model, store it, query it.',
    price: 'low fixed / request',
    params: [
      ['media_seconds','number','Duration of the source media in seconds.', true],
      ['signals','Signal[]','Array of signal objects from any other endpoint.', true],
    ],
    responseFields: [
      ['timeline','Event[]','Unified, sorted event list.'],
      ['by_kind','{[kind]: Event[]}','Events grouped by signal kind.'],
    ],
    requestExample: {
      'media_seconds': 2892,
      'signals':[
        { 'kind':'laughter', 'events':[{ 't':862.4,'dur':1.8 }] },
        { 'kind':'silence',  'events':[{ 't':859.9,'dur':0.4 }] },
      ],
    },
    responseExample: {
      'timeline':[
        { 'kind':'silence',  't':859.9,'dur':0.4 },
        { 'kind':'laughter', 't':862.4,'dur':1.8 },
      ],
      'price_usd': 0.001,
    },
    related: ['timeline/find-nearest','timeline/suggest-ranges','audio/detect-laughter'],
  },

  'timeline/find-nearest': {
    group: 'timeline', title: 'find-nearest', path: '/v1/timeline/find-nearest', method: 'POST',
    short: 'Given a timestamp and a kind, return the nearest event of that kind.',
    when: 'Use when chaining endpoints — e.g. "find the silence right before this laugh".',
    price: 'low fixed / request',
    params: [
      ['timeline','Event[]','Timeline produced by /timeline/merge.', true],
      ['t','number','Reference timestamp in seconds.', true],
      ['kind','string','Event kind to search for.', true],
      ['direction','"before"|"after"|"any"','Search direction. Default "any".'],
    ],
    responseFields: [
      ['event','Event','The nearest event, or null.'],
      ['delta','number','Seconds from t to the event.'],
    ],
    requestExample: {
      'timeline':[
        { 'kind':'silence',  't': 859.9, 'dur': 0.4 },
        { 'kind':'laughter', 't': 862.4, 'dur': 1.8 },
      ],
      't': 862.4, 'kind': 'silence', 'direction': 'before',
    },
    responseExample: { 'event': { 'kind':'silence','t':859.9,'dur':0.4 }, 'delta': -2.5, 'price_usd': 0.0005 },
    related: ['timeline/merge','timeline/suggest-ranges','audio/detect-silence'],
  },

  'timeline/suggest-ranges': {
    group: 'timeline', title: 'suggest-ranges', path: '/v1/timeline/suggest-ranges', method: 'POST',
    short: 'Suggest start/end ranges from many signals — your one-call clip recommender.',
    when: 'Use to convert a merged timeline into render-ready clip ranges. Common chain endpoint for podcast clip engines.',
    price: 'low fixed / request',
    params: [
      ['timeline','Event[]','Merged timeline.', true],
      ['anchors','string[]','Event kinds that mark interesting moments (e.g. ["laughter","energy_peak"]).', true],
      ['boundary_kinds','string[]','Event kinds used as boundary candidates (default ["silence","speaker_change"]).'],
      ['min_clip','number','Min clip length in seconds (default 8).'],
      ['max_clip','number','Max clip length in seconds (default 60).'],
    ],
    responseFields: [
      ['ranges','{start,end,reason}[]','Suggested clip ranges.'],
    ],
    requestExample: {
      'timeline':[
        { 'kind':'silence',     'start': 840.0, 'end': 850.1, 'confidence': 0.91 },
        { 'kind':'laughter',    'start': 862.4, 'end': 864.2, 'confidence': 0.94 },
        { 'kind':'energy_peak', 'start': 866.0, 'end': 875.5, 'confidence': 0.88 },
        { 'kind':'silence',     'start': 882.6, 'end': 886.0, 'confidence': 0.89 },
      ],
      'anchors':['laughter','energy_peak'],
      'min_clip': 12, 'max_clip': 45,
    },
    responseExample: {
      'ranges':[
        { 'start':850.1,'end':882.6,'reason':'laughter_anchor' },
        { 'start':1402.4,'end':1438.0,'reason':'energy_peak_anchor' },
      ],
      'price_usd': 0.001,
    },
    related: ['timeline/merge','video/clip-window','audio/detect-laughter'],
  },
};

// Endpoint groups — first-class metadata.
// Build one group record; pathPrefix is always derived from the id so the
// two can never disagree.
const makeGroup = ({ id, name, desc, blurb, color }) => ({
  id,
  name,
  pathPrefix: `/v1/${id}/*`,
  desc,
  blurb,
  color,
});

// One entry per API group; `color` is an oklch accent value.
const GROUPS = {
  video: makeGroup({
    id: 'video',
    name: 'Video',
    desc: 'Frame extraction, cuts, thumbnails, clipping.',
    blurb: 'Frames, cuts, thumbnails, clipping.',
    color: 'oklch(0.62 0.18 250)',
  }),
  audio: makeGroup({
    id: 'audio',
    name: 'Audio',
    desc: 'Speech, silence, speakers, laughter, music, energy.',
    blurb: 'Speech, silence, speakers, laughter, music.',
    color: 'oklch(0.70 0.16 52)',
  }),
  timeline: makeGroup({
    id: 'timeline',
    name: 'Timeline',
    desc: 'Combine video + audio signals into one media timeline.',
    blurb: 'Combine signals into a shared timeline.',
    color: 'oklch(0.66 0.13 155)',
  }),
};

// ---------------------------------------------------------------------------
// PRICING — PLACEHOLDER VALUES
//
// These prices are temporary placeholder values used for UI/demo estimation.
// Final prices should be set after measuring real processing cost,
// infrastructure cost, storage, bandwidth, and margin per endpoint.
//
// Do NOT treat current values as final business pricing. They drive the
// playground estimator, pricing page, and docs labels only.
//
// To update pricing once real benchmarks exist:
//   1. Edit PRICE_USD_PER_MIN below (numeric price per unit).
//   2. Edit PRICING_UNITS (further down) only if an endpoint's unit changes
//      (video_min | audio_min | request | output_min | clip_min).
// Every page reads through priceFor() / pricingUnit() / priceLabel() —
// no other file needs to change.
// ---------------------------------------------------------------------------
const PRICE_USD_PER_MIN = {
  'video/best-frames':        0.04,
  'video/best-frame-near':    0.03,
  'video/text-frames':        0.04,
  'video/detect-cuts':        0.02,
  'video/dedupe-frames':      0.02,
  'video/thumbnail-score':    0.04,
  'video/clip-window':        0.05,
  'video/clip-near':          0.05,
  'audio/detect-silence':     0.01,
  'audio/detect-laughter':    0.04,
  'audio/detect-energy':      0.02,
  'audio/detect-speakers':    0.05,
  'audio/detect-music':       0.03,
  'audio/isolate-speech':     0.06,
  'audio/suggest-cut-points': 0.03,
  'audio/semantic-chunks':    0.05,
  'timeline/merge':           0.005,
  'timeline/find-nearest':    0.005,
  'timeline/suggest-ranges':  0.01,
};

// Pricing unit per endpoint. Drives priceLabel() display.
// Allowed values: video_min | audio_min | request | output_min | clip_min
const PRICING_UNITS = {
  'video/best-frames':        'video_min',
  'video/best-frame-near':    'video_min',
  'video/text-frames':        'video_min',
  'video/detect-cuts':        'video_min',
  'video/dedupe-frames':      'video_min',
  'video/thumbnail-score':    'video_min',
  'video/clip-window':        'clip_min',
  'video/clip-near':          'clip_min',
  'audio/detect-silence':     'audio_min',
  'audio/detect-laughter':    'audio_min',
  'audio/detect-energy':      'audio_min',
  'audio/detect-speakers':    'audio_min',
  'audio/detect-music':       'audio_min',
  'audio/isolate-speech':     'output_min',
  'audio/suggest-cut-points': 'audio_min',
  'audio/semantic-chunks':    'audio_min',
  'timeline/merge':           'request',
  'timeline/find-nearest':    'request',
  'timeline/suggest-ranges':  'request',
};

// Human-readable label for each pricing unit (read by priceUnit()).
const UNIT_LABELS = {
  video_min:  'video min',
  audio_min:  'audio min',
  request:    'request',
  output_min: 'output min',
  clip_min:   'clip min',
};

// Canonical set of allowed unit keys — derived from UNIT_LABELS so the
// two definitions can never drift apart.
const VALID_PRICING_UNITS = new Set(Object.keys(UNIT_LABELS));

// Helpers — every page should read these, not hardcode.
/** All endpoint ids, in declaration order. */
const endpointIds                 = () => Object.keys(ENDPOINTS);
/** Ids of endpoints whose `group` equals `g` ('video' | 'audio' | 'timeline'). */
const endpointIdsByGroup          = (g) => Object.keys(ENDPOINTS).filter(id => ENDPOINTS[id].group === g);
/** Number of endpoints in group `g`. */
const endpointGroupCount          = (g) => endpointIdsByGroup(g).length;
/** Numeric USD price for `id`: per-endpoint `priceUsd` override first, then
 *  the PRICE_USD_PER_MIN table, then 0 for unknown ids. */
const priceFor                    = (id) => ENDPOINTS[id]?.priceUsd ?? PRICE_USD_PER_MIN[id] ?? 0;
/** Pricing-unit key for `id` (per-endpoint override, table, then 'request'). */
const pricingUnit                 = (id) => ENDPOINTS[id]?.pricingUnit || PRICING_UNITS[id] || 'request';
/** Display suffix for `id`'s unit, e.g. "/ video min". */
const priceUnit                   = (id) => `/ ${UNIT_LABELS[pricingUnit(id)] || pricingUnit(id)}`;
/** Full display label, e.g. "$0.04 / video min". Sub-cent prices get three
 *  decimals so '$0.005' doesn't round away to '$0.01'. */
const priceLabel                  = (id) => {
  const p = priceFor(id);
  const decimals = p < 0.01 ? 3 : 2;
  return `$${p.toFixed(decimals)} ${priceUnit(id)}`;
};
/** Docs page filename for an endpoint id, e.g. 'video/best-frames' →
 *  'docs-video-best-frames.html'. Uses replaceAll (not replace) so an id
 *  containing more than one '/' still flattens to a single filename. */
const docsHrefFor                 = (id) => `docs-${id.replaceAll('/','-')}.html`;

// ---------------------------------------------------------------------------
// SEO metadata — single source of truth for every endpoint page <title>/<meta>.
// Endpoint HTML files set document.title from these at runtime via EndpointPage.
// ---------------------------------------------------------------------------
const SEO = {
  'video/best-frames':         { seoTitle: 'video/best-frames API — extract the best frames from a video | MomentIQ',         seoDescription: 'Return the clearest, most informative frames from a video, ranked. Request, response, and cURL/JS/Python examples for /v1/video/best-frames.' },
  'video/best-frame-near':     { seoTitle: 'video/best-frame-near API — best frame near a timestamp | MomentIQ',              seoDescription: 'Return the best still image near a target timestamp. Reference docs for /v1/video/best-frame-near with parameters, response schema, and code samples.' },
  'video/text-frames':         { seoTitle: 'video/text-frames API — frames containing slides, whiteboards, screens | MomentIQ', seoDescription: 'Find frames with readable text — slides, whiteboards, dashboards, terminals — with optional OCR. Reference docs for /v1/video/text-frames.' },
  'video/detect-cuts':         { seoTitle: 'video/detect-cuts API — shot boundaries and scene changes | MomentIQ',            seoDescription: 'Detect hard visual changes — shot boundaries, slide swaps, scene jumps. Reference docs for /v1/video/detect-cuts.' },
  'video/dedupe-frames':       { seoTitle: 'video/dedupe-frames API — remove repetitive frames | MomentIQ',                   seoDescription: 'Drop visually-repetitive frames from a list and keep one representative per cluster. Reference docs for /v1/video/dedupe-frames.' },
  'video/thumbnail-score':     { seoTitle: 'video/thumbnail-score API — rank frames for thumbnail use | MomentIQ',            seoDescription: 'Score frames for thumbnail suitability — face presence, contrast, focal interest. Reference docs for /v1/video/thumbnail-score.' },
  'video/clip-window':         { seoTitle: 'video/clip-window API — render a clip from start to end | MomentIQ',              seoDescription: 'Cut a clip from a start and end timestamp. Returns a playable URL. Reference docs for /v1/video/clip-window.' },
  'video/clip-near':           { seoTitle: 'video/clip-near API — render a clip centered on a moment | MomentIQ',             seoDescription: 'Cut a clip centered on a timestamp with smart boundary alignment to silence and cuts. Reference docs for /v1/video/clip-near.' },
  'audio/detect-silence':      { seoTitle: 'audio/detect-silence API — find silence regions in audio | MomentIQ',              seoDescription: 'Find silence regions, breath pauses, and natural cut points. Reference docs for /v1/audio/detect-silence with parameters and examples.' },
  'audio/detect-laughter':     { seoTitle: 'audio/detect-laughter API — detect laughter in podcasts and streams | MomentIQ',  seoDescription: 'Detect laughter moments with confidence scores. Perfect for podcast clipping. Reference docs for /v1/audio/detect-laughter.' },
  'audio/detect-energy':       { seoTitle: 'audio/detect-energy API — score audio energy over time | MomentIQ',                seoDescription: 'Score audio energy over time — find loud, intense, or quiet parts. Reference docs for /v1/audio/detect-energy.' },
  'audio/detect-speakers':     { seoTitle: 'audio/detect-speakers API — speaker diarization | MomentIQ',                       seoDescription: 'Speaker diarization — who spoke when, with optional overlap detection. Reference docs for /v1/audio/detect-speakers.' },
  'audio/detect-music':        { seoTitle: 'audio/detect-music API — detect music sections | MomentIQ',                        seoDescription: 'Detect music sections and speech-vs-music overlap. Reference docs for /v1/audio/detect-music.' },
  'audio/isolate-speech':      { seoTitle: 'audio/isolate-speech API — clean speech-only audio | MomentIQ',                    seoDescription: 'Return a clean speech-only audio track with noise and music removed. Reference docs for /v1/audio/isolate-speech.' },
  'audio/suggest-cut-points':  { seoTitle: 'audio/suggest-cut-points API — natural edit boundaries | MomentIQ',                seoDescription: 'Suggest natural edit boundaries from silence + speaker changes + topic shifts. Reference docs for /v1/audio/suggest-cut-points.' },
  'audio/semantic-chunks':     { seoTitle: 'audio/semantic-chunks API — split audio by topic and speaker | MomentIQ',          seoDescription: 'Split audio into topic-aware chunks for RAG indexing. Reference docs for /v1/audio/semantic-chunks.' },
  'timeline/merge':            { seoTitle: 'timeline/merge API — combine signals into one media timeline | MomentIQ',          seoDescription: 'Combine outputs from any MomentIQ endpoint into a single shared timeline. Reference docs for /v1/timeline/merge.' },
  'timeline/find-nearest':     { seoTitle: 'timeline/find-nearest API — nearest event of a kind | MomentIQ',                   seoDescription: 'Given a timestamp and event kind, return the nearest event. Reference docs for /v1/timeline/find-nearest.' },
  'timeline/suggest-ranges':   { seoTitle: 'timeline/suggest-ranges API — recommend clip ranges | MomentIQ',                   seoDescription: 'Convert a merged timeline into render-ready clip ranges. Reference docs for /v1/timeline/suggest-ranges.' },
};

// ---------------------------------------------------------------------------
// Playground metadata — drives the shared <Playground> component.
//   playgroundEnabled    — show in selector
//   playgroundLabel      — short label for the selector / header
//   playgroundDescription — 1-line description shown above the controls
//   playgroundControls   — array of input descriptors:
//                            { key, type, label, default, hint?, options?, min?, max?, step? }
//                          types: 'url' | 'text' | 'number' | 'select' | 'toggle' | 'json'
//   defaultRequest       — initial request body (merged with current control values)
//   fakeResponseType     — 'frames' | 'timeline-segments' | 'clip' | 'signal'
// ---------------------------------------------------------------------------

// Shared demo media URLs reused as playground defaults below
// (lecture / podcast / all-hands recordings, per the filenames).
const _u = 'https://cdn.momentiq.dev/demo/lecture-08.mp4';
const _p = 'https://cdn.momentiq.dev/demo/podcast-ep-42.mp4';
const _m = 'https://cdn.momentiq.dev/demo/all-hands.mp4';

// Per-endpoint playground configuration, keyed by endpoint id.
const PLAYGROUND = {
  'video/best-frames': {
    playgroundEnabled: true,
    playgroundLabel: 'Pick the best frames',
    playgroundDescription: 'Return the clearest, most informative frames in a video — ranked.',
    playgroundControls: [
      { key:'media_url',  type:'url',    label:'media_url',  default:_u },
      { key:'max_frames', type:'number', label:'max_frames', default:8, min:1, max:64, step:1 },
      { key:'min_gap',    type:'number', label:'min_gap',    default:6, min:0, max:60, step:1, hint:'seconds' },
      { key:'quality',    type:'select', label:'quality',    default:'std', options:['low','std','high'] },
    ],
    defaultRequest: { media_url:_u, max_frames:8, min_gap:6, quality:'std' },
    fakeResponseType: 'frames',
  },
  'video/best-frame-near': {
    playgroundEnabled: true,
    playgroundLabel: 'Best frame near a timestamp',
    playgroundDescription: 'One clean cover frame near a moment you already know about.',
    playgroundControls: [
      { key:'media_url', type:'url',    label:'media_url', default:_p },
      { key:'t',         type:'number', label:'t',         default:862.4, min:0, step:0.1, hint:'seconds' },
      { key:'window',    type:'number', label:'window',    default:3, min:0.5, max:30, step:0.5 },
    ],
    defaultRequest: { media_url:_p, t:862.4, window:3 },
    fakeResponseType: 'frames',
  },
  'video/text-frames': {
    playgroundEnabled: true,
    playgroundLabel: 'Slide & whiteboard frames',
    playgroundDescription: 'Find frames containing readable text — for RAG, lecture notes, demo recaps.',
    playgroundControls: [
      { key:'media_url', type:'url',    label:'media_url', default:_u },
      { key:'ocr',       type:'toggle', label:'ocr',       default:true,  hint:'include OCR text' },
      { key:'dedupe',    type:'toggle', label:'dedupe',    default:true,  hint:'drop near-duplicates' },
      { key:'min_text_density', type:'number', label:'min_text_density', default:0.05, min:0, max:1, step:0.01 },
    ],
    defaultRequest: { media_url:_u, ocr:true, dedupe:true },
    fakeResponseType: 'frames',
  },
  'video/detect-cuts': {
    playgroundEnabled: true,
    playgroundLabel: 'Shot boundaries',
    playgroundDescription: 'Detect hard visual changes — shot boundaries, slide swaps, scene jumps.',
    playgroundControls: [
      { key:'media_url',   type:'url',    label:'media_url',   default:_p },
      { key:'sensitivity', type:'number', label:'sensitivity', default:0.6, min:0, max:1, step:0.05 },
      { key:'min_segment', type:'number', label:'min_segment', default:0.4, min:0, max:30, step:0.1, hint:'seconds' },
    ],
    defaultRequest: { media_url:_p, sensitivity:0.6 },
    fakeResponseType: 'timeline-segments',
  },
  'video/dedupe-frames': {
    playgroundEnabled: true,
    playgroundLabel: 'Dedupe frame list',
    playgroundDescription: 'Remove visually-repetitive frames — keep one representative per cluster.',
    playgroundControls: [
      { key:'frames',    type:'json',   label:'frames',    default:[{t:12.4},{t:12.6},{t:88.0}], hint:'Frame[]' },
      { key:'threshold', type:'number', label:'threshold', default:0.92, min:0.5, max:1, step:0.01 },
    ],
    defaultRequest: { frames:[{t:12.4},{t:12.6},{t:88.0}], threshold:0.92 },
    fakeResponseType: 'frames',
  },
  'video/thumbnail-score': {
    playgroundEnabled: true,
    playgroundLabel: 'Score frames for thumbnails',
    playgroundDescription: 'Rank a frame set by face presence, expression, contrast, focal interest.',
    playgroundControls: [
      { key:'frames', type:'json',   label:'frames', default:[{t:312},{t:901}], hint:'Frame[]' },
      { key:'target', type:'select', label:'target', default:'clip', options:['clip','video','chapter'] },
    ],
    defaultRequest: { frames:[{t:312},{t:901}], target:'clip' },
    fakeResponseType: 'frames',
  },
  'video/clip-window': {
    playgroundEnabled: true,
    playgroundLabel: 'Render a clip',
    playgroundDescription: 'Cut a clip from start to end. Returns a playable URL.',
    // FIX: defaults previously pointed at a developer's local Windows file
    // (C:\Users\...\Comedy_Test_Recording.mp4) under a `local_path` key —
    // dev-test residue. The in-browser playground cannot read local files,
    // so use the hosted demo media_url like every other endpoint.
    playgroundControls: [
      { key:'media_url', type:'url',    label:'media_url', default:_u },
      { key:'start',     type:'number', label:'start',     default:0,  min:0, step:1, hint:'seconds' },
      { key:'end',       type:'number', label:'end',       default:30, min:0, step:1, hint:'seconds' },
      { key:'format',    type:'select', label:'format',    default:'mp4', options:['mp4','mov','mp3'] },
    ],
    defaultRequest: { media_url:_u, start:0, end:30, format:'mp4' },
    fakeResponseType: 'clip',
  },
  'video/clip-near': {
    playgroundEnabled: true,
    playgroundLabel: 'Smart-boundary clip',
    playgroundDescription: 'Cut a clip centered on a moment, snapped to silence + cut boundaries.',
    playgroundControls: [
      { key:'media_url',       type:'url',    label:'media_url',       default:_p },
      { key:'t',               type:'number', label:'t',               default:862.4, min:0, step:0.1 },
      { key:'target_duration', type:'number', label:'target_duration', default:30, min:5, max:600, step:1 },
      { key:'snap',            type:'toggle', label:'snap',            default:true, hint:'snap to silence/cuts' },
    ],
    defaultRequest: { media_url:_p, t:862.4, target_duration:30, snap:true },
    fakeResponseType: 'clip',
  },
  'audio/detect-silence': {
    playgroundEnabled: true,
    playgroundLabel: 'Find silences',
    playgroundDescription: 'Surface silence regions — natural edit boundaries.',
    playgroundControls: [
      { key:'media_url',    type:'url',    label:'media_url',    default:_p },
      { key:'min_duration', type:'number', label:'min_duration', default:0.4, min:0.1, max:10, step:0.1, hint:'seconds' },
      { key:'threshold_db', type:'number', label:'threshold_db', default:-35, min:-60, max:-10, step:1, hint:'dB' },
    ],
    defaultRequest: { media_url:_p, min_duration:0.4 },
    fakeResponseType: 'signal',
  },
  'audio/detect-laughter': {
    playgroundEnabled: true,
    playgroundLabel: 'Find laughs',
    playgroundDescription: 'Detect laughter moments with confidence scores.',
    playgroundControls: [
      { key:'media_url',   type:'url',    label:'media_url',   default:_p },
      { key:'sensitivity', type:'number', label:'sensitivity', default:0.62, min:0, max:1, step:0.02 },
      { key:'min_duration',type:'number', label:'min_duration',default:0.4,  min:0.1, max:5, step:0.1, hint:'seconds' },
    ],
    defaultRequest: { media_url:_p, sensitivity:0.62 },
    fakeResponseType: 'signal',
  },
  'audio/detect-energy': {
    playgroundEnabled: true,
    playgroundLabel: 'Score audio energy',
    playgroundDescription: 'Find loud / intense / quiet parts. Useful for energy heatmaps.',
    playgroundControls: [
      { key:'media_url',  type:'url',    label:'media_url',  default:_p },
      { key:'resolution', type:'number', label:'resolution', default:0.5, min:0.1, max:5, step:0.1, hint:'seconds' },
    ],
    defaultRequest: { media_url:_p, resolution:0.5 },
    fakeResponseType: 'signal',
  },
  'audio/detect-speakers': {
    playgroundEnabled: true,
    playgroundLabel: 'Speaker diarization',
    playgroundDescription: 'Who spoke when, with optional overlap detection.',
    playgroundControls: [
      { key:'media_url',          type:'url',    label:'media_url',          default:_m },
      { key:'estimated_speakers', type:'number', label:'estimated_speakers', default:3, min:1, max:20, step:1, hint:'optional hint' },
      { key:'detect_overlap',     type:'toggle', label:'detect_overlap',     default:true },
    ],
    defaultRequest: { media_url:_m, estimated_speakers:3, detect_overlap:true },
    fakeResponseType: 'signal',
  },
  'audio/detect-music': {
    playgroundEnabled: true,
    playgroundLabel: 'Find music sections',
    playgroundDescription: 'Detect music regions and speech-vs-music overlap.',
    playgroundControls: [
      { key:'media_url',    type:'url',    label:'media_url',    default:_p },
      { key:'min_duration', type:'number', label:'min_duration', default:1.5, min:0.5, max:30, step:0.5 },
    ],
    defaultRequest: { media_url:_p, min_duration:1.5 },
    fakeResponseType: 'signal',
  },
  'audio/isolate-speech': {
    playgroundEnabled: true,
    playgroundLabel: 'Clean speech-only audio',
    playgroundDescription: 'Strip noise and music. Returns a playable cleaned URL.',
    playgroundControls: [
      { key:'media_url',       type:'url',    label:'media_url',       default:_p },
      { key:'format',          type:'select', label:'format',          default:'mp3', options:['mp3','wav'] },
      { key:'preserve_levels', type:'toggle', label:'preserve_levels', default:false },
    ],
    defaultRequest: { media_url:_p, format:'mp3' },
    fakeResponseType: 'clip',
  },
  'audio/suggest-cut-points': {
    playgroundEnabled: true,
    playgroundLabel: 'Natural edit boundaries',
    playgroundDescription: 'Silence + speaker change + topic shift cut points.',
    playgroundControls: [
      { key:'media_url', type:'url',  label:'media_url', default:_p },
      { key:'around',    type:'json', label:'around',    default:[862.4], hint:'number[] · seconds' },
    ],
    defaultRequest: { media_url:_p, around:[862.4] },
    fakeResponseType: 'signal',
  },
  'audio/semantic-chunks': {
    playgroundEnabled: true,
    playgroundLabel: 'Topic-aware chunks',
    playgroundDescription: 'Split audio by topic and speaker — for RAG indexing.',
    playgroundControls: [
      { key:'media_url',           type:'url',    label:'media_url',           default:_u },
      { key:'target_chunk_seconds',type:'number', label:'target_chunk_seconds',default:60, min:10, max:600, step:5 },
    ],
    defaultRequest: { media_url:_u, target_chunk_seconds:60 },
    fakeResponseType: 'timeline-segments',
  },
  'timeline/merge': {
    playgroundEnabled: true,
    playgroundLabel: 'Merge signals into a timeline',
    playgroundDescription: 'Combine results from any endpoint into one canonical timeline.',
    playgroundControls: [
      { key:'media_seconds', type:'number', label:'media_seconds', default:2892, min:1, step:1 },
      { key:'signals',       type:'json',   label:'signals',       default:[{kind:'laughter',events:[{t:862.4,dur:1.8}]},{kind:'silence',events:[{t:859.9,dur:0.4}]}], hint:'Signal[]' },
    ],
    defaultRequest: { media_seconds:2892, signals:[{kind:'laughter',events:[{t:862.4,dur:1.8}]},{kind:'silence',events:[{t:859.9,dur:0.4}]}] },
    fakeResponseType: 'timeline-segments',
  },
  'timeline/find-nearest': {
    playgroundEnabled: true,
    playgroundLabel: 'Nearest event',
    playgroundDescription: 'Find the nearest event of a kind to a timestamp.',
    playgroundControls: [
      { key:'timeline',  type:'json',   label:'timeline',  default:[{kind:'silence',t:859.9,dur:0.4},{kind:'laughter',t:862.4,dur:1.8}], hint:'Event[]' },
      { key:'t',         type:'number', label:'t',         default:862.4, step:0.1 },
      { key:'kind',      type:'select', label:'kind',      default:'silence', options:['silence','laughter','speaker_change','energy_peak','cut','music'] },
      { key:'direction', type:'select', label:'direction', default:'before', options:['before','after','any'] },
    ],
    defaultRequest: { t:862.4, kind:'silence', direction:'before' },
    fakeResponseType: 'signal',
  },
  'timeline/suggest-ranges': {
    playgroundEnabled: true,
    playgroundLabel: 'Recommend clip ranges',
    playgroundDescription: 'Convert a merged timeline into render-ready clip ranges.',
    playgroundControls: [
      { key:'anchors',  type:'json',   label:'anchors',  default:['laughter','energy_peak'], hint:'string[]' },
      { key:'min_clip', type:'number', label:'min_clip', default:12, min:1, max:600, step:1 },
      { key:'max_clip', type:'number', label:'max_clip', default:45, min:1, max:600, step:1 },
    ],
    defaultRequest: { anchors:['laughter','energy_peak'], min_clip:12, max_clip:45 },
    fakeResponseType: 'timeline-segments',
  },
};

// ---------------------------------------------------------------------------
// Field normalization: convert any tuple-form params/responseFields entries
// (legacy ["name","type","desc",required?]) into the canonical object form.
//   params         → { name, type, description, required }
//   responseFields → { name, type, description }
// Renderers should read these objects. Tuple form is no longer authoritative.
// ---------------------------------------------------------------------------
// Convert a legacy tuple param — ["name","type","desc",required?] — into the
// canonical { name, type, description, required } object. Values already in
// object form (non-arrays) pass through unchanged.
const _normalizeParam = (f) => {
  if (!Array.isArray(f)) return f;
  return {
    name: f[0],
    type: f[1],
    description: f[2],
    required: Boolean(f[3]),
  };
};
// Convert a legacy tuple response field — ["name","type","desc"] — into the
// canonical { name, type, description } object; non-arrays pass through.
const _normalizeResponseField = (f) =>
  Array.isArray(f)
    ? { name: f[0], type: f[1], description: f[2] }
    : f;

// Decorate every endpoint in place: merge SEO + playground metadata, backfill
// SEO defaults, attach pricing fields, normalize legacy tuple fields, and
// remove the deprecated `price` string.
for (const id of Object.keys(ENDPOINTS)) {
  const ep = ENDPOINTS[id];
  Object.assign(ep, SEO[id] || {}, PLAYGROUND[id] || {});

  // SEO fallbacks for endpoints without an explicit SEO entry.
  ep.seoTitle ||= `${ep.path} — MomentIQ Docs`;
  ep.seoDescription ||= ep.short;

  // pricingUnit + priceUsd — single source of truth for pricing display.
  // (Placeholder values — see PRICE_USD_PER_MIN comment block above.)
  ep.pricingUnit = PRICING_UNITS[id] || (id.startsWith('timeline/') ? 'request' : `${id.split('/')[0]}_min`);
  ep.priceUsd = PRICE_USD_PER_MIN[id] ?? 0;

  // Legacy tuple → canonical object form.
  if (Array.isArray(ep.params)) ep.params = ep.params.map(_normalizeParam);
  if (Array.isArray(ep.responseFields)) ep.responseFields = ep.responseFields.map(_normalizeResponseField);

  // Legacy duplicated price string — pages now use priceLabel(id).
  delete ep.price;
}

// ---------------------------------------------------------------------------
// Validation — runs once at module load. Emits console warnings only and
// never throws, so a data mistake cannot take the docs site down.
// Per endpoint it verifies:
//   - pricingUnit is present and one of VALID_PRICING_UNITS
//   - endpoints priced per request never render a per-minute label
//   - params are canonical objects with name/type/description/required
//   - responseFields are canonical objects with name/type/description
//   - requestExample demonstrates every required param
//   - each playgroundControl maps to a param (or opts out via uiOnly)
// ---------------------------------------------------------------------------
const validateEndpoints = () => {
  const issues = [];
  for (const [id, ep] of Object.entries(ENDPOINTS)) {
    const flag = (msg) => issues.push(`[${id}] ${msg}`);

    // pricingUnit present and recognized.
    if (!ep.pricingUnit) {
      flag('missing pricingUnit');
    } else if (!VALID_PRICING_UNITS.has(ep.pricingUnit)) {
      flag(`invalid pricingUnit "${ep.pricingUnit}"`);
    }

    // A per-request endpoint must not display a per-minute price.
    if (ep.pricingUnit === 'request') {
      const label = priceLabel(id);
      if (/\bmin\b/.test(label)) {
        flag(`pricingUnit=request but priceLabel says "${label}"`);
      }
    }

    // params must be fully-populated canonical objects.
    for (const [i, p] of (ep.params || []).entries()) {
      if (!p || typeof p !== 'object' || Array.isArray(p)) {
        flag(`params[${i}] is not an object`);
        continue;
      }
      const where = `params[${i}].${p.name || i}`;
      if (!p.name) flag(`params[${i}] missing name`);
      if (!p.type) flag(`${where} missing type`);
      if (!p.description) flag(`${where} missing description`);
      if (typeof p.required !== 'boolean') flag(`${where} required must be boolean`);
    }

    // responseFields must be fully-populated canonical objects.
    for (const [i, r] of (ep.responseFields || []).entries()) {
      if (!r || typeof r !== 'object' || Array.isArray(r)) {
        flag(`responseFields[${i}] is not an object`);
        continue;
      }
      const where = `responseFields[${i}].${r.name || i}`;
      if (!r.name) flag(`responseFields[${i}] missing name`);
      if (!r.type) flag(`${where} missing type`);
      if (!r.description) flag(`${where} missing description`);
    }

    // requestExample must demonstrate every required param.
    const exampleKeys = new Set(Object.keys(ep.requestExample || {}));
    for (const p of ep.params || []) {
      if (p.required && !exampleKeys.has(p.name)) {
        flag(`requestExample missing required param "${p.name}"`);
      }
    }

    // Every playground control must map to a documented param,
    // unless it explicitly opts out via uiOnly.
    const paramNames = new Set((ep.params || []).map((p) => p.name));
    for (const c of ep.playgroundControls || []) {
      if (!c.uiOnly && !paramNames.has(c.key)) {
        flag(`playgroundControl "${c.key}" has no matching param (add uiOnly:true if intentional)`);
      }
    }
  }

  if (issues.length) {
    console.warn(`[endpoint-data] ${issues.length} validation issue(s):`);
    for (const issue of issues) console.warn('  • ' + issue);
  } else {
    console.info(`[endpoint-data] validation OK — ${Object.keys(ENDPOINTS).length} endpoints clean.`);
  }
  return issues;
};
validateEndpoints();

// Helpers introduced by this layer.
// The playground selector shows every endpoint unless it explicitly opts out.
const playgroundEnabledIds = () => {
  return endpointIds().filter((id) => ENDPOINTS[id].playgroundEnabled !== false);
};
// SEO accessors — fall back to '' for unknown ids or missing fields.
const seoTitle = (id) => {
  const ep = ENDPOINTS[id];
  return (ep && ep.seoTitle) || '';
};
const seoDescription = (id) => {
  const ep = ENDPOINTS[id];
  return (ep && ep.seoDescription) || '';
};

// Publish this layer's public surface as script-tag globals
// (same names, same order as the previous one-per-line assignments).
Object.assign(window, {
  ENDPOINTS,
  GROUPS,
  PRICE_USD_PER_MIN,
  PRICING_UNITS,
  UNIT_LABELS,
  endpointIds,
  endpointIdsByGroup,
  endpointGroupCount,
  playgroundEnabledIds,
  priceFor,
  priceUnit,
  pricingUnit,
  priceLabel,
  docsHrefFor,
  seoTitle,
  seoDescription,
  validateEndpoints,
});
